hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
5bf256ea9783f639cefb76930fbe02fc511041fb | 43 | fn main() {
println!("Hello world");
}
| 10.75 | 28 | 0.534884 |
bbdff7faceb50f189728d4aa6dec1a2ce68044d6 | 5,634 | use pyo3::prelude::*;
use pyo3::types::IntoPyDict;
#[allow(clippy::trivially_copy_pass_by_ref)]
fn _get_subclasses<'p>(
    py: &'p Python,
    py_type: &str,
    args: &str,
) -> PyResult<(&'p PyAny, &'p PyAny, &'p PyAny)> {
    // Look the requested class up in the `datetime` module and expose it to
    // the Python snippets below under its own name.
    let datetime = py.import("datetime")?;
    let locals = [(py_type, datetime.get(py_type)?)].into_py_dict(*py);

    // Define a subclass of the requested type, then a subclass of that.
    let subclass_src = format!("class Subklass({}):\n pass", py_type);
    py.run(&subclass_src, None, Some(&locals))?;
    py.run("class SubSubklass(Subklass):\n pass", None, Some(&locals))?;

    // Instantiate the base class, the subclass and the sub-subclass with the
    // same constructor arguments.
    let eval_ctor = |src: String| py.eval(&src, None, Some(&locals));
    let base = eval_ctor(format!("{}({})", py_type, args))?;
    let sub = eval_ctor(format!("Subklass({})", args))?;
    let sub_sub = eval_ctor(format!("SubSubklass({})", args))?;
    Ok((base, sub, sub_sub))
}
/// Asserts that the FFI predicate `$check_func` matches `$obj` AND that the
/// corresponding `<check_func>Exact` predicate also matches, i.e. `$obj` is
/// exactly an instance of the checked type (not a subclass).
/// The `Exact` function name is synthesized via `paste`'s token pasting.
macro_rules! assert_check_exact {
    ($check_func:ident, $obj: expr) => {
        unsafe {
            use pyo3::{AsPyPointer, ffi::*};
            assert!($check_func(($obj).as_ptr()) != 0);
            assert!(pyo3::paste::expr!([<$check_func Exact>])(($obj).as_ptr()) != 0);
        }
    };
}
/// Asserts that `$check_func` matches `$obj` but the `<check_func>Exact`
/// variant does NOT, i.e. `$obj` is an instance of a proper subclass of the
/// checked type rather than the type itself.
macro_rules! assert_check_only {
    ($check_func:ident, $obj: expr) => {
        unsafe {
            use pyo3::{AsPyPointer, ffi::*};
            assert!($check_func(($obj).as_ptr()) != 0);
            assert!(pyo3::paste::expr!([<$check_func Exact>])(($obj).as_ptr()) == 0);
        }
    };
}
// The CPython C-API `Py*_Check` macros accept subclasses, while the
// `Py*_CheckExact` variants accept only the exact type. Each test below
// verifies that distinction for one datetime type.
#[test]
fn test_date_check() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    let (obj, sub_obj, sub_sub_obj) = _get_subclasses(&py, "date", "2018, 1, 1").unwrap();
    assert_check_exact!(PyDate_Check, obj);
    assert_check_only!(PyDate_Check, sub_obj);
    assert_check_only!(PyDate_Check, sub_sub_obj);
}
#[test]
fn test_time_check() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    let (obj, sub_obj, sub_sub_obj) = _get_subclasses(&py, "time", "12, 30, 15").unwrap();
    assert_check_exact!(PyTime_Check, obj);
    assert_check_only!(PyTime_Check, sub_obj);
    assert_check_only!(PyTime_Check, sub_sub_obj);
}
#[test]
fn test_datetime_check() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    let (obj, sub_obj, sub_sub_obj) = _get_subclasses(&py, "datetime", "2018, 1, 1, 13, 30, 15")
        .map_err(|e| e.print(py))
        .unwrap();
    // A datetime is a subclass of date, so PyDate_Check matches it but
    // PyDate_CheckExact does not.
    assert_check_only!(PyDate_Check, obj);
    assert_check_exact!(PyDateTime_Check, obj);
    assert_check_only!(PyDateTime_Check, sub_obj);
    assert_check_only!(PyDateTime_Check, sub_sub_obj);
}
#[test]
fn test_delta_check() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    // Negative components are legal for timedelta.
    let (obj, sub_obj, sub_sub_obj) = _get_subclasses(&py, "timedelta", "1, -3").unwrap();
    assert_check_exact!(PyDelta_Check, obj);
    assert_check_only!(PyDelta_Check, sub_obj);
    assert_check_only!(PyDelta_Check, sub_sub_obj);
}
#[test]
fn test_datetime_utc() {
    use assert_approx_eq::assert_approx_eq;
    use pyo3::types::PyDateTime;
    let gil = Python::acquire_gil();
    let py = gil.python();
    // Fetch `datetime.timezone.utc` and build a timezone-aware PyDateTime.
    let datetime = py.import("datetime").map_err(|e| e.print(py)).unwrap();
    let timezone = datetime.get("timezone").unwrap();
    let utc = timezone.getattr("utc").unwrap().to_object(py);
    let dt = PyDateTime::new(py, 2018, 1, 1, 0, 0, 0, 0, Some(&utc)).unwrap();
    // Evaluate the UTC offset on the Python side; for UTC it must be 0 s.
    let locals = [("dt", dt)].into_py_dict(py);
    let offset: f32 = py
        .eval("dt.utcoffset().total_seconds()", None, Some(locals))
        .unwrap()
        .extract()
        .unwrap();
    assert_approx_eq!(offset, 0f32);
}
/// (year, month, day) triples that the date constructors must reject.
static INVALID_DATES: &[(i32, u8, u8)] = &[
    (-1, 1, 1),
    (0, 1, 1),
    (10000, 1, 1),   // beyond datetime.MAXYEAR (9999)
    (2 << 30, 1, 1), // year far outside any sane range
    (2018, 0, 1),    // months are 1-based
    (2018, 13, 1),
    (2018, 1, 0), // days are 1-based
    (2017, 2, 29), // 2017 is not a leap year
    (2018, 1, 32),
];
/// (hour, minute, second, microsecond) tuples the time constructors must reject.
static INVALID_TIMES: &[(u8, u8, u8, u32)] =
    &[(25, 0, 0, 0), (255, 0, 0, 0), (0, 60, 0, 0), (0, 0, 61, 0)];
#[test]
fn test_pydate_out_of_bounds() {
    use pyo3::types::PyDate;
    let gil = Python::acquire_gil();
    let py = gil.python();
    // Every invalid (year, month, day) triple must make the constructor fail.
    for &(year, month, day) in INVALID_DATES {
        PyDate::new(py, year, month, day).unwrap_err();
    }
}
#[test]
fn test_pytime_out_of_bounds() {
    use pyo3::types::PyTime;
    let gil = Python::acquire_gil();
    let py = gil.python();
    // Every invalid (h, m, s, µs) tuple must make the constructor fail.
    for &(hour, minute, second, microsecond) in INVALID_TIMES {
        PyTime::new(py, hour, minute, second, microsecond, None).unwrap_err();
    }
}
#[test]
fn test_pydatetime_out_of_bounds() {
    use pyo3::types::PyDateTime;
    use std::iter;
    let gil = Python::acquire_gil();
    let py = gil.python();
    let valid_time = (0, 0, 0, 0);
    let valid_date = (2018, 1, 1);
    // Pair every invalid date with a valid time, and every invalid time with
    // a valid date; each combination must make the constructor fail.
    let bad_dates = INVALID_DATES.iter().zip(iter::repeat(&valid_time));
    let bad_times = iter::repeat(&valid_date).zip(INVALID_TIMES.iter());
    for (&(year, month, day), &(hour, minute, second, microsecond)) in bad_dates.chain(bad_times) {
        PyDateTime::new(
            py,
            year,
            month,
            day,
            hour,
            minute,
            second,
            microsecond,
            None,
        )
        .unwrap_err();
    }
}
| 28.454545 | 96 | 0.590699 |
abe3d1ffd630a8ca2db9c14a33b4ad8a6fb3159a | 24,709 | use crate::avm2::activation::Activation;
use crate::avm2::bytearray::Endian;
use crate::avm2::class::{Class, ClassAttributes};
use crate::avm2::method::{Method, NativeMethod};
use crate::avm2::names::{Namespace, QName};
use crate::avm2::object::{Object, TObject};
use crate::avm2::string::AvmString;
use crate::avm2::value::Value;
use crate::avm2::Error;
use encoding_rs::Encoding;
use encoding_rs::UTF_8;
use gc_arena::{GcCell, MutationContext};
/// Implements `flash.utils.ByteArray`'s instance constructor.
pub fn instance_init<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        // Chain up to the superclass constructor; ByteArray takes no ctor args.
        activation.super_init(this, &[])?;
    }
    Ok(Value::Undefined)
}
/// Implements `flash.utils.ByteArray`'s class constructor.
pub fn class_init<'gc>(
    _activation: &mut Activation<'_, 'gc, '_>,
    _this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    // Nothing to initialize on the class object itself.
    Ok(Value::Undefined)
}
/// Writes a single byte to the bytearray
pub fn write_byte<'gc>(
activation: &mut Activation<'_, 'gc, '_>,
this: Option<Object<'gc>>,
args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
if let Some(this) = this {
if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
let byte = args
.get(0)
.cloned()
.unwrap_or(Value::Undefined)
.coerce_to_i32(activation)?;
bytearray.write_byte(byte as u8);
}
}
Ok(Value::Undefined)
}
/// Writes multiple bytes to the bytearray from another bytearray
///
/// `writeBytes(bytes, offset = 0, length = 0)`: copies `length` bytes from
/// the source bytearray starting at `offset` (or everything from `offset`
/// to the end when `length` is 0) into `this` at the current position.
pub fn write_bytes<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(Value::Object(second_array)) = args.get(0) {
        // Clone the source bytes up front so `this` and the source may alias.
        let combining_bytes = match second_array.as_bytearray() {
            Some(b) => b.bytes().clone(),
            None => return Err("ArgumentError: Parameter must be a bytearray".into()),
        };
        let offset = args
            .get(1)
            .unwrap_or(&Value::Unsigned(0))
            .coerce_to_u32(activation)? as usize;
        let length = args
            .get(2)
            .unwrap_or(&Value::Unsigned(0))
            .coerce_to_u32(activation)? as usize;
        // In the docs it says "If offset or length is out of range, they are clamped to the beginning and end of the bytes array."
        // However, in the actual flash player, it seems to just raise an error.
        if offset + length > combining_bytes.len() {
            return Err("EOFError: Reached EOF".into());
        }
        if let Some(this) = this {
            if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
                // length == 0 means "copy from offset to the end".
                bytearray.write_bytes(if length != 0 {
                    &combining_bytes[offset..length + offset]
                } else {
                    &combining_bytes[offset..]
                });
            }
        }
    }
    Ok(Value::Undefined)
}
// Reads the bytes from the current bytearray into another bytearray
//
// `readBytes(bytes, offset = 0, length = 0)`: copies `length` bytes
// (everything from the current position when `length` is 0) from `this`
// into the target bytearray at `offset`, then advances our read position
// by the number of bytes copied.
pub fn read_bytes<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        // Snapshot our buffer and cursor up front so the target bytearray can
        // be borrowed mutably below even if it aliases `this`.
        let (current_bytes, position) = {
            let bytearray = this
                .as_bytearray_mut(activation.context.gc_context)
                .unwrap();
            (bytearray.bytes().clone(), bytearray.position())
        };
        let mut merging_offset = 0;
        if let Some(Value::Object(second_array)) = args.get(0) {
            let offset = args
                .get(1)
                .unwrap_or(&Value::Unsigned(0))
                .coerce_to_u32(activation)? as usize;
            let length = args
                .get(2)
                .unwrap_or(&Value::Unsigned(0))
                .coerce_to_u32(activation)? as usize;
            if position + length > current_bytes.len() {
                return Err("EOFError: Reached EOF".into());
            }
            if let Some(mut merging_storage) =
                second_array.as_bytearray_mut(activation.context.gc_context)
            {
                // length == 0 means "copy everything from the cursor onward".
                let to_write = if length != 0 {
                    &current_bytes[position..length + position]
                } else {
                    &current_bytes[position..]
                };
                merging_offset = to_write.len();
                merging_storage.write_bytes_at(to_write, offset);
            } else {
                return Err("ArgumentError: Parameter must be a bytearray".into());
            }
        }
        // Advance our read cursor past everything that was copied out.
        this.as_bytearray_mut(activation.context.gc_context)
            .unwrap()
            .add_position(merging_offset);
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.writeUTF`: coerces the first argument to a string
/// and delegates to the storage's `write_utf`.
pub fn write_utf<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            if let Some(utf_string) = args.get(0) {
                let utf_string = utf_string.coerce_to_string(activation)?;
                // `as_str()` already yields a `&str`; the previous extra `&`
                // produced a needless `&&str`.
                bytearray.write_utf(utf_string.as_str())?;
            }
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readUTF`: reads a UTF string from the current
/// position via the storage's `read_utf` and wraps it in an `AvmString`.
pub fn read_utf<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            return Ok(AvmString::new(activation.context.gc_context, bytearray.read_utf()?).into());
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.toString`: decodes the whole buffer as UTF-8
/// (lossily, invalid sequences become replacement characters).
pub fn to_string<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(bytearray) = this.as_bytearray() {
            let bytes = bytearray.bytes();
            let (new_string, _, _) = UTF_8.decode(bytes);
            return Ok(AvmString::new(activation.context.gc_context, new_string).into());
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.clear`: empties the buffer.
pub fn clear<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            bytearray.clear();
        }
    }
    Ok(Value::Undefined)
}
/// Getter for the `position` property (the read/write cursor).
pub fn position<'gc>(
    _activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(bytearray) = this.as_bytearray() {
            return Ok(Value::Unsigned(bytearray.position() as u32));
        }
    }
    Ok(Value::Undefined)
}
/// Setter for the `position` property.
pub fn set_position<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let num = args
                .get(0)
                .unwrap_or(&Value::Integer(0))
                .coerce_to_u32(activation)?;
            // NOTE(review): the cursor is not clamped to the buffer length
            // here; `bytes_available` tolerates an out-of-range position.
            bytearray.set_position(num as usize);
        }
    }
    Ok(Value::Undefined)
}
/// Getter for the `bytesAvailable` property: number of bytes between the
/// current position and the end of the buffer, clamped at zero when the
/// cursor has been positioned past the end.
pub fn bytes_available<'gc>(
    _activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(bytearray) = this.as_bytearray() {
            // saturating_sub replaces the manual compare-then-subtract and
            // yields the same value in all cases.
            return Ok(Value::Unsigned(
                bytearray.bytes().len().saturating_sub(bytearray.position()) as u32,
            ));
        }
    }
    Ok(Value::Undefined)
}
/// Getter for the `length` property (total buffer size in bytes).
pub fn length<'gc>(
    _activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(bytearray) = this.as_bytearray() {
            return Ok(Value::Unsigned(bytearray.bytes().len() as u32));
        }
    }
    Ok(Value::Undefined)
}
/// Setter for the `length` property: grows or truncates the buffer.
pub fn set_length<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let len = args
                .get(0)
                .unwrap_or(&Value::Unsigned(0))
                .coerce_to_u32(activation)? as usize;
            bytearray.set_length(len);
        }
    }
    Ok(Value::Undefined)
}
/// Getter for the `endian` property; returns the AS3 string constants
/// "bigEndian" / "littleEndian".
pub fn endian<'gc>(
    _activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(bytearray) = this.as_bytearray() {
            return Ok(match bytearray.endian() {
                Endian::Big => "bigEndian".into(),
                Endian::Little => "littleEndian".into(),
            });
        }
    }
    Ok(Value::Undefined)
}
/// Setter for the `endian` property; only the two AS3 constants are accepted.
pub fn set_endian<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            match args
                .get(0)
                .unwrap_or(&Value::Undefined)
                .coerce_to_string(activation)?
                .as_str()
            {
                "bigEndian" => bytearray.set_endian(Endian::Big),
                "littleEndian" => bytearray.set_endian(Endian::Little),
                _ => return Err("Parameter type must be one of the accepted values.".into()),
            }
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readShort`: reads a signed 16-bit integer.
pub fn read_short<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            return Ok(Value::Integer(bytearray.read_short()? as i32));
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readUnsignedShort`: reads an unsigned 16-bit integer.
pub fn read_unsigned_short<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            return Ok(Value::Unsigned(bytearray.read_unsigned_short()? as u32));
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readDouble`: reads a 64-bit float.
pub fn read_double<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            return Ok(Value::Number(bytearray.read_double()?));
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readFloat`: reads a 32-bit float, widened to f64.
pub fn read_float<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            return Ok(Value::Number(bytearray.read_float()? as f64));
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readInt`: reads a signed 32-bit integer.
pub fn read_int<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            return Ok(Value::Integer(bytearray.read_int()?));
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readUnsignedInt`: reads an unsigned 32-bit integer.
pub fn read_unsigned_int<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            return Ok(Value::Unsigned(bytearray.read_unsigned_int()?));
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readBoolean`: reads one byte as a boolean.
pub fn read_boolean<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            return Ok(Value::Bool(bytearray.read_boolean()?));
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readByte`: reads a signed 8-bit integer.
pub fn read_byte<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            return Ok(Value::Integer(bytearray.read_byte()? as i32));
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readUTFBytes`: reads `len` raw bytes and decodes
/// them as UTF-8 (lossily).
pub fn read_utf_bytes<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let len = args
                .get(0)
                .unwrap_or(&Value::Undefined)
                .coerce_to_u32(activation)?;
            return Ok(AvmString::new(
                activation.context.gc_context,
                String::from_utf8_lossy(&bytearray.read_exact(len as usize)?),
            )
            .into());
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readUnsignedByte`: reads an unsigned 8-bit integer.
pub fn read_unsigned_byte<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            return Ok(Value::Unsigned(bytearray.read_unsigned_byte()? as u32));
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.writeFloat`: writes the argument as a 32-bit float.
pub fn write_float<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let num = args
                .get(0)
                .unwrap_or(&Value::Undefined)
                .coerce_to_number(activation)?;
            // Narrowing f64 -> f32 is intentional: writeFloat stores 4 bytes.
            bytearray.write_float(num as f32);
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.writeDouble`: writes the argument as a 64-bit float.
pub fn write_double<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let num = args
                .get(0)
                .unwrap_or(&Value::Undefined)
                .coerce_to_number(activation)?;
            bytearray.write_double(num);
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.writeBoolean`: writes the argument as a single byte.
pub fn write_boolean<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let num = args.get(0).unwrap_or(&Value::Undefined).coerce_to_boolean();
            bytearray.write_boolean(num);
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.writeInt`: writes a signed 32-bit integer.
pub fn write_int<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let num = args
                .get(0)
                .unwrap_or(&Value::Undefined)
                .coerce_to_i32(activation)?;
            bytearray.write_int(num);
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.writeUnsignedInt`: writes an unsigned 32-bit integer.
pub fn write_unsigned_int<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let num = args
                .get(0)
                .unwrap_or(&Value::Undefined)
                .coerce_to_u32(activation)?;
            bytearray.write_unsigned_int(num);
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.writeShort`: writes the low 16 bits of the argument.
pub fn write_short<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let num = args
                .get(0)
                .unwrap_or(&Value::Undefined)
                .coerce_to_i32(activation)?;
            // Truncation to i16 is intentional: writeShort stores 2 bytes.
            bytearray.write_short(num as i16);
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.writeMultiByte`: encodes a string with the charset
/// named by the second argument and appends the raw bytes.
pub fn write_multibyte<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let string = args
                .get(0)
                .unwrap_or(&Value::Undefined)
                .coerce_to_string(activation)?;
            let charset_label = args
                .get(1)
                .unwrap_or(&"UTF-8".into())
                .coerce_to_string(activation)?;
            // Unknown charset labels silently fall back to UTF-8.
            let encoder = Encoding::for_label(charset_label.as_bytes()).unwrap_or(UTF_8);
            let (encoded_bytes, _, _) = encoder.encode(string.as_str());
            bytearray.write_bytes(&encoded_bytes.into_owned());
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.readMultiByte`: reads `len` bytes and decodes them
/// with the charset named by the second argument.
pub fn read_multibyte<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let len = args
                .get(0)
                .unwrap_or(&Value::Undefined)
                .coerce_to_u32(activation)?;
            let charset_label = args
                .get(1)
                .unwrap_or(&"UTF-8".into())
                .coerce_to_string(activation)?;
            let bytes = bytearray.read_exact(len as usize)?;
            // Unknown charset labels silently fall back to UTF-8.
            let encoder = Encoding::for_label(charset_label.as_bytes()).unwrap_or(UTF_8);
            let (decoded_str, _, _) = encoder.decode(bytes);
            return Ok(AvmString::new(activation.context.gc_context, decoded_str).into());
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.writeUTFBytes`: appends the string's UTF-8 bytes
/// with no length prefix.
pub fn write_utf_bytes<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            let string = args
                .get(0)
                .unwrap_or(&Value::Undefined)
                .coerce_to_string(activation)?;
            bytearray.write_bytes(string.as_bytes());
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.compress`: compresses the whole buffer in place
/// with the named algorithm ("zlib" or "deflate").
pub fn compress<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            // Only a literal String argument is honored; other value types
            // fall through and leave the buffer untouched.
            if let Value::String(string) = args.get(0).unwrap_or(&Value::Undefined) {
                let compressed = match string.as_str() {
                    "zlib" => bytearray.zlib_compress(),
                    "deflate" => bytearray.deflate_compress(),
                    &_ => return Ok(Value::Undefined),
                };
                // NOTE(review): compression errors are silently dropped here.
                if let Ok(buffer) = compressed {
                    bytearray.clear();
                    bytearray.write_bytes(&buffer);
                }
            }
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.uncompress`: decompresses the whole buffer in place
/// with the named algorithm ("zlib" or "deflate").
pub fn uncompress<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            if let Value::String(string) = args.get(0).unwrap_or(&Value::Undefined) {
                let compressed = match string.as_str() {
                    "zlib" => bytearray.zlib_decompress(),
                    "deflate" => bytearray.deflate_decompress(),
                    &_ => return Ok(Value::Undefined),
                };
                // NOTE(review): decompression errors are silently dropped here.
                if let Ok(buffer) = compressed {
                    bytearray.clear();
                    bytearray.write_bytes(&buffer);
                }
            }
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.deflate`: raw-deflate compression in place.
pub fn deflate<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            if let Ok(buffer) = bytearray.deflate_compress() {
                bytearray.clear();
                bytearray.write_bytes(&buffer);
            }
        }
    }
    Ok(Value::Undefined)
}
/// Implements `ByteArray.inflate`: raw-deflate decompression in place.
pub fn inflate<'gc>(
    activation: &mut Activation<'_, 'gc, '_>,
    this: Option<Object<'gc>>,
    _args: &[Value<'gc>],
) -> Result<Value<'gc>, Error> {
    if let Some(this) = this {
        if let Some(mut bytearray) = this.as_bytearray_mut(activation.context.gc_context) {
            if let Ok(buffer) = bytearray.deflate_decompress() {
                bytearray.clear();
                bytearray.write_bytes(&buffer);
            }
        }
    }
    Ok(Value::Undefined)
}
/// Constructs the `flash.utils.ByteArray` class object, wiring every native
/// function above to its AS3-visible method or property name.
pub fn create_class<'gc>(mc: MutationContext<'gc, '_>) -> GcCell<'gc, Class<'gc>> {
    let class = Class::new(
        QName::new(Namespace::package("flash.utils"), "ByteArray"),
        Some(QName::new(Namespace::public(), "Object").into()),
        Method::from_builtin(instance_init),
        Method::from_builtin(class_init),
        mc,
    );
    let mut write = class.write(mc);
    // SEALED: instances cannot gain dynamic properties.
    write.set_attributes(ClassAttributes::SEALED);
    // (AS3 method name, native implementation) pairs.
    const PUBLIC_INSTANCE_METHODS: &[(&str, NativeMethod)] = &[
        ("writeByte", write_byte),
        ("writeBytes", write_bytes),
        ("readBytes", read_bytes),
        ("toString", to_string),
        ("readShort", read_short),
        ("writeShort", write_short),
        ("readUnsignedShort", read_unsigned_short),
        ("readDouble", read_double),
        ("writeDouble", write_double),
        ("readFloat", read_float),
        ("writeFloat", write_float),
        ("readInt", read_int),
        ("writeInt", write_int),
        ("readUnsignedInt", read_unsigned_int),
        ("writeUnsignedInt", write_unsigned_int),
        ("readBoolean", read_boolean),
        ("writeBoolean", write_boolean),
        ("readByte", read_byte),
        ("readUnsignedByte", read_unsigned_byte),
        ("writeUTF", write_utf),
        ("readUTF", read_utf),
        ("clear", clear),
        ("compress", compress),
        ("uncompress", uncompress),
        ("inflate", inflate),
        ("deflate", deflate),
        ("writeMultiByte", write_multibyte),
        ("readMultiByte", read_multibyte),
        ("writeUTFBytes", write_utf_bytes),
        ("readUTFBytes", read_utf_bytes),
    ];
    write.define_public_builtin_instance_methods(PUBLIC_INSTANCE_METHODS);
    // (property name, getter, setter) triples; `None` means read-only.
    const PUBLIC_INSTANCE_PROPERTIES: &[(&str, Option<NativeMethod>, Option<NativeMethod>)] = &[
        ("bytesAvailable", Some(bytes_available), None),
        ("length", Some(length), Some(set_length)),
        ("position", Some(position), Some(set_position)),
        ("endian", Some(endian), Some(set_endian)),
    ];
    write.define_public_builtin_instance_properties(PUBLIC_INSTANCE_PROPERTIES);
    class
}
| 31.277215 | 131 | 0.561779 |
e69ec395f55199e22ae9bfaad05b3150debfb78b | 18,727 | use anyhow::Result;
/// AST used for code generation, as opposed to the parsing AST
use std::{collections::BTreeMap, collections::BTreeSet, rc::Rc};
use crate::ast;
use crate::lexer::SrcSpan;
use std::fmt::Debug;
/// A top-level declaration in the codegen AST: a struct, a sum type, or a
/// type alias.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum TopDeclaration {
    Struct(Struct),
    Enum(Enum),
    Alias(Alias),
}
impl TopDeclaration {
    /// Name of the declared type, regardless of which kind it is.
    pub(crate) fn get_name(&self) -> &str {
        match self {
            TopDeclaration::Struct(s) => &s.name,
            TopDeclaration::Enum(e) => &e.name,
            TopDeclaration::Alias(a) => &a.name,
        }
    }
    /// Source span of the declaration, used for error reporting.
    pub(crate) fn location(&self) -> SrcSpan {
        match self {
            TopDeclaration::Struct(s) => s.location,
            TopDeclaration::Enum(e) => e.location,
            TopDeclaration::Alias(a) => a.location,
        }
    }
}
/// A record type with named fields, possibly generic.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Struct {
    pub location: SrcSpan,
    pub name: String,
    // only generic types are allowed in definition
    pub type_parameters: Vec<String>,
    pub fields: Vec<Field>,
}
/// enum Foo = {A, B, C, D}
/// enum Bar = {String, Int}
/// enum Maybe<T> = {Nothing, Just(T)}
/// enum Detailed<A,B> = {Nothing, Something(A,B)}
/// enum WithFields = {A, B{b_val: Int}}
/// the last example, when monomorphised would give:
/// ["Detailed", "val_A", val_B""]
/// or {"type": "Detailed", "value": ["val_A", "val_B"]}
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Enum {
    pub location: SrcSpan,
    pub name: String,
    // only generic types are allowed in definition
    pub type_parameters: Vec<String>,
    pub variants: Vec<EnumVariant>,
    pub directives: ast::Directives,
}
/// type MaybeInt = Maybe<Int>
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Alias {
    pub location: SrcSpan,
    pub name: String,
    pub alias: AliasType,
}
/// What an alias may point at: an atomic type, a reference to another
/// declaration, or a builtin container. Bare type parameters are rejected
/// during conversion.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum AliasType {
    Atomic(AtomicType),
    Reference(RefType),
    Builtin(Builtin),
}
/// field1: Int,
/// field2: Foo,
/// field3: Bar<T>,
/// field4: Bar<Int>,
/// field5: Bar<Map<String, V>>
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Field {
    pub location: SrcSpan,
    pub name: String,
    pub typ: FieldType,
}
/// The type of a struct field: either a `#[typeof(...)]` marker pointing at
/// a sibling field, or a regular type.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum FieldType {
    /// name of the field holding the referenced type and name
    /// of the optional type parameter
    TypeOf(String, Option<String>),
    Type(Type),
}
impl FieldType {
    /// Returns true when this field's type is a bare type parameter
    /// (e.g. the `T` in `Foo<T>`), as opposed to a concrete type.
    pub fn is_generic_type(&self) -> bool {
        // matches! replaces the manual `match … => true, _ => false` form
        // (clippy::match_like_matches_macro).
        matches!(self, FieldType::Type(Type::TypeParameter(_)))
    }
}
/// A type can be atomic (String, Bool, Int…), a reference to another type
/// (Foo, Bar<T>, Map<String, Int>), or a generic type like `T` or `errorType`.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Type {
    /// One of the basic atomic type
    Atomic(AtomicType),
    /// Generic type or reference to existing type
    /// like Barf<Foo, T, Bar<Int>>
    Reference(RefType),
    /// Builtin type like List, Map and Optional
    Builtin(Builtin),
    /// Type parameter, like T (in Foo<T>)
    TypeParameter(String),
}
impl Enum {
    /// returns true if the sum type is actually a simple enum,
    /// that is, only has variants with only constructor and no
    /// data attached to them
    pub fn is_simple_enum(&self) -> bool {
        // Iterator form of the original continue/return-false loop.
        self.variants
            .iter()
            .all(|v| matches!(v.value, VariantValue::OnlyCtor))
    }
}
/// The primitive scalar types understood by the code generator.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum AtomicType {
    Str,
    UInt,
    Int,
    Int8,
    Int16,
    Int32,
    Int64,
    UInt8,
    UInt16,
    UInt32,
    UInt64,
    Float,
    Bool,
    Bytes,
}
/// A reference to another type. Like Foo, or Bar<T>
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct RefType {
    pub location: SrcSpan,
    pub name: String,
    pub type_parameters: Vec<Type>,
    // Shared pointer to the resolved declaration this reference points at.
    pub target: Rc<TopDeclaration>,
    /// Populated from a #[typeof(…)] field when an Enum
    /// is embedded into a Struct
    /// Used for efficient parsing
    pub variant_hint: Option<String>,
}
/// Built-in parameterized containers.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Builtin {
    List(Box<Type>),
    Optional(Box<Type>),
    Map(Box<Type>, Box<Type>),
}
/// One variant of a sum type, with an optional serialization alias.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct EnumVariant {
    pub location: SrcSpan,
    pub name: String,
    pub alias: Option<String>,
    pub value: VariantValue,
}
/// Payload shape of an enum variant: bare constructor or positional fields.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum VariantValue {
    OnlyCtor,
    PositionalCtor(Vec<Type>),
    // StructCtor(Vec<Field>),
}
impl VariantValue {
    /// Returns true when the variant carries no payload (bare constructor).
    pub fn is_only_ctor(&self) -> bool {
        // matches! replaces the manual `match … => true, _ => false` form
        // (clippy::match_like_matches_macro).
        matches!(self, VariantValue::OnlyCtor)
    }
}
// Name -> resolved declaration, used to resolve reference types.
// NOTE(review): the lifetime parameter `'a` is unused in the right-hand side;
// confirm whether it can be dropped without breaking callers.
type Mappings<'a> = BTreeMap<String, Rc<TopDeclaration>>;
#[derive(Debug)]
struct ConvertContext<'a> {
    // map of all top declarations seen so far, used to resolve
    // reference types
    top_declaration_mappings: &'a BTreeMap<String, Rc<TopDeclaration>>,
    // track the relationship between #[typeof(…)] declarations
    // and other fields of the struct
    type_mappings: Vec<(&'a str, &'a str)>,
    // the list of in-scope type parameters
    type_parameters: BTreeSet<String>,
}
// used to propagate information about #[typeof] fields
// (field_name, target_name)
// foo: #[typeof(bar)] => (foo, bar)
type TypeMappings<'a> = Vec<(&'a str, &'a str)>;
/// Turn a Vec<ast::TopDeclaration> into a Vec<TopDeclaration> ready to be used
/// for code generation
pub fn to_gen_ast(decls: Vec<ast::TopDeclaration>) -> Result<Vec<TopDeclaration>> {
    let mut mappings = BTreeMap::new();
    decls.iter().map(|d| d.to_gen_ast(&mut mappings)).collect()
}
impl ast::TopDeclaration {
    /// Converts one parse-tree declaration, recording the result in
    /// `mappings` so later declarations can reference it.
    fn to_gen_ast(&self, mappings: &mut Mappings) -> Result<TopDeclaration> {
        let result = match self {
            ast::TopDeclaration::Struct(s) => s.to_gen_ast(&mappings).map(TopDeclaration::Struct),
            ast::TopDeclaration::Enum(e) => e.to_gen_ast(&mappings).map(TopDeclaration::Enum),
            ast::TopDeclaration::Alias(a) => a.to_gen_ast(&mappings).map(TopDeclaration::Alias),
        }?;
        // would be nice to avoid the result.clone()
        mappings.insert(self.get_name().to_string(), Rc::new(result.clone()));
        Ok(result)
    }
}
impl ast::Struct {
    /// Converts a parse-tree struct, validating every `#[typeof(...)]` field
    /// against the struct's other fields.
    fn to_gen_ast(&self, mappings: &Mappings) -> Result<Struct> {
        // Collect (typeof-field-name, target-field-name) pairs.
        let type_mappings: TypeMappings = self
            .fields
            .iter()
            .filter_map(|f| match &f.typ {
                ast::FieldType::TypeOf(s) => Some((f.name.as_ref(), s.as_ref())),
                ast::FieldType::Type(_) => None,
            })
            .collect();
        // Each #[typeof] must point at a field that actually exists.
        for (type_of_name, target_field_name) in type_mappings.iter() {
            let found = self.fields.iter().find(|f| &f.name == target_field_name);
            if found.is_none() {
                return Err(anyhow!(
                    "{} references a field named {} but it is not present in the struct",
                    type_of_name,
                    target_field_name
                ));
            }
        }
        let type_params = self.type_parameters.iter().cloned().collect();
        let fields = self
            .fields
            .iter()
            .map(|f| f.to_gen_ast(&mappings, &type_mappings, &type_params, &self.fields))
            .collect::<Result<_>>();
        Ok(Struct {
            location: self.location,
            name: self.name.clone(),
            type_parameters: self.type_parameters.clone(),
            fields: fields?,
        })
    }
}
impl ast::Enum {
    /// Converts a parse-tree enum by converting each variant with the enum's
    /// type parameters in scope.
    fn to_gen_ast(&self, mappings: &Mappings) -> Result<Enum> {
        let type_params = self.type_parameters.iter().cloned().collect();
        let variants = self
            .variants
            .iter()
            .map(|v| v.to_gen_ast(&mappings, &type_params))
            .collect::<Result<_>>();
        Ok(Enum {
            location: self.location,
            name: self.name.clone(),
            type_parameters: self.type_parameters.clone(),
            variants: variants?,
            directives: self.directives.clone(),
        })
    }
}
impl ast::Alias {
    /// Lowers an alias declaration.
    ///
    /// Rejects duplicate names, aliases that do not bind all of the target's
    /// type parameters, and aliases of bare type parameters.
    fn to_gen_ast(&self, mappings: &Mappings) -> Result<Alias> {
        // A name can only be declared once across the whole file.
        if let Some(top_decl) = mappings.get(&self.name) {
            return Err(anyhow!(
                "{} already declared at line {}",
                self.name,
                top_decl.location().start.line
            ));
        }
        let alias = match self.alias.to_gen_ast(&mappings, None, &BTreeSet::new())? {
            Type::Atomic(a) => AliasType::Atomic(a),
            Type::Reference(r) => {
                // An alias must instantiate every type parameter of the
                // declaration it refers to (aliases of aliases take none).
                let target_typevars: &[String] = match &*r.target {
                    TopDeclaration::Struct(s) => &s.type_parameters,
                    TopDeclaration::Enum(e) => &e.type_parameters,
                    TopDeclaration::Alias(_) => &[],
                };
                if r.type_parameters.len() != target_typevars.len() {
                    return Err(anyhow!(
                        "An alias must specify all type parameters. Expected {:?} but got {:?}",
                        target_typevars,
                        r.type_parameters
                    ));
                }
                AliasType::Reference(r)
            }
            Type::Builtin(b) => AliasType::Builtin(b),
            Type::TypeParameter(typs) => {
                return Err(anyhow!(
                    "Alias cannot have type parameter. Found: {:?}",
                    typs
                ))
            }
        };
        Ok(Alias {
            location: self.location,
            name: self.name.clone(),
            alias,
        })
    }
}
impl ast::Field {
    /// Lowers one struct field.
    ///
    /// If another field declares itself as `#[typeof]` of this one, the name
    /// of that other field is forwarded as a variant hint for the lowered type.
    fn to_gen_ast(
        &self,
        mappings: &Mappings,
        type_mappings: &TypeMappings,
        type_params: &BTreeSet<String>,
        fields: &Vec<ast::Field>,
    ) -> Result<Field> {
        // Is some other field the `typeof` of this one?
        let mut variant_hint = None;
        for (typeof_field_name, target) in type_mappings.iter() {
            if **target == self.name {
                variant_hint = Some((*typeof_field_name).to_string());
                break;
            }
        }
        let typ = self
            .typ
            .to_gen_ast(&mappings, variant_hint, type_params, &self.name, fields)?;
        Ok(Field {
            location: self.location,
            name: self.name.clone(),
            typ,
        })
    }
}
impl ast::FieldType {
    /// Lowers a field type.
    ///
    /// For `#[typeof("target")]` fields this validates the target: it must
    /// exist, must not itself be a `#[typeof]`, and must not be an atomic or
    /// builtin type. Only references are accepted; when the reference names a
    /// type parameter in scope, that name is recorded alongside.
    fn to_gen_ast(
        &self,
        mappings: &Mappings,
        variant_hint: Option<String>,
        type_params: &BTreeSet<String>,
        field_name: &str,
        fields: &Vec<ast::Field>,
    ) -> Result<FieldType> {
        match self {
            ast::FieldType::Type(t) => t
                .to_gen_ast(&mappings, variant_hint, type_params)
                .map(FieldType::Type),
            ast::FieldType::TypeOf(s) => {
                let target_field = fields.iter().find(|f| &f.name == s).ok_or_else(|| {
                    anyhow!(
                        "field {} references another field: {} but it wasn't found in the struct",
                        field_name,
                        s
                    )
                })?;
                // Bind the target's reference directly in the pattern instead
                // of re-matching `target_field.typ` a second time.
                match &target_field.typ {
                    ast::FieldType::TypeOf(_) => {
                        Err(anyhow!("field {} is the type of {}. The target field cannot be itself a #[typeof]", field_name, target_field.name))
                    }
                    ast::FieldType::Type(ast::Type::Atomic(_)) => Err(anyhow!(
                        "field {} cannot be the type of the atomic type behind {}",
                        field_name,
                        target_field.name
                    )),
                    ast::FieldType::Type(ast::Type::Builtin(_)) => Err(anyhow!(
                        "field {} cannot be the type of the builtin type behind {}",
                        field_name,
                        target_field.name
                    )),
                    ast::FieldType::Type(ast::Type::Reference(r)) => {
                        // TODO: lookup the referenced type, and reject if it's an
                        // atomic type, a builtin or a struct. Only allow type
                        // parameters and enums.
                        // Record the name when it refers to a type parameter
                        // currently in scope.
                        let tv = if type_params.contains(&r.name) {
                            Some(r.name.clone())
                        } else {
                            None
                        };
                        Ok(FieldType::TypeOf(s.clone(), tv))
                    }
                }
            }
        }
    }
}
impl ast::EnumVariant {
    /// Lowers a single enum variant; only the payload needs lowering, the
    /// name and alias are carried over as-is.
    fn to_gen_ast(
        &self,
        mappings: &Mappings,
        type_params: &BTreeSet<String>,
    ) -> Result<EnumVariant> {
        let value = self.value.to_gen_ast(&mappings, type_params)?;
        Ok(EnumVariant {
            location: self.location,
            name: self.name.clone(),
            alias: self.alias.clone(),
            value,
        })
    }
}
impl ast::VariantValue {
    /// Lowers an enum variant's payload.
    fn to_gen_ast(
        &self,
        mappings: &Mappings,
        type_params: &BTreeSet<String>,
    ) -> Result<VariantValue> {
        match self {
            ast::VariantValue::OnlyCtor => Ok(VariantValue::OnlyCtor),
            ast::VariantValue::PositionalCtor(ctors) => {
                let mut lowered = Vec::with_capacity(ctors.len());
                for t in ctors {
                    // Positional payloads never carry a variant hint.
                    lowered.push(t.to_gen_ast(&mappings, None, type_params)?);
                }
                Ok(VariantValue::PositionalCtor(lowered))
            }
            ast::VariantValue::StructCtor(_) => todo!("anon struct in enum not supported"),
        }
    }
}
impl ast::Type {
    /// Lowers a type expression.
    ///
    /// A bare name is resolved first against the previously lowered
    /// declarations, then against the type parameters in scope; anything
    /// else is an error. The error message is now built lazily instead of
    /// being allocated on every successful resolution.
    fn to_gen_ast(
        &self,
        mappings: &Mappings,
        variant_hint: Option<String>,
        type_params: &BTreeSet<String>,
    ) -> Result<Type> {
        match self {
            ast::Type::Atomic(t) => Ok(Type::Atomic(t.into())),
            ast::Type::Reference(r) => {
                if let Some(target) = mappings.get(&r.name[..]) {
                    // Known declaration: lower the type arguments and keep a
                    // handle on the declaration being referenced.
                    let params = r
                        .type_parameters
                        .iter()
                        // no variant hints for type params
                        .map(|t| t.to_gen_ast(&mappings, None, type_params))
                        .collect::<Result<_>>()?;
                    Ok(Type::Reference(RefType {
                        location: r.location,
                        name: r.name.clone(),
                        type_parameters: params,
                        target: (*target).clone(),
                        variant_hint,
                    }))
                } else if type_params.contains(&r.name) {
                    Ok(Type::TypeParameter(r.name.clone()))
                } else {
                    Err(anyhow!(
                        "{} is not a valid reference to an existing type nor a type parameter in scope.",
                        r.name
                    ))
                }
            }
            ast::Type::Builtin(b) => {
                let b2 = match b {
                    ast::Builtin::List(inner) => {
                        Builtin::List(Box::new(inner.to_gen_ast(mappings, None, type_params)?))
                    }
                    ast::Builtin::Optional(inner) => Builtin::Optional(Box::new(
                        inner.to_gen_ast(mappings, None, type_params)?,
                    )),
                    ast::Builtin::Map(k, v) => Builtin::Map(
                        Box::new(k.to_gen_ast(mappings, None, type_params)?),
                        Box::new(v.to_gen_ast(mappings, None, type_params)?),
                    ),
                };
                Ok(Type::Builtin(b2))
            }
        }
    }
}
impl From<&ast::AtomicType> for AtomicType {
    /// Maps a parsed atomic type onto its code-generation twin; the two
    /// enums mirror each other variant for variant.
    fn from(t: &ast::AtomicType) -> Self {
        match t {
            ast::AtomicType::Str => Self::Str,
            ast::AtomicType::Bool => Self::Bool,
            ast::AtomicType::Bytes => Self::Bytes,
            ast::AtomicType::Float => Self::Float,
            ast::AtomicType::Int => Self::Int,
            ast::AtomicType::Int8 => Self::Int8,
            ast::AtomicType::Int16 => Self::Int16,
            ast::AtomicType::Int32 => Self::Int32,
            ast::AtomicType::Int64 => Self::Int64,
            ast::AtomicType::UInt => Self::UInt,
            ast::AtomicType::UInt8 => Self::UInt8,
            ast::AtomicType::UInt16 => Self::UInt16,
            ast::AtomicType::UInt32 => Self::UInt32,
            ast::AtomicType::UInt64 => Self::UInt64,
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::dare;
    use crate::lexer::Lexer;
    use pretty_assertions::assert_eq;

    /// A field declared as `#[typeof("payload")]` should put its own name
    /// into the `variant_hint` of the reference behind `payload`.
    #[test]
    fn variant_hint() {
        let decls = dare::TopDeclarationsParser::new()
            .parse(Lexer::new(
                "struct Bar {f: Int}\nstruct Foo {\"type\": #[typeof(\"payload\")], payload: Bar}",
            ))
            .unwrap();
        let exprs = to_gen_ast(decls).unwrap();
        match &exprs[1] {
            TopDeclaration::Struct(s) => match &s.fields[1].typ {
                FieldType::Type(Type::Reference(r)) => {
                    assert_eq!(Some("type".to_string()), r.variant_hint);
                }
                // `assert!(false, format!(..))` is a hard error on edition
                // 2021 (panic message must be a format string); use panic!.
                x => panic!("mismatched type, expected a reference but got {:?}", x),
            },
            x => panic!("expected struct but got {:?}", x),
        };
    }
}
#![allow(unused)]
use std::borrow::Cow;
use std::collections::HashMap;
use clap::ArgMatches;
use crate::envfile::EnvFile;
use futures::stream::FuturesUnordered;
use std::{env, thread};
use anyhow::anyhow;
use anyhow::Result;
use colored::Colorize;
use futures::{stream, StreamExt};
use slice_group_by::GroupBy;
use tabular::Row;
use command::{deploy, list, put_env};
use crate::api::{DeployCursor, EnvVar};
mod envfile;
mod api;
mod command;
/// Shorthand for a fallible computation that produces a list of `A`s.
type ResultVec<A, B> = Result<Vec<A>, B>;
// Crate metadata baked in at compile time from Cargo.toml.
const VERSION: &str = env!("CARGO_PKG_VERSION");
const NAME: &str = env!("CARGO_PKG_NAME");
fn main() -> anyhow::Result<()> {
let args = clap::Command::new(NAME)
.version(VERSION)
.about("CLI for Render.com")
.subcommand_required(true)
.arg_required_else_help(true)
.arg(clap::Arg::new("token")
.env("RENDER_TOKEN")
.global(true)
.long("token")
.takes_value(true)
)
.subcommand(clap::Command::new("put-env")
.arg(clap::Arg::new("service")
.required(true)
.help("The service name")
)
.arg(clap::Arg::new("env_files")
.required(true)
.multiple_values(true)
.help("The env files to read")
)
)
.subcommand(clap::Command::new("deploy")
.arg(clap::Arg::new("service")
.required(true)
.takes_value(true)
.help("The service name")
)
)
.subcommand(clap::Command::new("list")
.alias("ls")
)
.get_matches();
let token = args.value_of("token").unwrap();
match args.subcommand().unwrap() {
("put-env", args) => {
put_env::put_env(token, args)
}
("deploy", args) => {
deploy::deploy(token, args)
}
("list", args) => {
list::list_services(token)
}
_ => unreachable!()
}
} | 26.565789 | 49 | 0.528975 |
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! [Flexible target specification.](https://github.com/rust-lang/rfcs/pull/131)
//!
//! Rust targets a wide variety of usecases, and in the interest of flexibility,
//! allows new target triples to be defined in configuration files. Most users
//! will not need to care about these, but this is invaluable when porting Rust
//! to a new platform, and allows for an unprecedented level of control over how
//! the compiler works.
//!
//! # Using custom targets
//!
//! A target triple, as passed via `rustc --target=TRIPLE`, will first be
//! compared against the list of built-in targets. This is to ease distributing
//! rustc (no need for configuration files) and also to hold these built-in
//! targets as immutable and sacred. If `TRIPLE` is not one of the built-in
//! targets, rustc will check if a file named `TRIPLE` exists. If it does, it
//! will be loaded as the target configuration. If the file does not exist,
//! rustc will search each directory in the environment variable
//! `RUST_TARGET_PATH` for a file named `TRIPLE.json`. The first one found will
//! be loaded. If no file is found in any of those directories, a fatal error
//! will be given.
//!
//! Projects defining their own targets should use
//! `--target=path/to/my-awesome-platform.json` instead of adding to
//! `RUST_TARGET_PATH`.
//!
//! # Defining a new target
//!
//! Targets are defined using [JSON](http://json.org/). The `Target` struct in
//! this module defines the format the JSON file should take, though each
//! underscore in the field names should be replaced with a hyphen (`-`) in the
//! JSON file. Some fields are required in every target specification, such as
//! `llvm-target`, `target-endian`, `target-pointer-width`, `data-layout`,
//! `arch`, and `os`. In general, options passed to rustc with `-C` override
//! the target's settings, though `target-feature` and `link-args` will *add*
//! to the list specified by the target, rather than replace.
use serialize::json::{Json, ToJson};
use std::collections::BTreeMap;
use std::default::Default;
use std::io::prelude::*;
use syntax::abi::{Abi, lookup as lookup_abi};
use {LinkerFlavor, PanicStrategy, RelroLevel};
mod android_base;
mod apple_base;
mod apple_ios_base;
mod arm_base;
mod bitrig_base;
mod dragonfly_base;
mod emscripten_base;
mod freebsd_base;
mod haiku_base;
mod linux_base;
mod linux_musl_base;
mod openbsd_base;
mod netbsd_base;
mod solaris_base;
mod windows_base;
mod windows_msvc_base;
mod thumb_base;
mod l4re_base;
mod fuchsia_base;
mod redox_base;
/// Linker arguments, keyed by the linker flavor they apply to.
pub type LinkArgs = BTreeMap<LinkerFlavor, Vec<String>>;
/// Result of loading a target: the target itself, or a human-readable error.
pub type TargetResult = Result<Target, String>;
/// For each `(triple, module)` pair, expands to:
/// * a `mod` declaration for the per-target module,
/// * the `TARGETS` list of built-in triple names,
/// * the `load_specific` / `get_targets` lookup helpers,
/// * a JSON encode/decode round-trip test per target.
macro_rules! supported_targets {
    ( $(($triple:expr, $module:ident),)+ ) => (
        $(mod $module;)*
        /// List of supported targets
        const TARGETS: &'static [&'static str] = &[$($triple),*];
        // Looks up a built-in target by triple, marking it as builtin and
        // round-tripping it through the JSON parser as a sanity check.
        fn load_specific(target: &str) -> TargetResult {
            match target {
                $(
                    $triple => {
                        let mut t = $module::target()?;
                        t.options.is_builtin = true;
                        // round-trip through the JSON parser to ensure at
                        // run-time that the parser works correctly
                        t = Target::from_json(t.to_json())?;
                        debug!("Got builtin target: {:?}", t);
                        Ok(t)
                    },
                )+
                _ => Err(format!("Unable to find target: {}", target))
            }
        }
        // Iterator over the triples of every built-in target that loads
        // successfully on the current host.
        pub fn get_targets() -> Box<Iterator<Item=String>> {
            Box::new(TARGETS.iter().filter_map(|t| -> Option<String> {
                load_specific(t)
                    .and(Ok(t.to_string()))
                    .ok()
            }))
        }
        #[cfg(test)]
        mod test_json_encode_decode {
            use serialize::json::ToJson;
            use super::Target;
            $(use super::$module;)*
            $(
                #[test]
                fn $module() {
                    // Grab the TargetResult struct. If we successfully retrieved
                    // a Target, then the test JSON encoding/decoding can run for this
                    // Target on this testing platform (i.e., checking the iOS targets
                    // only on a Mac test platform).
                    let _ = $module::target().map(|original| {
                        let as_json = original.to_json();
                        let parsed = Target::from_json(as_json).unwrap();
                        assert_eq!(original, parsed);
                    });
                }
            )*
        }
    )
}
// Every built-in target triple, paired with the module that defines it.
// See `supported_targets!` above for what this expands to.
supported_targets! {
    ("x86_64-unknown-linux-gnu", x86_64_unknown_linux_gnu),
    ("i686-unknown-linux-gnu", i686_unknown_linux_gnu),
    ("i586-unknown-linux-gnu", i586_unknown_linux_gnu),
    ("mips-unknown-linux-gnu", mips_unknown_linux_gnu),
    ("mips64-unknown-linux-gnuabi64", mips64_unknown_linux_gnuabi64),
    ("mips64el-unknown-linux-gnuabi64", mips64el_unknown_linux_gnuabi64),
    ("mipsel-unknown-linux-gnu", mipsel_unknown_linux_gnu),
    ("powerpc-unknown-linux-gnu", powerpc_unknown_linux_gnu),
    ("powerpc64-unknown-linux-gnu", powerpc64_unknown_linux_gnu),
    ("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu),
    ("s390x-unknown-linux-gnu", s390x_unknown_linux_gnu),
    ("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi),
    ("arm-unknown-linux-gnueabihf", arm_unknown_linux_gnueabihf),
    ("arm-unknown-linux-musleabi", arm_unknown_linux_musleabi),
    ("arm-unknown-linux-musleabihf", arm_unknown_linux_musleabihf),
    ("armv5te-unknown-linux-gnueabi", armv5te_unknown_linux_gnueabi),
    ("armv7-unknown-linux-gnueabihf", armv7_unknown_linux_gnueabihf),
    ("armv7-unknown-linux-musleabihf", armv7_unknown_linux_musleabihf),
    ("aarch64-unknown-linux-gnu", aarch64_unknown_linux_gnu),
    ("aarch64-unknown-linux-musl", aarch64_unknown_linux_musl),
    ("x86_64-unknown-linux-musl", x86_64_unknown_linux_musl),
    ("i686-unknown-linux-musl", i686_unknown_linux_musl),
    ("mips-unknown-linux-musl", mips_unknown_linux_musl),
    ("mipsel-unknown-linux-musl", mipsel_unknown_linux_musl),
    ("mips-unknown-linux-uclibc", mips_unknown_linux_uclibc),
    ("mipsel-unknown-linux-uclibc", mipsel_unknown_linux_uclibc),
    ("sparc64-unknown-linux-gnu", sparc64_unknown_linux_gnu),

    // Android
    ("i686-linux-android", i686_linux_android),
    ("x86_64-linux-android", x86_64_linux_android),
    ("arm-linux-androideabi", arm_linux_androideabi),
    ("armv7-linux-androideabi", armv7_linux_androideabi),
    ("aarch64-linux-android", aarch64_linux_android),

    // BSD family
    ("aarch64-unknown-freebsd", aarch64_unknown_freebsd),
    ("i686-unknown-freebsd", i686_unknown_freebsd),
    ("x86_64-unknown-freebsd", x86_64_unknown_freebsd),
    ("i686-unknown-dragonfly", i686_unknown_dragonfly),
    ("x86_64-unknown-dragonfly", x86_64_unknown_dragonfly),
    ("x86_64-unknown-bitrig", x86_64_unknown_bitrig),
    ("i686-unknown-openbsd", i686_unknown_openbsd),
    ("x86_64-unknown-openbsd", x86_64_unknown_openbsd),
    ("i686-unknown-netbsd", i686_unknown_netbsd),
    ("sparc64-unknown-netbsd", sparc64_unknown_netbsd),
    ("x86_64-unknown-netbsd", x86_64_unknown_netbsd),
    ("x86_64-rumprun-netbsd", x86_64_rumprun_netbsd),

    ("i686-unknown-haiku", i686_unknown_haiku),
    ("x86_64-unknown-haiku", x86_64_unknown_haiku),

    // Apple
    ("x86_64-apple-darwin", x86_64_apple_darwin),
    ("i686-apple-darwin", i686_apple_darwin),

    ("aarch64-unknown-fuchsia", aarch64_unknown_fuchsia),
    ("x86_64-unknown-fuchsia", x86_64_unknown_fuchsia),

    ("x86_64-unknown-l4re-uclibc", x86_64_unknown_l4re_uclibc),

    ("x86_64-unknown-redox", x86_64_unknown_redox),

    // iOS
    ("i386-apple-ios", i386_apple_ios),
    ("x86_64-apple-ios", x86_64_apple_ios),
    ("aarch64-apple-ios", aarch64_apple_ios),
    ("armv7-apple-ios", armv7_apple_ios),
    ("armv7s-apple-ios", armv7s_apple_ios),

    // Solaris
    ("x86_64-sun-solaris", x86_64_sun_solaris),
    ("sparcv9-sun-solaris", sparcv9_sun_solaris),

    // Windows
    ("x86_64-pc-windows-gnu", x86_64_pc_windows_gnu),
    ("i686-pc-windows-gnu", i686_pc_windows_gnu),
    ("x86_64-pc-windows-msvc", x86_64_pc_windows_msvc),
    ("i686-pc-windows-msvc", i686_pc_windows_msvc),
    ("i586-pc-windows-msvc", i586_pc_windows_msvc),

    // Emscripten
    ("asmjs-unknown-emscripten", asmjs_unknown_emscripten),
    ("wasm32-unknown-emscripten", wasm32_unknown_emscripten),
    ("wasm32-experimental-emscripten", wasm32_experimental_emscripten),

    // Bare-metal / embedded
    ("thumbv6m-none-eabi", thumbv6m_none_eabi),
    ("thumbv7m-none-eabi", thumbv7m_none_eabi),
    ("thumbv7em-none-eabi", thumbv7em_none_eabi),
    ("thumbv7em-none-eabihf", thumbv7em_none_eabihf),
    ("msp430-none-elf", msp430_none_elf),
}
/// Everything `rustc` knows about how to compile for a specific target.
///
/// Every field here must be specified, and has no default value.
#[derive(PartialEq, Clone, Debug)]
pub struct Target {
    /// Target triple to pass to LLVM.
    pub llvm_target: String,
    /// String to use as the `target_endian` `cfg` variable.
    pub target_endian: String,
    /// String to use as the `target_pointer_width` `cfg` variable.
    pub target_pointer_width: String,
    /// Width of c_int type (kept as a string, like `target_pointer_width`;
    /// presumably a bit-width — confirm against consumers of this field).
    pub target_c_int_width: String,
    /// OS name to use for conditional compilation.
    pub target_os: String,
    /// Environment name to use for conditional compilation.
    pub target_env: String,
    /// Vendor name to use for conditional compilation.
    pub target_vendor: String,
    /// Architecture to use for ABI considerations. Valid options: "x86",
    /// "x86_64", "arm", "aarch64", "mips", "powerpc", and "powerpc64".
    pub arch: String,
    /// [Data layout](http://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM.
    pub data_layout: String,
    /// Linker flavor
    pub linker_flavor: LinkerFlavor,
    /// Optional settings with defaults.
    pub options: TargetOptions,
}
/// Optional aspects of a target specification.
///
/// This has an implementation of `Default`, see each field for what the default is. In general,
/// these try to take "minimal defaults" that don't assume anything about the runtime they run in.
#[derive(PartialEq, Clone, Debug)]
pub struct TargetOptions {
    /// Whether the target is built-in or loaded from a custom target specification.
    pub is_builtin: bool,
    /// Linker to invoke. Defaults to "cc".
    pub linker: String,
    /// Archive utility to use when managing archives. Defaults to "ar".
    pub ar: String,
    /// Linker arguments that are unconditionally passed *before* any
    /// user-defined libraries.
    pub pre_link_args: LinkArgs,
    /// Objects to link before all others, always found within the
    /// sysroot folder.
    pub pre_link_objects_exe: Vec<String>, // ... when linking an executable
    pub pre_link_objects_dll: Vec<String>, // ... when linking a dylib
    /// Linker arguments that are unconditionally passed after any
    /// user-defined but before post_link_objects. Standard platform
    /// libraries that should always be linked to, usually go here.
    pub late_link_args: LinkArgs,
    /// Objects to link after all others, always found within the
    /// sysroot folder.
    pub post_link_objects: Vec<String>,
    /// Linker arguments that are unconditionally passed *after* any
    /// user-defined libraries.
    pub post_link_args: LinkArgs,
    /// Environment variables to be set before invoking the linker.
    pub link_env: Vec<(String, String)>,
    /// Extra arguments to pass to the external assembler (when used)
    pub asm_args: Vec<String>,
    /// Default CPU to pass to LLVM. Corresponds to `llc -mcpu=$cpu`. Defaults
    /// to "generic".
    pub cpu: String,
    /// Default target features to pass to LLVM. These features will *always* be
    /// passed, and cannot be disabled even via `-C`. Corresponds to `llc
    /// -mattr=$features`.
    pub features: String,
    /// Whether dynamic linking is available on this target. Defaults to false.
    pub dynamic_linking: bool,
    /// Whether executables are available on this target. iOS, for example, only allows static
    /// libraries. Defaults to false.
    pub executables: bool,
    /// Relocation model to use in object file. Corresponds to `llc
    /// -relocation-model=$relocation_model`. Defaults to "pic".
    pub relocation_model: String,
    /// Code model to use. Corresponds to `llc -code-model=$code_model`. Defaults to "default".
    pub code_model: String,
    /// Do not emit code that uses the "red zone", if the ABI has one. Defaults to false.
    pub disable_redzone: bool,
    /// Eliminate frame pointers from stack frames if possible. Defaults to true.
    pub eliminate_frame_pointer: bool,
    /// Emit each function in its own section. Defaults to true.
    pub function_sections: bool,
    /// String to prepend to the name of every dynamic library. Defaults to "lib".
    pub dll_prefix: String,
    /// String to append to the name of every dynamic library. Defaults to ".so".
    pub dll_suffix: String,
    /// String to append to the name of every executable.
    pub exe_suffix: String,
    /// String to prepend to the name of every static library. Defaults to "lib".
    pub staticlib_prefix: String,
    /// String to append to the name of every static library. Defaults to ".a".
    pub staticlib_suffix: String,
    /// OS family to use for conditional compilation. Valid options: "unix", "windows".
    pub target_family: Option<String>,
    /// Whether the target toolchain is like OpenBSD's.
    /// Only useful for compiling against OpenBSD, for configuring abi when returning a struct.
    pub is_like_openbsd: bool,
    /// Whether the target toolchain is like macOS's. Only useful for compiling against iOS/macOS,
    /// in particular running dsymutil and some other stuff like `-dead_strip`. Defaults to false.
    pub is_like_osx: bool,
    /// Whether the target toolchain is like Solaris's.
    /// Only useful for compiling against Illumos/Solaris,
    /// as they have a different set of linker flags. Defaults to false.
    pub is_like_solaris: bool,
    /// Whether the target toolchain is like Windows'. Only useful for compiling against Windows,
    /// only really used for figuring out how to find libraries, since Windows uses its own
    /// library naming convention. Defaults to false.
    pub is_like_windows: bool,
    /// Whether the target toolchain is like MSVC's (presumably: uses
    /// link.exe-style flags and MSVC conventions — confirm against linker code).
    /// Defaults to false.
    pub is_like_msvc: bool,
    /// Whether the target toolchain is like Android's. Only useful for compiling against Android.
    /// Defaults to false.
    pub is_like_android: bool,
    /// Whether the target toolchain is like Emscripten's. Only useful for compiling with
    /// Emscripten toolchain.
    /// Defaults to false.
    pub is_like_emscripten: bool,
    /// Whether the linker support GNU-like arguments such as -O. Defaults to false.
    pub linker_is_gnu: bool,
    /// The MinGW toolchain has a known issue that prevents it from correctly
    /// handling COFF object files with more than 2^15 sections. Since each weak
    /// symbol needs its own COMDAT section, weak linkage implies a large
    /// number of sections that easily exceeds the given limit for larger
    /// codebases. Consequently we want a way to disallow weak linkage on some
    /// platforms.
    pub allows_weak_linkage: bool,
    /// Whether the linker support rpaths or not. Defaults to false.
    pub has_rpath: bool,
    /// Whether to disable linking to the default libraries, typically corresponds
    /// to `-nodefaultlibs`. Defaults to true.
    pub no_default_libraries: bool,
    /// Dynamically linked executables can be compiled as position independent
    /// if the default relocation model of position independent code is not
    /// changed. This is a requirement to take advantage of ASLR, as otherwise
    /// the functions in the executable are not randomized and can be used
    /// during an exploit of a vulnerability in any code.
    pub position_independent_executables: bool,
    /// Either partial, full, or off. Full RELRO makes the dynamic linker
    /// resolve all symbols at startup and marks the GOT read-only before
    /// starting the program, preventing overwriting the GOT.
    pub relro_level: RelroLevel,
    /// Format that archives should be emitted in. This affects whether we use
    /// LLVM to assemble an archive or fall back to the system linker, and
    /// currently only "gnu" is used to fall into LLVM. Unknown strings cause
    /// the system linker to be used.
    pub archive_format: String,
    /// Is asm!() allowed? Defaults to true.
    pub allow_asm: bool,
    /// Whether the target uses a custom unwind resumption routine.
    /// By default LLVM lowers `resume` instructions into calls to `_Unwind_Resume`
    /// defined in libgcc. If this option is enabled, the target must provide
    /// `eh_unwind_resume` lang item.
    pub custom_unwind_resume: bool,
    /// If necessary, a different crate to link exe allocators by default
    pub exe_allocation_crate: Option<String>,
    /// Flag indicating whether ELF TLS (e.g. #[thread_local]) is available for
    /// this target.
    pub has_elf_tls: bool,
    // This is mainly for easy compatibility with emscripten.
    // If we give emcc .o files that are actually .bc files it
    // will 'just work'.
    pub obj_is_bitcode: bool,
    // LLVM can't produce object files for this target. Instead, we'll make LLVM
    // emit assembly and then use `gcc` to turn that assembly into an object
    // file
    pub no_integrated_as: bool,
    /// Don't use this field; instead use the `.min_atomic_width()` method.
    pub min_atomic_width: Option<u64>,
    /// Don't use this field; instead use the `.max_atomic_width()` method.
    pub max_atomic_width: Option<u64>,
    /// Panic strategy: "unwind" or "abort"
    pub panic_strategy: PanicStrategy,
    /// A blacklist of ABIs unsupported by the current target. Note that generic
    /// ABIs are considered to be supported on all platforms and cannot be blacklisted.
    pub abi_blacklist: Vec<Abi>,
    /// Whether or not linking dylibs to a static CRT is allowed.
    pub crt_static_allows_dylibs: bool,
    /// Whether or not the CRT is statically linked by default.
    pub crt_static_default: bool,
    /// Whether or not crt-static is respected by the compiler (or is a no-op).
    pub crt_static_respected: bool,
    /// Whether or not stack probes (__rust_probestack) are enabled
    pub stack_probes: bool,
    /// The minimum alignment for global symbols.
    pub min_global_align: Option<u64>,
}
impl Default for TargetOptions {
    /// Create a set of "sane defaults" for any target. This is still
    /// incomplete, and if used for compilation, will certainly not work.
    /// Fields are initialized in the same order they are declared on the
    /// struct to make the two easy to diff.
    fn default() -> TargetOptions {
        TargetOptions {
            is_builtin: false,
            // Toolchain programs; overridable at rustc build time.
            linker: option_env!("CFG_DEFAULT_LINKER").unwrap_or("cc").to_string(),
            ar: option_env!("CFG_DEFAULT_AR").unwrap_or("ar").to_string(),
            // Linking: no extra arguments or objects by default.
            pre_link_args: LinkArgs::new(),
            pre_link_objects_exe: Vec::new(),
            pre_link_objects_dll: Vec::new(),
            late_link_args: LinkArgs::new(),
            post_link_objects: Vec::new(),
            post_link_args: LinkArgs::new(),
            link_env: Vec::new(),
            asm_args: Vec::new(),
            // Codegen defaults.
            cpu: "generic".to_string(),
            features: String::new(),
            dynamic_linking: false,
            executables: false,
            relocation_model: "pic".to_string(),
            code_model: "default".to_string(),
            disable_redzone: false,
            eliminate_frame_pointer: true,
            function_sections: true,
            // Unix-flavored artifact naming.
            dll_prefix: "lib".to_string(),
            dll_suffix: ".so".to_string(),
            exe_suffix: String::new(),
            staticlib_prefix: "lib".to_string(),
            staticlib_suffix: ".a".to_string(),
            target_family: None,
            // Platform "likeness" flags all start out unset.
            is_like_openbsd: false,
            is_like_osx: false,
            is_like_solaris: false,
            is_like_windows: false,
            is_like_msvc: false,
            is_like_android: false,
            is_like_emscripten: false,
            linker_is_gnu: false,
            allows_weak_linkage: true,
            has_rpath: false,
            no_default_libraries: true,
            position_independent_executables: false,
            relro_level: RelroLevel::Off,
            archive_format: "gnu".to_string(),
            allow_asm: true,
            custom_unwind_resume: false,
            exe_allocation_crate: None,
            has_elf_tls: false,
            obj_is_bitcode: false,
            no_integrated_as: false,
            min_atomic_width: None,
            max_atomic_width: None,
            panic_strategy: PanicStrategy::Unwind,
            abi_blacklist: Vec::new(),
            crt_static_allows_dylibs: false,
            crt_static_default: false,
            crt_static_respected: false,
            stack_probes: false,
            min_global_align: None,
        }
    }
}
impl Target {
/// Given a function ABI, turn "System" into the correct ABI for this target.
pub fn adjust_abi(&self, abi: Abi) -> Abi {
match abi {
Abi::System => {
if self.options.is_like_windows && self.arch == "x86" {
Abi::Stdcall
} else {
Abi::C
}
},
abi => abi
}
}
/// Minimum integer size in bits that this target can perform atomic
/// operations on.
pub fn min_atomic_width(&self) -> u64 {
self.options.min_atomic_width.unwrap_or(8)
}
/// Maximum integer size in bits that this target can perform atomic
/// operations on.
pub fn max_atomic_width(&self) -> u64 {
self.options.max_atomic_width.unwrap_or(self.target_pointer_width.parse().unwrap())
}
pub fn is_abi_supported(&self, abi: Abi) -> bool {
abi.generic() || !self.options.abi_blacklist.contains(&abi)
}
/// Load a target descriptor from a JSON object.
pub fn from_json(obj: Json) -> TargetResult {
// While ugly, this code must remain this way to retain
// compatibility with existing JSON fields and the internal
// expected naming of the Target and TargetOptions structs.
// To ensure compatibility is retained, the built-in targets
// are round-tripped through this code to catch cases where
// the JSON parser is not updated to match the structs.
let get_req_field = |name: &str| {
match obj.find(name)
.map(|s| s.as_string())
.and_then(|os| os.map(|s| s.to_string())) {
Some(val) => Ok(val),
None => {
return Err(format!("Field {} in target specification is required", name))
}
}
};
let get_opt_field = |name: &str, default: &str| {
obj.find(name).and_then(|s| s.as_string())
.map(|s| s.to_string())
.unwrap_or(default.to_string())
};
let mut base = Target {
llvm_target: get_req_field("llvm-target")?,
target_endian: get_req_field("target-endian")?,
target_pointer_width: get_req_field("target-pointer-width")?,
target_c_int_width: get_req_field("target-c-int-width")?,
data_layout: get_req_field("data-layout")?,
arch: get_req_field("arch")?,
target_os: get_req_field("os")?,
target_env: get_opt_field("env", ""),
target_vendor: get_opt_field("vendor", "unknown"),
linker_flavor: LinkerFlavor::from_str(&*get_req_field("linker-flavor")?)
.ok_or_else(|| {
format!("linker flavor must be {}", LinkerFlavor::one_of())
})?,
options: Default::default(),
};
macro_rules! key {
($key_name:ident) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.find(&name[..]).map(|o| o.as_string()
.map(|s| base.options.$key_name = s.to_string()));
} );
($key_name:ident, bool) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.find(&name[..])
.map(|o| o.as_boolean()
.map(|s| base.options.$key_name = s));
} );
($key_name:ident, Option<u64>) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.find(&name[..])
.map(|o| o.as_u64()
.map(|s| base.options.$key_name = Some(s)));
} );
($key_name:ident, PanicStrategy) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match s {
"unwind" => base.options.$key_name = PanicStrategy::Unwind,
"abort" => base.options.$key_name = PanicStrategy::Abort,
_ => return Some(Err(format!("'{}' is not a valid value for \
panic-strategy. Use 'unwind' or 'abort'.",
s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, RelroLevel) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match s.parse::<RelroLevel>() {
Ok(level) => base.options.$key_name = level,
_ => return Some(Err(format!("'{}' is not a valid value for \
relro-level. Use 'full', 'partial, or 'off'.",
s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, list) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.find(&name[..]).map(|o| o.as_array()
.map(|v| base.options.$key_name = v.iter()
.map(|a| a.as_string().unwrap().to_string()).collect()
)
);
} );
($key_name:ident, optional) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(o) = obj.find(&name[..]) {
base.options.$key_name = o
.as_string()
.map(|s| s.to_string() );
}
} );
($key_name:ident, LinkerFlavor) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.find(&name[..]).and_then(|o| o.as_string().map(|s| {
LinkerFlavor::from_str(&s).ok_or_else(|| {
Err(format!("'{}' is not a valid value for linker-flavor. \
Use 'em', 'gcc', 'ld' or 'msvc.", s))
})
})).unwrap_or(Ok(()))
} );
($key_name:ident, link_args) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(obj) = obj.find(&name[..]).and_then(|o| o.as_object()) {
let mut args = LinkArgs::new();
for (k, v) in obj {
let k = LinkerFlavor::from_str(&k).ok_or_else(|| {
format!("{}: '{}' is not a valid value for linker-flavor. \
Use 'em', 'gcc', 'ld' or 'msvc'", name, k)
})?;
let v = v.as_array().map(|a| {
a
.iter()
.filter_map(|o| o.as_string())
.map(|s| s.to_owned())
.collect::<Vec<_>>()
}).unwrap_or(vec![]);
args.insert(k, v);
}
base.options.$key_name = args;
}
} );
($key_name:ident, env) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(a) = obj.find(&name[..]).and_then(|o| o.as_array()) {
for o in a {
if let Some(s) = o.as_string() {
let p = s.split('=').collect::<Vec<_>>();
if p.len() == 2 {
let k = p[0].to_string();
let v = p[1].to_string();
base.options.$key_name.push((k, v));
}
}
}
}
} );
}
key!(is_builtin, bool);
key!(linker);
key!(ar);
key!(pre_link_args, link_args);
key!(pre_link_objects_exe, list);
key!(pre_link_objects_dll, list);
key!(late_link_args, link_args);
key!(post_link_objects, list);
key!(post_link_args, link_args);
key!(link_env, env);
key!(asm_args, list);
key!(cpu);
key!(features);
key!(dynamic_linking, bool);
key!(executables, bool);
key!(relocation_model);
key!(code_model);
key!(disable_redzone, bool);
key!(eliminate_frame_pointer, bool);
key!(function_sections, bool);
key!(dll_prefix);
key!(dll_suffix);
key!(exe_suffix);
key!(staticlib_prefix);
key!(staticlib_suffix);
key!(target_family, optional);
key!(is_like_openbsd, bool);
key!(is_like_osx, bool);
key!(is_like_solaris, bool);
key!(is_like_windows, bool);
key!(is_like_msvc, bool);
key!(is_like_emscripten, bool);
key!(is_like_android, bool);
key!(linker_is_gnu, bool);
key!(allows_weak_linkage, bool);
key!(has_rpath, bool);
key!(no_default_libraries, bool);
key!(position_independent_executables, bool);
try!(key!(relro_level, RelroLevel));
key!(archive_format);
key!(allow_asm, bool);
key!(custom_unwind_resume, bool);
key!(exe_allocation_crate, optional);
key!(has_elf_tls, bool);
key!(obj_is_bitcode, bool);
key!(no_integrated_as, bool);
key!(max_atomic_width, Option<u64>);
key!(min_atomic_width, Option<u64>);
try!(key!(panic_strategy, PanicStrategy));
key!(crt_static_allows_dylibs, bool);
key!(crt_static_default, bool);
key!(crt_static_respected, bool);
key!(stack_probes, bool);
key!(min_global_align, Option<u64>);
if let Some(array) = obj.find("abi-blacklist").and_then(Json::as_array) {
for name in array.iter().filter_map(|abi| abi.as_string()) {
match lookup_abi(name) {
Some(abi) => {
if abi.generic() {
return Err(format!("The ABI \"{}\" is considered to be supported on \
all targets and cannot be blacklisted", abi))
}
base.options.abi_blacklist.push(abi)
}
None => return Err(format!("Unknown ABI \"{}\" in target specification", name))
}
}
}
Ok(base)
}
/// Search RUST_TARGET_PATH for a JSON file specifying the given target
/// triple. Note that it could also just be a bare filename already, so also
/// check for that. If one of the hardcoded targets we know about, just
/// return it directly.
///
/// The error string could come from any of the APIs called, including
/// filesystem access and JSON decoding.
pub fn search(target: &str) -> Result<Target, String> {
use std::env;
use std::ffi::OsString;
use std::fs::File;
use std::path::{Path, PathBuf};
use serialize::json;
fn load_file(path: &Path) -> Result<Target, String> {
let mut f = File::open(path).map_err(|e| e.to_string())?;
let mut contents = Vec::new();
f.read_to_end(&mut contents).map_err(|e| e.to_string())?;
let obj = json::from_reader(&mut &contents[..])
.map_err(|e| e.to_string())?;
Target::from_json(obj)
}
if let Ok(t) = load_specific(target) {
return Ok(t)
}
let path = Path::new(target);
if path.is_file() {
return load_file(&path);
}
let path = {
let mut target = target.to_string();
target.push_str(".json");
PathBuf::from(target)
};
let target_path = env::var_os("RUST_TARGET_PATH")
.unwrap_or(OsString::new());
// FIXME 16351: add a sane default search path?
for dir in env::split_paths(&target_path) {
let p = dir.join(&path);
if p.is_file() {
return load_file(&p);
}
}
Err(format!("Could not find specification for target {:?}", target))
}
}
impl ToJson for Target {
    /// Serializes this target back into the JSON form `Target::from_json`
    /// reads: mandatory `Target` fields are always emitted, while
    /// `TargetOptions` fields are emitted only when they differ from the
    /// default, keeping the output minimal.
    fn to_json(&self) -> Json {
        let mut d = BTreeMap::new();
        // Baseline used to suppress options that still hold their defaults.
        let default: TargetOptions = Default::default();
        // Unconditionally insert a mandatory `Target` field, keyed by the
        // kebab-cased field name (or an explicitly supplied key).
        macro_rules! target_val {
            ($attr:ident) => ( {
                let name = (stringify!($attr)).replace("_", "-");
                d.insert(name.to_string(), self.$attr.to_json());
            } );
            ($attr:ident, $key_name:expr) => ( {
                let name = $key_name;
                d.insert(name.to_string(), self.$attr.to_json());
            } );
        }
        // Insert a `TargetOptions` field only if it deviates from the default.
        // The `link_args -` and `env -` forms handle the two fields with
        // non-trivial encodings (flavor-keyed map and `KEY=VALUE` strings).
        macro_rules! target_option_val {
            ($attr:ident) => ( {
                let name = (stringify!($attr)).replace("_", "-");
                if default.$attr != self.options.$attr {
                    d.insert(name.to_string(), self.options.$attr.to_json());
                }
            } );
            ($attr:ident, $key_name:expr) => ( {
                let name = $key_name;
                if default.$attr != self.options.$attr {
                    d.insert(name.to_string(), self.options.$attr.to_json());
                }
            } );
            (link_args - $attr:ident) => ( {
                let name = (stringify!($attr)).replace("_", "-");
                if default.$attr != self.options.$attr {
                    // Encode the LinkArgs map as { "<flavor desc>": [args...] }.
                    let obj = self.options.$attr
                        .iter()
                        .map(|(k, v)| (k.desc().to_owned(), v.clone()))
                        .collect::<BTreeMap<_, _>>();
                    d.insert(name.to_string(), obj.to_json());
                }
            } );
            (env - $attr:ident) => ( {
                let name = (stringify!($attr)).replace("_", "-");
                if default.$attr != self.options.$attr {
                    // Encode (key, value) pairs back into "KEY=VALUE" strings.
                    let obj = self.options.$attr
                        .iter()
                        .map(|&(ref k, ref v)| k.clone() + "=" + &v)
                        .collect::<Vec<_>>();
                    d.insert(name.to_string(), obj.to_json());
                }
            } );
        }
        target_val!(llvm_target);
        target_val!(target_endian);
        target_val!(target_pointer_width);
        target_val!(target_c_int_width);
        target_val!(arch);
        target_val!(target_os, "os");
        target_val!(target_env, "env");
        target_val!(target_vendor, "vendor");
        target_val!(data_layout);
        target_val!(linker_flavor);
        target_option_val!(is_builtin);
        target_option_val!(linker);
        target_option_val!(ar);
        target_option_val!(link_args - pre_link_args);
        target_option_val!(pre_link_objects_exe);
        target_option_val!(pre_link_objects_dll);
        target_option_val!(link_args - late_link_args);
        target_option_val!(post_link_objects);
        target_option_val!(link_args - post_link_args);
        target_option_val!(env - link_env);
        target_option_val!(asm_args);
        target_option_val!(cpu);
        target_option_val!(features);
        target_option_val!(dynamic_linking);
        target_option_val!(executables);
        target_option_val!(relocation_model);
        target_option_val!(code_model);
        target_option_val!(disable_redzone);
        target_option_val!(eliminate_frame_pointer);
        target_option_val!(function_sections);
        target_option_val!(dll_prefix);
        target_option_val!(dll_suffix);
        target_option_val!(exe_suffix);
        target_option_val!(staticlib_prefix);
        target_option_val!(staticlib_suffix);
        target_option_val!(target_family);
        target_option_val!(is_like_openbsd);
        target_option_val!(is_like_osx);
        target_option_val!(is_like_solaris);
        target_option_val!(is_like_windows);
        target_option_val!(is_like_msvc);
        target_option_val!(is_like_emscripten);
        target_option_val!(is_like_android);
        target_option_val!(linker_is_gnu);
        target_option_val!(allows_weak_linkage);
        target_option_val!(has_rpath);
        target_option_val!(no_default_libraries);
        target_option_val!(position_independent_executables);
        target_option_val!(relro_level);
        target_option_val!(archive_format);
        target_option_val!(allow_asm);
        target_option_val!(custom_unwind_resume);
        target_option_val!(exe_allocation_crate);
        target_option_val!(has_elf_tls);
        target_option_val!(obj_is_bitcode);
        target_option_val!(no_integrated_as);
        target_option_val!(min_atomic_width);
        target_option_val!(max_atomic_width);
        target_option_val!(panic_strategy);
        target_option_val!(crt_static_allows_dylibs);
        target_option_val!(crt_static_default);
        target_option_val!(crt_static_respected);
        target_option_val!(stack_probes);
        target_option_val!(min_global_align);
        // The ABI blacklist has no macro form: serialize it as an array of
        // ABI names, but only when non-default.
        if default.abi_blacklist != self.options.abi_blacklist {
            d.insert("abi-blacklist".to_string(), self.options.abi_blacklist.iter()
                .map(Abi::name).map(|name| name.to_json())
                .collect::<Vec<_>>().to_json());
        }
        Json::Object(d)
    }
}
/// Name of the allocator crate to use when rustc was compiled with jemalloc
/// support (`feature = "jemalloc"`), or `None` otherwise.
fn maybe_jemalloc() -> Option<String> {
    match cfg!(feature = "jemalloc") {
        true => Some("alloc_jemalloc".to_string()),
        false => None,
    }
}
| 41.893122 | 100 | 0.590644 |
0157423863c2aa595a831eb5f74b29560efbd037 | 903 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Two-field struct used below to exercise struct patterns with match guards.
// NOTE(review): `int` is pre-1.0 Rust syntax; this test targets an old compiler.
struct Pair { x: int, y: int }
// Explicit opt-in to `Copy` (pre-1.0 style, before `#[derive(Copy)]` was usual).
impl Copy for Pair {}
pub fn main() {
    // Guards are tried in order: `10` fails `x < 7`, satisfies `x < 11`,
    // so `a` is 2 and the literal arm `10 => 3` is never reached.
    let a: int =
        match 10i { x if x < 7 => { 1i } x if x < 11 => { 2i } 10 => { 3i } _ => { 4i } };
    assert_eq!(a, 2);
    // Struct patterns with guards: the first guard fails (10 >= 5), the
    // second arm destructures and its guard matches x == 10 && y == 20.
    let b: int =
        match (Pair {x: 10, y: 20}) {
            x if x.x < 5 && x.y < 5 => { 1i }
            Pair {x: x, y: y} if x == 10 && y == 20 => { 2i }
            Pair {x: _x, y: _y} => { 3i }
        };
    assert_eq!(b, 2);
}
| 32.25 | 90 | 0.566999 |
097eaddb874083ec2895f553eaf16fecfff555ef | 48,371 | use ArgumentType::*;
use Position::*;
use rustc_ast as ast;
use rustc_ast::ptr::P;
use rustc_ast::tokenstream::TokenStream;
use rustc_ast::{token, BlockCheckMode, UnsafeSource};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_errors::{pluralize, Applicability, DiagnosticBuilder};
use rustc_expand::base::{self, *};
use rustc_parse_format as parse;
use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::{MultiSpan, Span};
use std::borrow::Cow;
use std::collections::hash_map::Entry;
#[derive(PartialEq)]
enum ArgumentType {
    /// A formatting placeholder; the payload is the name of the `fmt` trait
    /// it resolves to ("Display", "Debug", ...), or "<invalid>" after an
    /// unknown-format-trait error.
    Placeholder(&'static str),
    /// A `usize` argument consumed as a width/precision count.
    Count,
}
/// How a format placeholder refers to its argument.
enum Position {
    /// By (resolved) index into the argument list.
    Exact(usize),
    /// By name, e.g. `{foo}`; resolved to an index via `Context::names`.
    Named(Symbol),
}
/// State accumulated while expanding a single `format_args!` invocation.
struct Context<'a, 'b> {
    /// Expansion context used to build AST nodes and emit diagnostics.
    ecx: &'a mut ExtCtxt<'b>,
    /// The macro's call site. References to unstable formatting internals must
    /// use this span to pass the stability checker.
    macsp: Span,
    /// The span of the format string literal.
    fmtsp: Span,
    /// List of parsed argument expressions.
    /// Named expressions are resolved early, and are appended to the end of
    /// argument expressions.
    ///
    /// Example showing the various data structures in motion:
    ///
    /// * Original: `"{foo:o} {:o} {foo:x} {0:x} {1:o} {:x} {1:x} {0:o}"`
    /// * Implicit argument resolution: `"{foo:o} {0:o} {foo:x} {0:x} {1:o} {1:x} {1:x} {0:o}"`
    /// * Name resolution: `"{2:o} {0:o} {2:x} {0:x} {1:o} {1:x} {1:x} {0:o}"`
    /// * `arg_types` (in JSON): `[[0, 1, 0], [0, 1, 1], [0, 1]]`
    /// * `arg_unique_types` (in simplified JSON): `[["o", "x"], ["o", "x"], ["o", "x"]]`
    /// * `names` (in JSON): `{"foo": 2}`
    args: Vec<P<ast::Expr>>,
    /// Placeholder slot numbers indexed by argument.
    arg_types: Vec<Vec<usize>>,
    /// Unique format specs seen for each argument.
    arg_unique_types: Vec<Vec<ArgumentType>>,
    /// Map from named arguments to their resolved indices.
    names: FxHashMap<Symbol, usize>,
    /// The latest consecutive literal strings, or empty if there weren't any.
    literal: String,
    /// Collection of the compiled `rt::Argument` structures
    pieces: Vec<P<ast::Expr>>,
    /// Collection of string literals
    str_pieces: Vec<P<ast::Expr>>,
    /// Stays `true` if all formatting parameters are default (as in "{}{}").
    all_pieces_simple: bool,
    /// Mapping between positional argument references and indices into the
    /// final generated static argument array. We record the starting indices
    /// corresponding to each positional argument, and number of references
    /// consumed so far for each argument, to facilitate correct `Position`
    /// mapping in `build_piece`. In effect this can be seen as a "flattened"
    /// version of `arg_unique_types`.
    ///
    /// Again with the example described above in docstring for `args`:
    ///
    /// * `arg_index_map` (in JSON): `[[0, 1, 0], [2, 3, 3], [4, 5]]`
    arg_index_map: Vec<Vec<usize>>,
    /// Starting offset of count argument slots.
    count_args_index_offset: usize,
    /// Count argument slots and tracking data structures.
    /// Count arguments are separately tracked for de-duplication in case
    /// multiple references are made to one argument. For example, in this
    /// format string:
    ///
    /// * Original: `"{:.*} {:.foo$} {1:.*} {:.0$}"`
    /// * Implicit argument resolution: `"{1:.0$} {2:.foo$} {1:.3$} {4:.0$}"`
    /// * Name resolution: `"{1:.0$} {2:.5$} {1:.3$} {4:.0$}"`
    /// * `count_positions` (in JSON): `{0: 0, 5: 1, 3: 2}`
    /// * `count_args`: `vec![Exact(0), Exact(5), Exact(3)]`
    count_args: Vec<Position>,
    /// Relative slot numbers for count arguments.
    count_positions: FxHashMap<usize, usize>,
    /// Number of count slots assigned.
    count_positions_count: usize,
    /// Current position of the implicit positional arg pointer, as if it
    /// still existed in this phase of processing.
    /// Used only for `all_pieces_simple` tracking in `build_piece`.
    curarg: usize,
    /// Current piece being evaluated, used for error reporting.
    curpiece: usize,
    /// Keep track of invalid references to positional arguments.
    invalid_refs: Vec<(usize, usize)>,
    /// Spans of all the formatting arguments, in order.
    arg_spans: Vec<Span>,
    /// All the formatting arguments that have formatting flags set, in order for diagnostics.
    arg_with_formatting: Vec<parse::FormatSpec<'a>>,
    /// Whether this format string came from a string literal, as opposed to a macro.
    is_literal: bool,
}
/// Parses the arguments from the given list of tokens, returning the diagnostic
/// if there's a parse error so we can continue parsing other format!
/// expressions.
///
/// If parsing succeeds, the return value is:
///
/// ```text
/// Some((fmtstr, parsed arguments, index map for named arguments))
/// ```
fn parse_args<'a>(
    ecx: &mut ExtCtxt<'a>,
    sp: Span,
    tts: TokenStream,
) -> Result<(P<ast::Expr>, Vec<P<ast::Expr>>, FxHashMap<Symbol, usize>), DiagnosticBuilder<'a>> {
    let mut args = Vec::<P<ast::Expr>>::new();
    let mut names = FxHashMap::<Symbol, usize>::default();
    let mut p = ecx.new_parser_from_tts(tts);
    if p.token == token::Eof {
        return Err(ecx.struct_span_err(sp, "requires at least a format string argument"));
    }
    let first_token = &p.token;
    let fmtstr = match first_token.kind {
        token::TokenKind::Literal(token::Lit {
            kind: token::LitKind::Str | token::LitKind::StrRaw(_),
            ..
        }) => {
            // If the first token is a string literal, then a format expression
            // is constructed from it.
            //
            // This allows us to properly handle cases when the first comma
            // after the format string is mistakenly replaced with any operator,
            // which cause the expression parser to eat too much tokens.
            p.parse_literal_maybe_minus()?
        }
        _ => {
            // Otherwise, we fall back to the expression parser.
            p.parse_expr()?
        }
    };
    // `first` is true until the first comma is consumed (improves "expected
    // `,`" recovery); `named` flips once a `name = expr` argument is seen,
    // after which further positional arguments are rejected.
    let mut first = true;
    let mut named = false;
    while p.token != token::Eof {
        if !p.eat(&token::Comma) {
            if first {
                p.clear_expected_tokens();
            }
            match p.expect(&token::Comma) {
                Err(mut err) => {
                    match token::TokenKind::Comma.similar_tokens() {
                        Some(tks) if tks.contains(&p.token.kind) => {
                            // If a similar token is found, then it may be a typo. We
                            // consider it as a comma, and continue parsing.
                            err.emit();
                            p.bump();
                        }
                        // Otherwise stop the parsing and return the error.
                        _ => return Err(err),
                    }
                }
                Ok(recovered) => {
                    assert!(recovered);
                }
            }
        }
        first = false;
        if p.token == token::Eof {
            break;
        } // accept trailing commas
        match p.token.ident() {
            // `ident = expr`: a named argument.
            Some((ident, _)) if p.look_ahead(1, |t| *t == token::Eq) => {
                named = true;
                p.bump();
                p.expect(&token::Eq)?;
                let e = p.parse_expr()?;
                if let Some(prev) = names.get(&ident.name) {
                    ecx.struct_span_err(e.span, &format!("duplicate argument named `{}`", ident))
                        .span_label(args[*prev].span, "previously here")
                        .span_label(e.span, "duplicate argument")
                        .emit();
                    continue;
                }
                // Resolve names into slots early.
                // Since all the positional args are already seen at this point
                // if the input is valid, we can simply append to the positional
                // args. And remember the names.
                let slot = args.len();
                names.insert(ident.name, slot);
                args.push(e);
            }
            _ => {
                let e = p.parse_expr()?;
                if named {
                    let mut err = ecx.struct_span_err(
                        e.span,
                        "positional arguments cannot follow named arguments",
                    );
                    err.span_label(e.span, "positional arguments must be before named arguments");
                    for pos in names.values() {
                        err.span_label(args[*pos].span, "named argument");
                    }
                    err.emit();
                }
                args.push(e);
            }
        }
    }
    Ok((fmtstr, args, names))
}
impl<'a, 'b> Context<'a, 'b> {
fn resolve_name_inplace(&self, p: &mut parse::Piece<'_>) {
// NOTE: the `unwrap_or` branch is needed in case of invalid format
// arguments, e.g., `format_args!("{foo}")`.
let lookup = |s: Symbol| *self.names.get(&s).unwrap_or(&0);
match *p {
parse::String(_) => {}
parse::NextArgument(ref mut arg) => {
if let parse::ArgumentNamed(s) = arg.position {
arg.position = parse::ArgumentIs(lookup(s));
}
if let parse::CountIsName(s) = arg.format.width {
arg.format.width = parse::CountIsParam(lookup(s));
}
if let parse::CountIsName(s) = arg.format.precision {
arg.format.precision = parse::CountIsParam(lookup(s));
}
}
}
}
    /// Verifies one piece of a parse string, and remembers it if valid.
    /// All errors are not emitted as fatal so we can continue giving errors
    /// about this and possibly other format strings.
    fn verify_piece(&mut self, p: &parse::Piece<'_>) {
        match *p {
            parse::String(..) => {}
            parse::NextArgument(ref arg) => {
                // width/precision first, if they have implicit positional
                // parameters it makes more sense to consume them first.
                self.verify_count(arg.format.width);
                self.verify_count(arg.format.precision);
                // argument second, if it's an implicit positional parameter
                // it's written second, so it should come after width/precision.
                let pos = match arg.position {
                    parse::ArgumentIs(i) | parse::ArgumentImplicitlyIs(i) => Exact(i),
                    parse::ArgumentNamed(s) => Named(s),
                };
                // Map the format-spec type character to the `fmt` trait used
                // to format the argument; unknown characters get a diagnostic
                // and the "<invalid>" sentinel.
                let ty = Placeholder(match arg.format.ty {
                    "" => "Display",
                    "?" => "Debug",
                    "e" => "LowerExp",
                    "E" => "UpperExp",
                    "o" => "Octal",
                    "p" => "Pointer",
                    "b" => "Binary",
                    "x" => "LowerHex",
                    "X" => "UpperHex",
                    _ => {
                        let fmtsp = self.fmtsp;
                        let sp = arg.format.ty_span.map(|sp| fmtsp.from_inner(sp));
                        let mut err = self.ecx.struct_span_err(
                            sp.unwrap_or(fmtsp),
                            &format!("unknown format trait `{}`", arg.format.ty),
                        );
                        err.note(
                            "the only appropriate formatting traits are:\n\
                             - ``, which uses the `Display` trait\n\
                             - `?`, which uses the `Debug` trait\n\
                             - `e`, which uses the `LowerExp` trait\n\
                             - `E`, which uses the `UpperExp` trait\n\
                             - `o`, which uses the `Octal` trait\n\
                             - `p`, which uses the `Pointer` trait\n\
                             - `b`, which uses the `Binary` trait\n\
                             - `x`, which uses the `LowerHex` trait\n\
                             - `X`, which uses the `UpperHex` trait",
                        );
                        // When we know the exact span of the bad type
                        // character, offer machine-applicable replacements.
                        if let Some(sp) = sp {
                            for (fmt, name) in &[
                                ("", "Display"),
                                ("?", "Debug"),
                                ("e", "LowerExp"),
                                ("E", "UpperExp"),
                                ("o", "Octal"),
                                ("p", "Pointer"),
                                ("b", "Binary"),
                                ("x", "LowerHex"),
                                ("X", "UpperHex"),
                            ] {
                                // FIXME: rustfix (`run-rustfix`) fails to apply suggestions.
                                // > "Cannot replace slice of data that was already replaced"
                                err.tool_only_span_suggestion(
                                    sp,
                                    &format!("use the `{}` trait", name),
                                    (*fmt).to_string(),
                                    Applicability::MaybeIncorrect,
                                );
                            }
                        }
                        err.emit();
                        "<invalid>"
                    }
                });
                self.verify_arg_type(pos, ty);
                self.curpiece += 1;
            }
        }
    }
fn verify_count(&mut self, c: parse::Count) {
match c {
parse::CountImplied | parse::CountIs(..) => {}
parse::CountIsParam(i) => {
self.verify_arg_type(Exact(i), Count);
}
parse::CountIsName(s) => {
self.verify_arg_type(Named(s), Count);
}
}
}
fn describe_num_args(&self) -> Cow<'_, str> {
match self.args.len() {
0 => "no arguments were given".into(),
1 => "there is 1 argument".into(),
x => format!("there are {} arguments", x).into(),
}
}
    /// Handle invalid references to positional arguments. Output different
    /// errors for the case where all arguments are positional and for when
    /// there are named arguments or numbered positional arguments in the
    /// format string.
    fn report_invalid_references(&self, numbered_position_args: bool) {
        let mut e;
        let sp = if !self.arg_spans.is_empty() {
            // Point at the formatting arguments.
            MultiSpan::from_spans(self.arg_spans.clone())
        } else {
            MultiSpan::from_span(self.fmtsp)
        };
        let refs =
            self.invalid_refs.iter().map(|(r, pos)| (r.to_string(), self.arg_spans.get(*pos)));
        let mut zero_based_note = false;
        // Total positional slots the format string consumes: one per piece,
        // plus one extra per `.*` precision flag.
        let count = self.pieces.len()
            + self.arg_with_formatting.iter().filter(|fmt| fmt.precision_span.is_some()).count();
        if self.names.is_empty() && !numbered_position_args && count != self.args.len() {
            // All references are implicit (`{}`): report a simple
            // count-mismatch error pointing at every supplied argument.
            e = self.ecx.struct_span_err(
                sp,
                &format!(
                    "{} positional argument{} in format string, but {}",
                    count,
                    pluralize!(count),
                    self.describe_num_args(),
                ),
            );
            for arg in &self.args {
                // Point at the arguments that will be formatted.
                e.span_label(arg.span, "");
            }
        } else {
            // Otherwise list the specific out-of-range references.
            let (mut refs, spans): (Vec<_>, Vec<_>) = refs.unzip();
            // Avoid `invalid reference to positional arguments 7 and 7 (there is 1 argument)`
            // for `println!("{7:7$}", 1);`
            refs.sort();
            refs.dedup();
            let spans: Vec<_> = spans.into_iter().filter_map(|sp| sp.copied()).collect();
            let sp = if self.arg_spans.is_empty() || spans.is_empty() {
                MultiSpan::from_span(self.fmtsp)
            } else {
                MultiSpan::from_spans(spans)
            };
            let arg_list = if refs.len() == 1 {
                format!("argument {}", refs[0])
            } else {
                let reg = refs.pop().unwrap();
                format!("arguments {head} and {tail}", head = refs.join(", "), tail = reg)
            };
            e = self.ecx.struct_span_err(
                sp,
                &format!(
                    "invalid reference to positional {} ({})",
                    arg_list,
                    self.describe_num_args()
                ),
            );
            zero_based_note = true;
        };
        // Attach explanatory labels for width/precision flags that consume
        // arguments beyond what was supplied.
        for fmt in &self.arg_with_formatting {
            if let Some(span) = fmt.precision_span {
                let span = self.fmtsp.from_inner(span);
                match fmt.precision {
                    parse::CountIsParam(pos) if pos > self.args.len() => {
                        e.span_label(
                            span,
                            &format!(
                                "this precision flag expects an `usize` argument at position {}, \
                                 but {}",
                                pos,
                                self.describe_num_args(),
                            ),
                        );
                        zero_based_note = true;
                    }
                    parse::CountIsParam(pos) => {
                        let count = self.pieces.len()
                            + self
                                .arg_with_formatting
                                .iter()
                                .filter(|fmt| fmt.precision_span.is_some())
                                .count();
                        e.span_label(span, &format!(
                            "this precision flag adds an extra required argument at position {}, \
                             which is why there {} expected",
                            pos,
                            if count == 1 {
                                "is 1 argument".to_string()
                            } else {
                                format!("are {} arguments", count)
                            },
                        ));
                        if let Some(arg) = self.args.get(pos) {
                            e.span_label(
                                arg.span,
                                "this parameter corresponds to the precision flag",
                            );
                        }
                        zero_based_note = true;
                    }
                    _ => {}
                }
            }
            if let Some(span) = fmt.width_span {
                let span = self.fmtsp.from_inner(span);
                match fmt.width {
                    parse::CountIsParam(pos) if pos > self.args.len() => {
                        e.span_label(
                            span,
                            &format!(
                                "this width flag expects an `usize` argument at position {}, \
                                 but {}",
                                pos,
                                self.describe_num_args(),
                            ),
                        );
                        zero_based_note = true;
                    }
                    _ => {}
                }
            }
        }
        if zero_based_note {
            e.note("positional arguments are zero-based");
        }
        if !self.arg_with_formatting.is_empty() {
            e.note(
                "for information about formatting flags, visit \
                 https://doc.rust-lang.org/std/fmt/index.html",
            );
        }
        e.emit();
    }
    /// Actually verifies and tracks a given format placeholder
    /// (a.k.a. argument).
    fn verify_arg_type(&mut self, arg: Position, ty: ArgumentType) {
        match arg {
            Exact(arg) => {
                if self.args.len() <= arg {
                    // Out-of-range positional reference; collected here and
                    // reported later by `report_invalid_references`.
                    self.invalid_refs.push((arg, self.curpiece));
                    return;
                }
                match ty {
                    Placeholder(_) => {
                        // record every (position, type) combination only once
                        let seen_ty = &mut self.arg_unique_types[arg];
                        let i = seen_ty.iter().position(|x| *x == ty).unwrap_or_else(|| {
                            let i = seen_ty.len();
                            seen_ty.push(ty);
                            i
                        });
                        self.arg_types[arg].push(i);
                    }
                    Count => {
                        // Count arguments are deduplicated: one slot per
                        // distinct argument index, however often it is used.
                        if let Entry::Vacant(e) = self.count_positions.entry(arg) {
                            let i = self.count_positions_count;
                            e.insert(i);
                            self.count_args.push(Exact(arg));
                            self.count_positions_count += 1;
                        }
                    }
                }
            }
            Named(name) => {
                match self.names.get(&name) {
                    Some(&idx) => {
                        // Treat as positional arg.
                        self.verify_arg_type(Exact(idx), ty)
                    }
                    None => {
                        // For the moment capturing variables from format strings expanded from macros is
                        // disabled (see RFC #2795)
                        if self.is_literal {
                            // Treat this name as a variable to capture from the surrounding scope
                            let idx = self.args.len();
                            self.arg_types.push(Vec::new());
                            self.arg_unique_types.push(Vec::new());
                            let span = if self.is_literal {
                                *self.arg_spans.get(self.curpiece).unwrap_or(&self.fmtsp)
                            } else {
                                self.fmtsp
                            };
                            self.args.push(self.ecx.expr_ident(span, Ident::new(name, span)));
                            self.names.insert(name, idx);
                            self.verify_arg_type(Exact(idx), ty)
                        } else {
                            let msg = format!("there is no argument named `{}`", name);
                            let sp = if self.is_literal {
                                *self.arg_spans.get(self.curpiece).unwrap_or(&self.fmtsp)
                            } else {
                                self.fmtsp
                            };
                            let mut err = self.ecx.struct_span_err(sp, &msg[..]);
                            err.note(&format!(
                                "did you intend to capture a variable `{}` from \
                                 the surrounding scope?",
                                name
                            ));
                            err.note(
                                "to avoid ambiguity, `format_args!` cannot capture variables \
                                 when the format string is expanded from a macro",
                            );
                            err.emit();
                        }
                    }
                }
            }
        }
    }
/// Builds the mapping between format placeholders and argument objects.
fn build_index_map(&mut self) {
// NOTE: Keep the ordering the same as `into_expr`'s expansion would do!
let args_len = self.args.len();
self.arg_index_map.reserve(args_len);
let mut sofar = 0usize;
// Map the arguments
for i in 0..args_len {
let arg_types = &self.arg_types[i];
let arg_offsets = arg_types.iter().map(|offset| sofar + *offset).collect::<Vec<_>>();
self.arg_index_map.push(arg_offsets);
sofar += self.arg_unique_types[i].len();
}
// Record starting index for counts, which appear just after arguments
self.count_args_index_offset = sofar;
}
fn rtpath(ecx: &ExtCtxt<'_>, s: Symbol) -> Vec<Ident> {
ecx.std_path(&[sym::fmt, sym::rt, sym::v1, s])
}
fn build_count(&self, c: parse::Count) -> P<ast::Expr> {
let sp = self.macsp;
let count = |c, arg| {
let mut path = Context::rtpath(self.ecx, sym::Count);
path.push(Ident::new(c, sp));
match arg {
Some(arg) => self.ecx.expr_call_global(sp, path, vec![arg]),
None => self.ecx.expr_path(self.ecx.path_global(sp, path)),
}
};
match c {
parse::CountIs(i) => count(sym::Is, Some(self.ecx.expr_usize(sp, i))),
parse::CountIsParam(i) => {
// This needs mapping too, as `i` is referring to a macro
// argument. If `i` is not found in `count_positions` then
// the error had already been emitted elsewhere.
let i = self.count_positions.get(&i).cloned().unwrap_or(0)
+ self.count_args_index_offset;
count(sym::Param, Some(self.ecx.expr_usize(sp, i)))
}
parse::CountImplied => count(sym::Implied, None),
// should never be the case, names are already resolved
parse::CountIsName(_) => panic!("should never happen"),
}
}
/// Build a literal expression from the accumulated string literals
fn build_literal_string(&mut self) -> P<ast::Expr> {
let sp = self.fmtsp;
let s = Symbol::intern(&self.literal);
self.literal.clear();
self.ecx.expr_str(sp, s)
}
    /// Builds a static `rt::Argument` from a `parse::Piece` or append
    /// to the `literal` string.
    fn build_piece(
        &mut self,
        piece: &parse::Piece<'a>,
        arg_index_consumed: &mut Vec<usize>,
    ) -> Option<P<ast::Expr>> {
        let sp = self.macsp;
        match *piece {
            parse::String(s) => {
                // Literal text: accumulate it; no `rt::Argument` is produced.
                self.literal.push_str(s);
                None
            }
            parse::NextArgument(ref arg) => {
                // Build the position
                let pos = {
                    match arg.position {
                        parse::ArgumentIs(i) | parse::ArgumentImplicitlyIs(i) => {
                            // Map to index in final generated argument array
                            // in case of multiple types specified
                            let arg_idx = match arg_index_consumed.get_mut(i) {
                                None => 0, // error already emitted elsewhere
                                Some(offset) => {
                                    let idx_map = &self.arg_index_map[i];
                                    // unwrap_or branch: error already emitted elsewhere
                                    let arg_idx = *idx_map.get(*offset).unwrap_or(&0);
                                    *offset += 1;
                                    arg_idx
                                }
                            };
                            self.ecx.expr_usize(sp, arg_idx)
                        }
                        // should never be the case, because names are already
                        // resolved.
                        parse::ArgumentNamed(_) => panic!("should never happen"),
                    }
                };
                // What this placeholder would look like with all-default
                // formatting; used to decide whether the fast-path
                // `Arguments::new_v1` constructor can be emitted.
                let simple_arg = parse::Argument {
                    position: {
                        // We don't have ArgumentNext any more, so we have to
                        // track the current argument ourselves.
                        let i = self.curarg;
                        self.curarg += 1;
                        parse::ArgumentIs(i)
                    },
                    format: parse::FormatSpec {
                        fill: arg.format.fill,
                        align: parse::AlignUnknown,
                        flags: 0,
                        precision: parse::CountImplied,
                        precision_span: None,
                        width: parse::CountImplied,
                        width_span: None,
                        ty: arg.format.ty,
                        ty_span: arg.format.ty_span,
                    },
                };
                let fill = arg.format.fill.unwrap_or(' ');
                let pos_simple = arg.position.index() == simple_arg.position.index();
                if arg.format.precision_span.is_some() || arg.format.width_span.is_some() {
                    self.arg_with_formatting.push(arg.format);
                }
                if !pos_simple || arg.format != simple_arg.format || fill != ' ' {
                    self.all_pieces_simple = false;
                }
                // Build the format
                let fill = self.ecx.expr_lit(sp, ast::LitKind::Char(fill));
                let align = |name| {
                    let mut p = Context::rtpath(self.ecx, sym::Alignment);
                    p.push(Ident::new(name, sp));
                    self.ecx.path_global(sp, p)
                };
                let align = match arg.format.align {
                    parse::AlignLeft => align(sym::Left),
                    parse::AlignRight => align(sym::Right),
                    parse::AlignCenter => align(sym::Center),
                    parse::AlignUnknown => align(sym::Unknown),
                };
                let align = self.ecx.expr_path(align);
                let flags = self.ecx.expr_u32(sp, arg.format.flags);
                let prec = self.build_count(arg.format.precision);
                let width = self.build_count(arg.format.width);
                // Assemble `rt::v1::FormatSpec { fill, align, flags, precision, width }`.
                let path = self.ecx.path_global(sp, Context::rtpath(self.ecx, sym::FormatSpec));
                let fmt = self.ecx.expr_struct(
                    sp,
                    path,
                    vec![
                        self.ecx.field_imm(sp, Ident::new(sym::fill, sp), fill),
                        self.ecx.field_imm(sp, Ident::new(sym::align, sp), align),
                        self.ecx.field_imm(sp, Ident::new(sym::flags, sp), flags),
                        self.ecx.field_imm(sp, Ident::new(sym::precision, sp), prec),
                        self.ecx.field_imm(sp, Ident::new(sym::width, sp), width),
                    ],
                );
                // Wrap it in `rt::v1::Argument { position, format }`.
                let path = self.ecx.path_global(sp, Context::rtpath(self.ecx, sym::Argument));
                Some(self.ecx.expr_struct(
                    sp,
                    path,
                    vec![
                        self.ecx.field_imm(sp, Ident::new(sym::position, sp), pos),
                        self.ecx.field_imm(sp, Ident::new(sym::format, sp), fmt),
                    ],
                ))
            }
        }
    }
    /// Actually builds the expression which the format_args! block will be
    /// expanded to.
    fn into_expr(self) -> P<ast::Expr> {
        // One slot per (argument, unique format spec) pair, plus the count slots.
        let mut args = Vec::with_capacity(
            self.arg_unique_types.iter().map(|v| v.len()).sum::<usize>() + self.count_args.len(),
        );
        let mut heads = Vec::with_capacity(self.args.len());
        // First, build up the static array which will become our precompiled
        // format "string"
        let pieces = self.ecx.expr_vec_slice(self.fmtsp, self.str_pieces);
        // Before consuming the expressions, we have to remember spans for
        // count arguments as they are now generated separate from other
        // arguments, hence have no access to the `P<ast::Expr>`'s.
        let spans_pos: Vec<_> = self.args.iter().map(|e| e.span).collect();
        // Right now there is a bug such that for the expression:
        //      foo(bar(&1))
        // the lifetime of `1` doesn't outlast the call to `bar`, so it's not
        // valid for the call to `foo`. To work around this all arguments to the
        // format! string are shoved into locals. Furthermore, we shove the address
        // of each variable because we don't want to move out of the arguments
        // passed to this function.
        for (i, e) in self.args.into_iter().enumerate() {
            for arg_ty in self.arg_unique_types[i].iter() {
                args.push(Context::format_arg(self.ecx, self.macsp, e.span, arg_ty, i));
            }
            heads.push(self.ecx.expr_addr_of(e.span, e));
        }
        for pos in self.count_args {
            let index = match pos {
                Exact(i) => i,
                _ => panic!("should never happen"),
            };
            let span = spans_pos[index];
            args.push(Context::format_arg(self.ecx, self.macsp, span, &Count, index));
        }
        let args_array = self.ecx.expr_vec(self.macsp, args);
        // Constructs an AST equivalent to:
        //
        //      match (&arg0, &arg1) {
        //          (tmp0, tmp1) => args_array
        //      }
        //
        // It was:
        //
        //      let tmp0 = &arg0;
        //      let tmp1 = &arg1;
        //      args_array
        //
        // Because of #11585 the new temporary lifetime rule, the enclosing
        // statements for these temporaries become the let's themselves.
        // If one or more of them are RefCell's, RefCell borrow() will also
        // end there; they don't last long enough for args_array to use them.
        // The match expression solves the scope problem.
        //
        // Note, it may also very well be transformed to:
        //
        //      match arg0 {
        //          ref tmp0 => {
        //              match arg1 => {
        //                  ref tmp1 => args_array } } }
        //
        // But the nested match expression is proved to perform not as well
        // as series of let's; the first approach does.
        let args_match = {
            let pat = self.ecx.pat_ident(self.macsp, Ident::new(sym::_args, self.macsp));
            let arm = self.ecx.arm(self.macsp, pat, args_array);
            let head = self.ecx.expr(self.macsp, ast::ExprKind::Tup(heads));
            self.ecx.expr_match(self.macsp, head, vec![arm])
        };
        let args_slice = self.ecx.expr_addr_of(self.macsp, args_match);
        // Now create the fmt::Arguments struct with all our locals we created.
        // The simple constructor suffices when every piece uses default
        // formatting; otherwise the full spec array plus an `UnsafeArg` token
        // are passed to `new_v1_formatted`.
        let (fn_name, fn_args) = if self.all_pieces_simple {
            ("new_v1", vec![pieces, args_slice])
        } else {
            // Build up the static array which will store our precompiled
            // nonstandard placeholders, if there are any.
            let fmt = self.ecx.expr_vec_slice(self.macsp, self.pieces);
            let path = self.ecx.std_path(&[sym::fmt, sym::UnsafeArg, sym::new]);
            let unsafe_arg = self.ecx.expr_call_global(self.macsp, path, Vec::new());
            let unsafe_expr = self.ecx.expr_block(P(ast::Block {
                stmts: vec![self.ecx.stmt_expr(unsafe_arg)],
                id: ast::DUMMY_NODE_ID,
                rules: BlockCheckMode::Unsafe(UnsafeSource::CompilerGenerated),
                span: self.macsp,
                tokens: None,
                could_be_bare_literal: false,
            }));
            ("new_v1_formatted", vec![pieces, args_slice, fmt, unsafe_expr])
        };
        let path = self.ecx.std_path(&[sym::fmt, sym::Arguments, Symbol::intern(fn_name)]);
        self.ecx.expr_call_global(self.macsp, path, fn_args)
    }
    /// Build the expression wrapping one formatting argument for the
    /// generated `fmt::Arguments` value.
    ///
    /// `arg_index` selects the `arg_index`-th field of the `_args` tuple that
    /// the surrounding `match` binds (see the `_args` pattern in `into_expr`).
    /// For a `Placeholder` the result is `ArgumentV1::new(arg, <Trait>::fmt)`;
    /// for a `Count` it is `ArgumentV1::from_usize(arg)`.
    fn format_arg(
        ecx: &ExtCtxt<'_>,
        macsp: Span,
        mut sp: Span,
        ty: &ArgumentType,
        arg_index: usize,
    ) -> P<ast::Expr> {
        sp = ecx.with_def_site_ctxt(sp);
        let arg = ecx.expr_ident(sp, Ident::new(sym::_args, sp));
        let arg = ecx.expr(sp, ast::ExprKind::Field(arg, Ident::new(sym::integer(arg_index), sp)));
        let trait_ = match *ty {
            // "<invalid>" marks a placeholder that already produced an error;
            // return a dummy so expansion can keep going.
            Placeholder(trait_) if trait_ == "<invalid>" => return DummyResult::raw_expr(sp, true),
            Placeholder(trait_) => trait_,
            Count => {
                let path = ecx.std_path(&[sym::fmt, sym::ArgumentV1, sym::from_usize]);
                return ecx.expr_call_global(macsp, path, vec![arg]);
            }
        };
        let path = ecx.std_path(&[sym::fmt, Symbol::intern(trait_), sym::fmt]);
        let format_fn = ecx.path_global(sp, path);
        let path = ecx.std_path(&[sym::fmt, sym::ArgumentV1, sym::new]);
        ecx.expr_call_global(macsp, path, vec![arg, ecx.expr_path(format_fn)])
    }
}
/// Shared driver for `format_args!` and `format_args_nl!`: parse the macro
/// arguments, then lower them to a `fmt::Arguments` expression.
///
/// `nl` selects the `format_args_nl!` behavior (append a trailing newline to
/// the format string). Parse errors are emitted here and a dummy result is
/// returned so expansion can continue.
fn expand_format_args_impl<'cx>(
    ecx: &'cx mut ExtCtxt<'_>,
    mut sp: Span,
    tts: TokenStream,
    nl: bool,
) -> Box<dyn base::MacResult + 'cx> {
    sp = ecx.with_def_site_ctxt(sp);
    match parse_args(ecx, sp, tts) {
        Ok((efmt, args, names)) => {
            MacEager::expr(expand_preparsed_format_args(ecx, sp, efmt, args, names, nl))
        }
        Err(mut err) => {
            err.emit();
            DummyResult::any(sp)
        }
    }
}
/// Expand the `format_args!` macro (no trailing newline).
pub fn expand_format_args<'cx>(
    ecx: &'cx mut ExtCtxt<'_>,
    sp: Span,
    tts: TokenStream,
) -> Box<dyn base::MacResult + 'cx> {
    expand_format_args_impl(ecx, sp, tts, false)
}
/// Expand the `format_args_nl!` macro (appends a trailing newline, used by
/// `println!`-style macros).
pub fn expand_format_args_nl<'cx>(
    ecx: &'cx mut ExtCtxt<'_>,
    sp: Span,
    tts: TokenStream,
) -> Box<dyn base::MacResult + 'cx> {
    expand_format_args_impl(ecx, sp, tts, true)
}
/// Take the various parts of `format_args!(efmt, args..., name=names...)`
/// and construct the appropriate formatting expression.
///
/// `efmt` must resolve to a string literal; it is parsed into pieces, the
/// pieces are verified against `args`/`names`, and the result is lowered to a
/// `fmt::Arguments` constructor call. On any error a diagnostic is emitted
/// and a dummy expression is returned so expansion can continue.
pub fn expand_preparsed_format_args(
    ecx: &mut ExtCtxt<'_>,
    sp: Span,
    efmt: P<ast::Expr>,
    args: Vec<P<ast::Expr>>,
    names: FxHashMap<Symbol, usize>,
    append_newline: bool,
) -> P<ast::Expr> {
    // NOTE: this verbose way of initializing `Vec<Vec<ArgumentType>>` is because
    // `ArgumentType` does not derive `Clone`.
    let arg_types: Vec<_> = (0..args.len()).map(|_| Vec::new()).collect();
    let arg_unique_types: Vec<_> = (0..args.len()).map(|_| Vec::new()).collect();
    let mut macsp = ecx.call_site();
    macsp = ecx.with_def_site_ctxt(macsp);
    let msg = "format argument must be a string literal";
    let fmt_sp = efmt.span;
    let efmt_kind_is_lit: bool = matches!(efmt.kind, ast::ExprKind::Lit(_));
    let (fmt_str, fmt_style, fmt_span) = match expr_to_spanned_string(ecx, efmt, msg) {
        Ok(mut fmt) if append_newline => {
            fmt.0 = Symbol::intern(&format!("{}\n", fmt.0));
            fmt
        }
        Ok(fmt) => fmt,
        Err(err) => {
            if let Some((mut err, suggested)) = err {
                let sugg_fmt = match args.len() {
                    0 => "{}".to_string(),
                    _ => format!("{}{{}}", "{} ".repeat(args.len())),
                };
                if !suggested {
                    err.span_suggestion(
                        fmt_sp.shrink_to_lo(),
                        "you might be missing a string literal to format with",
                        format!("\"{}\", ", sugg_fmt),
                        Applicability::MaybeIncorrect,
                    );
                }
                err.emit();
            }
            return DummyResult::raw_expr(sp, true);
        }
    };
    let str_style = match fmt_style {
        ast::StrStyle::Cooked => None,
        ast::StrStyle::Raw(raw) => Some(raw as usize),
    };
    let fmt_str = &fmt_str.as_str(); // for the suggestions below
    let fmt_snippet = ecx.source_map().span_to_snippet(fmt_sp).ok();
    let mut parser = parse::Parser::new(
        fmt_str,
        str_style,
        fmt_snippet,
        append_newline,
        parse::ParseMode::Format,
    );
    // Collect pieces until the first parse error; the error (if any) is
    // reported below with a span pointing into the format string.
    let mut unverified_pieces = Vec::new();
    while let Some(piece) = parser.next() {
        if !parser.errors.is_empty() {
            break;
        } else {
            unverified_pieces.push(piece);
        }
    }
    if !parser.errors.is_empty() {
        let err = parser.errors.remove(0);
        let sp = if efmt_kind_is_lit {
            fmt_span.from_inner(err.span)
        } else {
            // The format string could be another macro invocation, e.g.:
            //     format!(concat!("abc", "{}"), 4);
            // However, `err.span` is an inner span relative to the *result* of
            // the macro invocation, which is why we would get a nonsensical
            // result calling `fmt_span.from_inner(err.span)` as above, and
            // might even end up inside a multibyte character (issue #86085).
            // Therefore, we conservatively report the error for the entire
            // argument span here.
            fmt_span
        };
        let mut e = ecx.struct_span_err(sp, &format!("invalid format string: {}", err.description));
        e.span_label(sp, err.label + " in format string");
        if let Some(note) = err.note {
            // FIX: this was garbled to `e.note(¬e);` (an `&not` HTML entity
            // swallowed `&n` of `&note` somewhere in the file's history).
            e.note(&note);
        }
        if let Some((label, span)) = err.secondary_label {
            let sp = fmt_span.from_inner(span);
            e.span_label(sp, label);
        }
        e.emit();
        return DummyResult::raw_expr(sp, true);
    }
    let arg_spans = parser.arg_places.iter().map(|span| fmt_span.from_inner(*span)).collect();
    let named_pos: FxHashSet<usize> = names.values().cloned().collect();
    let mut cx = Context {
        ecx,
        args,
        arg_types,
        arg_unique_types,
        names,
        curarg: 0,
        curpiece: 0,
        arg_index_map: Vec::new(),
        count_args: Vec::new(),
        count_positions: FxHashMap::default(),
        count_positions_count: 0,
        count_args_index_offset: 0,
        literal: String::new(),
        pieces: Vec::with_capacity(unverified_pieces.len()),
        str_pieces: Vec::with_capacity(unverified_pieces.len()),
        all_pieces_simple: true,
        macsp,
        fmtsp: fmt_span,
        invalid_refs: Vec::new(),
        arg_spans,
        arg_with_formatting: Vec::new(),
        is_literal: parser.is_literal,
    };
    // This needs to happen *after* the Parser has consumed all pieces to create all the spans
    let pieces = unverified_pieces
        .into_iter()
        .map(|mut piece| {
            cx.verify_piece(&piece);
            cx.resolve_name_inplace(&mut piece);
            piece
        })
        .collect::<Vec<_>>();
    let numbered_position_args = pieces.iter().any(|arg: &parse::Piece<'_>| match *arg {
        parse::String(_) => false,
        parse::NextArgument(arg) => matches!(arg.position, parse::Position::ArgumentIs(_)),
    });
    cx.build_index_map();
    let mut arg_index_consumed = vec![0usize; cx.arg_index_map.len()];
    for piece in pieces {
        if let Some(piece) = cx.build_piece(&piece, &mut arg_index_consumed) {
            let s = cx.build_literal_string();
            cx.str_pieces.push(s);
            cx.pieces.push(piece);
        }
    }
    if !cx.literal.is_empty() {
        let s = cx.build_literal_string();
        cx.str_pieces.push(s);
    }
    if !cx.invalid_refs.is_empty() {
        cx.report_invalid_references(numbered_position_args);
    }
    // Make sure that all arguments were used and all arguments have types.
    let errs = cx
        .arg_types
        .iter()
        .enumerate()
        .filter(|(i, ty)| ty.is_empty() && !cx.count_positions.contains_key(&i))
        .map(|(i, _)| {
            let msg = if named_pos.contains(&i) {
                // named argument
                "named argument never used"
            } else {
                // positional argument
                "argument never used"
            };
            (cx.args[i].span, msg)
        })
        .collect::<Vec<_>>();
    let errs_len = errs.len();
    if !errs.is_empty() {
        let args_used = cx.arg_types.len() - errs_len;
        let args_unused = errs_len;
        let mut diag = {
            if let [(sp, msg)] = &errs[..] {
                let mut diag = cx.ecx.struct_span_err(*sp, *msg);
                diag.span_label(*sp, *msg);
                diag
            } else {
                let mut diag = cx.ecx.struct_span_err(
                    errs.iter().map(|&(sp, _)| sp).collect::<Vec<Span>>(),
                    "multiple unused formatting arguments",
                );
                diag.span_label(cx.fmtsp, "multiple missing formatting specifiers");
                for (sp, msg) in errs {
                    diag.span_label(sp, msg);
                }
                diag
            }
        };
        // Used to ensure we only report translations for *one* kind of foreign format.
        let mut found_foreign = false;
        // Decide if we want to look for foreign formatting directives.
        if args_used < args_unused {
            use super::format_foreign as foreign;
            // The set of foreign substitutions we've explained. This prevents spamming the user
            // with `%d should be written as {}` over and over again.
            let mut explained = FxHashSet::default();
            macro_rules! check_foreign {
                ($kind:ident) => {{
                    let mut show_doc_note = false;
                    let mut suggestions = vec![];
                    // account for `"` and account for raw strings `r#`
                    let padding = str_style.map(|i| i + 2).unwrap_or(1);
                    for sub in foreign::$kind::iter_subs(fmt_str, padding) {
                        let (trn, success) = match sub.translate() {
                            Ok(trn) => (trn, true),
                            Err(Some(msg)) => (msg, false),
                            // If it has no translation, don't call it out specifically.
                            _ => continue,
                        };
                        let pos = sub.position();
                        let sub = String::from(sub.as_str());
                        if explained.contains(&sub) {
                            continue;
                        }
                        explained.insert(sub.clone());
                        if !found_foreign {
                            found_foreign = true;
                            show_doc_note = true;
                        }
                        if let Some(inner_sp) = pos {
                            let sp = fmt_sp.from_inner(inner_sp);
                            if success {
                                suggestions.push((sp, trn));
                            } else {
                                diag.span_note(
                                    sp,
                                    &format!("format specifiers use curly braces, and {}", trn),
                                );
                            }
                        } else {
                            if success {
                                diag.help(&format!("`{}` should be written as `{}`", sub, trn));
                            } else {
                                diag.note(&format!(
                                    "`{}` should use curly braces, and {}",
                                    sub, trn
                                ));
                            }
                        }
                    }
                    if show_doc_note {
                        diag.note(concat!(
                            stringify!($kind),
                            " formatting not supported; see the documentation for `std::fmt`",
                        ));
                    }
                    if suggestions.len() > 0 {
                        diag.multipart_suggestion(
                            "format specifiers use curly braces",
                            suggestions,
                            Applicability::MachineApplicable,
                        );
                    }
                }};
            }
            check_foreign!(printf);
            if !found_foreign {
                check_foreign!(shell);
            }
        }
        if !found_foreign && errs_len == 1 {
            diag.span_label(cx.fmtsp, "formatting specifier missing");
        }
        diag.emit();
    }
    cx.into_expr()
}
| 40.208645 | 105 | 0.474954 |
397d6ebf2aa9d0c2f714ba54fb97a2a54d79573c | 1,603 | extern crate handlebars;
use handlebars::{Handlebars, JsonRender, RenderContext, RenderError, Helper, Context, Output};
extern crate pulldown_cmark;
use self::pulldown_cmark::Parser;
use self::pulldown_cmark::html;
/// Render a Markdown string to an HTML string.
pub fn render_html(text: String) -> String {
    // Reserve roughly 1.5x the Markdown length up front; the HTML output is
    // usually somewhat larger than the input.
    let mut rendered = String::with_capacity(text.len() * 3 / 2);
    let parser = Parser::new(text.as_str());
    html::push_html(&mut rendered, parser);
    rendered
}
pub fn markdown_helper(h: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut Output)
-> Result<(), RenderError> {
let markdown_text_var =
try!(h.param(0)
.ok_or_else(|| RenderError::new("Param not found for helper \"markdown\"")));
let markdown_text = markdown_text_var.value().render();
let html_string = render_html(markdown_text);
try!(out.write(&html_string));
Ok(())
}
#[cfg(test)]
mod test {
    use handlebars::Handlebars;
    use std::collections::BTreeMap;
    // End-to-end check: register the helper, render a template over a map,
    // and verify the Markdown in `x` comes out as HTML headings.
    #[test]
    fn test_markdown() {
        let t0 = "{{markdown x}}";
        let mut handlebars = Handlebars::new();
        handlebars.register_helper("markdown", Box::new(::markdown_helper));
        assert!(handlebars.register_template_string("t0", t0).is_ok());
        let mut m: BTreeMap<String, String> = BTreeMap::new();
        m.insert("x".into(), "# wow\n\n## second wow".into());
        let r0 = handlebars.render("t0", &m);
        assert_eq!(r0.ok().unwrap(),
                   "<h1>wow</h1>\n<h2>second wow</h2>\n".to_string());
    }
}
| 31.431373 | 94 | 0.578915 |
f432a95894c4bd0bb6c6525a7b0829c7760f423d | 11,414 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use crate::user_info::UserInfo;
use rusty_leveldb::LdbIterator;
use rusty_leveldb::DB;
use std::path::Path;
use std::prelude::v1::*;
use std::sync::mpsc::{channel, Sender};
use std::thread;
use thiserror::Error;
/// Errors surfaced by the user database and its client handles.
#[derive(Error, Debug)]
pub(crate) enum DbError {
    /// A get/update/delete targeted an id that is not stored.
    #[error("user not exist")]
    UserNotExist,
    /// A create targeted an id that already exists.
    #[error("user exist")]
    UserExist,
    /// The channel to the database thread was closed (send or recv failed).
    #[error("mpsc error")]
    ConnectionError,
    /// The underlying LevelDB operation (put/flush/iterate) failed.
    #[error("leveldb error")]
    LevelDbInternalError,
    /// The database thread answered with an unexpected variant or bad JSON.
    #[error("invalid response")]
    InvalidResponse,
    /// The request payload could not be serialized.
    #[error("invalid request")]
    InvalidRequest,
}
// A failed send means the database thread is gone; fold it into
// `ConnectionError` so callers can use `?` on channel operations.
impl<T> From<std::sync::mpsc::SendError<T>> for DbError {
    fn from(_error: std::sync::mpsc::SendError<T>) -> Self {
        DbError::ConnectionError
    }
}
// Same mapping for a failed receive on the reply channel.
impl From<std::sync::mpsc::RecvError> for DbError {
    fn from(_error: std::sync::mpsc::RecvError) -> Self {
        DbError::ConnectionError
    }
}
/// Point-lookup request: the raw key bytes.
#[derive(Clone)]
struct GetRequest {
    key: Vec<u8>,
}
/// Successful lookup result: the raw value bytes.
#[derive(Clone)]
struct GetResponse {
    value: Vec<u8>,
}
/// Listing request; `key` is an attribute filter (empty string matches all —
/// see the `DbRequest::List` handling in `Database::open`).
#[derive(Clone)]
struct ListRequest {
    key: String,
}
/// Listing result: the matching user ids.
#[derive(Clone)]
struct ListResponse {
    values: Vec<String>,
}
/// Insert-if-absent request.
#[derive(Clone)]
struct CreateRequest {
    key: Vec<u8>,
    value: Vec<u8>,
}
/// Overwrite-if-present request.
#[derive(Clone)]
struct UpdateRequest {
    key: Vec<u8>,
    value: Vec<u8>,
}
/// Delete-by-key request.
#[derive(Clone)]
struct DeleteRequest {
    key: Vec<u8>,
}
/// Every operation the database thread understands.
#[derive(Clone)]
enum DbRequest {
    Get(GetRequest),
    Create(CreateRequest),
    Update(UpdateRequest),
    Delete(DeleteRequest),
    List(ListRequest),
    Ping,
}
/// Success reply for each `DbRequest` variant.
#[derive(Clone)]
enum DbResponse {
    Get(GetResponse),
    List(ListResponse),
    Create,
    Delete,
    Update,
    Ping,
}
/// One in-flight call: the request plus the one-shot channel to answer on.
#[derive(Clone)]
struct DBCall {
    pub sender: Sender<Result<DbResponse, DbError>>,
    pub request: DbRequest,
}
/// Owner handle for the database worker thread's request channel.
/// Created by `Database::open`; hands out cloneable `DbClient`s via
/// `get_client`.
pub(crate) struct Database {
    sender: Sender<DBCall>,
}
/// Open (creating if needed) the encrypted on-disk authentication database
/// under `base_dir`, panicking if LevelDB cannot be opened.
#[cfg(not(test_mode))]
pub(crate) fn create_persistent_auth_db(base_dir: impl AsRef<Path>) -> DB {
    // NOTE(review): the 16-byte database key is hard-coded here — confirm
    // this is intentional rather than meant to come from sealed storage.
    let key = [
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09,
        0x08,
    ];
    let options = rusty_leveldb::Options::new_disk_db_with(key);
    let path = base_dir.as_ref().join("authentication_db");
    log::info!("open auth db: {:?}", path);
    DB::open(path, options).unwrap()
}
/// Open a throwaway in-memory LevelDB instance for test builds.
///
/// The base directory is accepted (and ignored) so both constructors share
/// the same signature and `Database::open` can select one via `cfg`.
#[cfg(test_mode)]
pub(crate) fn create_in_memory_auth_db(_base_dir: impl AsRef<Path>) -> DB {
    let opt = rusty_leveldb::in_memory();
    log::info!("open in_memory auth db");
    DB::open("authentication_db", opt).unwrap()
}
impl Database {
    /// Spawn the single thread that owns the LevelDB handle and return a
    /// `Database` that can hand out clients.
    ///
    /// All access is serialized through an mpsc channel: clients send a
    /// `DBCall` (request + per-call reply sender) and the worker answers on
    /// that reply channel. A `ping` is issued before returning so a failure
    /// to open the database surfaces here rather than on the first request.
    pub(crate) fn open(db_base: impl AsRef<Path>) -> Result<Self, DbError> {
        let (sender, receiver) = channel();
        let db_base = db_base.as_ref().to_owned();
        thread::spawn(move || {
            #[cfg(not(test_mode))]
            let mut database = create_persistent_auth_db(&db_base);
            #[cfg(test_mode)]
            let mut database = create_in_memory_auth_db(&db_base);
            loop {
                // recv() fails once every sender (Database + all DbClients)
                // is dropped; that ends the worker thread.
                let call: DBCall = match receiver.recv() {
                    Ok(req) => req,
                    Err(e) => {
                        // NOTE(review): "mspc" looks like a typo for "mpsc".
                        warn!("mspc receive error: {}", e);
                        break;
                    }
                };
                let sender = call.sender;
                let response = match call.request {
                    DbRequest::Get(request) => match database.get(&request.key) {
                        Some(value) => Ok(DbResponse::Get(GetResponse { value })),
                        None => Err(DbError::UserNotExist),
                    },
                    DbRequest::Delete(request) => match database.delete(&request.key) {
                        Ok(_) => Ok(DbResponse::Delete),
                        Err(_) => Err(DbError::UserNotExist),
                    },
                    // Create: insert-if-absent, flushed to disk before replying.
                    DbRequest::Create(request) => match database.get(&request.key) {
                        Some(_) => Err(DbError::UserExist),
                        None => match database.put(&request.key, &request.value) {
                            Ok(_) => match database.flush() {
                                Ok(_) => Ok(DbResponse::Create),
                                Err(_) => Err(DbError::LevelDbInternalError),
                            },
                            Err(_) => Err(DbError::LevelDbInternalError),
                        },
                    },
                    // Update: overwrite-if-present, flushed to disk before replying.
                    DbRequest::Update(request) => match database.get(&request.key) {
                        Some(_) => match database.put(&request.key, &request.value) {
                            Ok(_) => match database.flush() {
                                Ok(_) => Ok(DbResponse::Update),
                                Err(_) => Err(DbError::LevelDbInternalError),
                            },
                            Err(_) => Err(DbError::LevelDbInternalError),
                        },
                        None => Err(DbError::UserNotExist),
                    },
                    // List: full scan; an empty filter key matches every user.
                    DbRequest::List(request) => match database.new_iter() {
                        Ok(mut iter) => {
                            let mut values = Vec::new();
                            while let Some((_, ref value)) = iter.next() {
                                let user: UserInfo =
                                    serde_json::from_slice(value).unwrap_or_default();
                                if (!request.key.is_empty() && user.has_attribute(&request.key))
                                    || request.key.is_empty()
                                {
                                    values.push(user.id);
                                }
                            }
                            Ok(DbResponse::List(ListResponse { values }))
                        }
                        Err(_) => Err(DbError::LevelDbInternalError),
                    },
                    DbRequest::Ping => Ok(DbResponse::Ping),
                };
                match sender.send(response) {
                    Ok(_) => (),
                    Err(e) => warn!("mpsc send error: {}", e),
                }
            }
        });
        let database = Self { sender };
        let client = database.get_client();
        // Check whether the user database is successfully opened.
        client.ping()?;
        Ok(database)
    }
    /// Create a new lightweight client connected to the worker thread.
    pub(crate) fn get_client(&self) -> DbClient {
        DbClient {
            sender: self.sender.clone(),
        }
    }
}
/// Cheap, cloneable handle for issuing requests to the database thread.
#[derive(Clone)]
pub(crate) struct DbClient {
    sender: Sender<DBCall>,
}
impl DbClient {
pub(crate) fn get_user(&self, id: &str) -> Result<UserInfo, DbError> {
let (sender, receiver) = channel();
let request = DbRequest::Get(GetRequest {
key: id.as_bytes().to_vec(),
});
let call = DBCall { sender, request };
self.sender.send(call)?;
let result = receiver.recv()?;
let db_response = result?;
match db_response {
DbResponse::Get(response) => {
let user = serde_json::from_slice(&response.value)
.map_err(|_| DbError::InvalidResponse)?;
Ok(user)
}
_ => Err(DbError::UserNotExist),
}
}
pub(crate) fn delete_user(&self, id: &str) -> Result<(), DbError> {
let (sender, receiver) = channel();
let request = DbRequest::Delete(DeleteRequest {
key: id.as_bytes().to_vec(),
});
let call = DBCall { sender, request };
self.sender.send(call)?;
let result = receiver.recv()?;
let db_response = result?;
match db_response {
DbResponse::Delete => Ok(()),
_ => Err(DbError::UserNotExist),
}
}
pub(crate) fn create_user(&self, user: &UserInfo) -> Result<(), DbError> {
let (sender, receiver) = channel();
let user_bytes = serde_json::to_vec(&user).map_err(|_| DbError::InvalidRequest)?;
let request = DbRequest::Create(CreateRequest {
key: user.id.as_bytes().to_vec(),
value: user_bytes.to_vec(),
});
let call = DBCall { sender, request };
self.sender.send(call)?;
let result = receiver.recv()?;
let db_response = result?;
match db_response {
DbResponse::Create => Ok(()),
_ => Err(DbError::InvalidResponse),
}
}
pub(crate) fn list_users_by_attribute(&self, attribute: &str) -> Result<Vec<String>, DbError> {
let (sender, receiver) = channel();
let request = DbRequest::List(ListRequest {
key: attribute.to_string(),
});
let call = DBCall { sender, request };
self.sender.send(call)?;
let result = receiver.recv()?;
let db_response = result?;
match db_response {
DbResponse::List(response) => Ok(response.values),
_ => Err(DbError::UserNotExist),
}
}
pub(crate) fn list_users(&self) -> Result<Vec<String>, DbError> {
let (sender, receiver) = channel();
let request = DbRequest::List(ListRequest {
key: "".to_string(),
});
let call = DBCall { sender, request };
self.sender.send(call)?;
let result = receiver.recv()?;
let db_response = result?;
match db_response {
DbResponse::List(response) => Ok(response.values),
_ => Err(DbError::UserNotExist),
}
}
pub(crate) fn update_user(&self, user: &UserInfo) -> Result<(), DbError> {
let (sender, receiver) = channel();
let user_bytes = serde_json::to_vec(&user).map_err(|_| DbError::InvalidRequest)?;
let request = DbRequest::Update(UpdateRequest {
key: user.id.as_bytes().to_vec(),
value: user_bytes.to_vec(),
});
let call = DBCall { sender, request };
self.sender.send(call)?;
let result = receiver.recv()?;
let db_response = result?;
match db_response {
DbResponse::Update => Ok(()),
_ => Err(DbError::InvalidResponse),
}
}
// Check whether the database is opened successfully.
fn ping(&self) -> Result<(), DbError> {
let (sender, receiver) = channel();
let request = DbRequest::Ping;
let call = DBCall { sender, request };
self.sender.send(call)?;
let result = receiver.recv()?;
let db_response = result?;
match db_response {
DbResponse::Ping => Ok(()),
_ => Err(DbError::InvalidResponse),
}
}
}
| 32.988439 | 99 | 0.530927 |
d7a2a140a5f5a23ebe0fe57339394545c942718b | 8,383 | use managed::ManagedSlice;
use {Error, Result};
use super::RingBuffer;
/// Size and header of a packet.
///
/// A `None` header marks a padding entry: payload-ring space that was skipped
/// so a packet could be stored contiguously.
#[derive(Debug, Clone, Copy)]
pub struct PacketMetadata<H> {
    size: usize,
    header: Option<H>
}
impl<H> PacketMetadata<H> {
    /// Create an empty packet description.
    pub fn empty() -> PacketMetadata<H> {
        Self::padding(0)
    }
    /// Describe `size` bytes of padding (no header).
    fn padding(size: usize) -> PacketMetadata<H> {
        PacketMetadata { size, header: None }
    }
    /// Describe a real packet of `size` bytes carrying `header`.
    fn packet(size: usize, header: H) -> PacketMetadata<H> {
        PacketMetadata { size, header: Some(header) }
    }
    /// Whether this entry is padding rather than a packet.
    fn is_padding(&self) -> bool {
        self.header.is_none()
    }
}
/// An UDP packet ring buffer.
///
/// Two rings in lockstep: `metadata_ring` holds one entry per stored packet
/// (or padding marker), `payload_ring` holds the packets' raw bytes.
#[derive(Debug)]
pub struct PacketBuffer<'a, 'b, H: 'a> {
    metadata_ring: RingBuffer<'a, PacketMetadata<H>>,
    payload_ring: RingBuffer<'b, u8>,
}
impl<'a, 'b, H> PacketBuffer<'a, 'b, H> {
    /// Create a new packet buffer with the provided metadata and payload storage.
    ///
    /// Metadata storage limits the maximum _number_ of packets in the buffer and payload
    /// storage limits the maximum _total size_ of packets.
    pub fn new<MS, PS>(metadata_storage: MS, payload_storage: PS) -> PacketBuffer<'a, 'b, H>
        where MS: Into<ManagedSlice<'a, PacketMetadata<H>>>,
              PS: Into<ManagedSlice<'b, u8>>,
    {
        PacketBuffer {
            metadata_ring: RingBuffer::new(metadata_storage),
            payload_ring: RingBuffer::new(payload_storage),
        }
    }
    /// Query whether the buffer is empty.
    pub fn is_empty(&self) -> bool {
        self.metadata_ring.is_empty()
    }
    /// Query whether the buffer is full.
    pub fn is_full(&self) -> bool {
        self.metadata_ring.is_full()
    }
    // There is currently no enqueue_with() because of the complexity of managing padding
    // in case of failure.
    /// Enqueue a single packet with the given header into the buffer, and
    /// return a reference to its payload, or return `Err(Error::Exhausted)`
    /// if the buffer is full, or return `Err(Error::Truncated)` if the buffer
    /// does not have enough spare payload space.
    pub fn enqueue(&mut self, size: usize, header: H) -> Result<&mut [u8]> {
        // A packet larger than the whole payload ring can never fit.
        if self.payload_ring.capacity() < size {
            return Err(Error::Truncated)
        }
        if self.metadata_ring.is_full() {
            return Err(Error::Exhausted)
        }
        let window = self.payload_ring.window();
        let contig_window = self.payload_ring.contiguous_window();
        // Reject when the packet does not fit the free window, even after
        // skipping the non-contiguous tail via a padding entry.
        if window < size || (window != contig_window && window - contig_window < size) {
            return Err(Error::Exhausted)
        }
        if contig_window < size {
            // The packet would wrap: record a padding entry so the reader
            // knows to skip past the end of the ring.
            // NOTE(review): padding is recorded as `size` while the skipped
            // region is the contiguous tail — confirm this stays consistent
            // with `RingBuffer::enqueue_many`/`dequeue_many` semantics.
            *self.metadata_ring.enqueue_one()? = PacketMetadata::padding(size);
            self.payload_ring.enqueue_many(size);
        }
        *self.metadata_ring.enqueue_one()? = PacketMetadata::packet(size, header);
        let payload_buf = self.payload_ring.enqueue_many(size);
        debug_assert!(payload_buf.len() == size);
        Ok(payload_buf)
    }
    /// Drop a leading padding entry (if any) from both rings, so the next
    /// dequeue starts at a real packet.
    fn dequeue_padding(&mut self) {
        let Self { ref mut metadata_ring, ref mut payload_ring } = *self;
        let _ = metadata_ring.dequeue_one_with(|metadata| {
            if metadata.is_padding() {
                payload_ring.dequeue_many(metadata.size);
                Ok(()) // dequeue metadata
            } else {
                Err(Error::Exhausted) // don't dequeue metadata
            }
        });
    }
    /// Call `f` with a single packet from the buffer, and dequeue the packet if `f`
    /// returns successfully, or return `Err(Error::Exhausted)` if the buffer is empty.
    pub fn dequeue_with<'c, R, F>(&'c mut self, f: F) -> Result<R>
        where F: FnOnce(&mut H, &'c mut [u8]) -> Result<R> {
        self.dequeue_padding();
        let Self { ref mut metadata_ring, ref mut payload_ring } = *self;
        metadata_ring.dequeue_one_with(move |metadata| {
            let PacketMetadata { ref mut header, size } = *metadata;
            payload_ring.dequeue_many_with(|payload_buf| {
                debug_assert!(payload_buf.len() >= size);
                // On Err from `f`, consume 0 bytes so the packet stays queued.
                match f(header.as_mut().unwrap(), &mut payload_buf[..size]) {
                    Ok(val) => (size, Ok(val)),
                    Err(err) => (0, Err(err)),
                }
            }).1
        })
    }
    /// Dequeue a single packet from the buffer, and return a reference to its payload
    /// as well as its header, or return `Err(Error::Exhausted)` if the buffer is empty.
    pub fn dequeue(&mut self) -> Result<(H, &mut [u8])> {
        self.dequeue_padding();
        let PacketMetadata { ref mut header, size } = *self.metadata_ring.dequeue_one()?;
        let payload_buf = self.payload_ring.dequeue_many(size);
        debug_assert!(payload_buf.len() == size);
        Ok((header.take().unwrap(), payload_buf))
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // 4 metadata slots, 16 payload bytes — small enough to exercise wrapping.
    fn buffer() -> PacketBuffer<'static, 'static, ()> {
        PacketBuffer::new(vec![PacketMetadata::empty(); 4],
                          vec![0u8; 16])
    }
    // Basic enqueue/dequeue round trip plus oversize rejection.
    #[test]
    fn test_simple() {
        let mut buffer = buffer();
        buffer.enqueue(6, ()).unwrap().copy_from_slice(b"abcdef");
        assert_eq!(buffer.enqueue(16, ()), Err(Error::Exhausted));
        assert_eq!(buffer.metadata_ring.len(), 1);
        assert_eq!(buffer.dequeue().unwrap().1, &b"abcdef"[..]);
        assert_eq!(buffer.dequeue(), Err(Error::Exhausted));
    }
    // A wrapping enqueue inserts a padding metadata entry (hence len == 3).
    #[test]
    fn test_padding() {
        let mut buffer = buffer();
        assert!(buffer.enqueue(6, ()).is_ok());
        assert!(buffer.enqueue(8, ()).is_ok());
        assert!(buffer.dequeue().is_ok());
        buffer.enqueue(4, ()).unwrap().copy_from_slice(b"abcd");
        assert_eq!(buffer.metadata_ring.len(), 3);
        assert!(buffer.dequeue().is_ok());
        assert_eq!(buffer.dequeue().unwrap().1, &b"abcd"[..]);
        assert_eq!(buffer.metadata_ring.len(), 0);
    }
    // dequeue_with: a failing closure must leave the packet queued.
    #[test]
    fn test_dequeue_with() {
        let mut buffer = buffer();
        assert!(buffer.enqueue(6, ()).is_ok());
        assert!(buffer.enqueue(8, ()).is_ok());
        assert!(buffer.dequeue().is_ok());
        buffer.enqueue(4, ()).unwrap().copy_from_slice(b"abcd");
        assert_eq!(buffer.metadata_ring.len(), 3);
        assert!(buffer.dequeue().is_ok());
        assert!(buffer.dequeue_with(|_, _| Err(Error::Unaddressable) as Result<()>).is_err());
        assert_eq!(buffer.metadata_ring.len(), 1);
        assert!(buffer.dequeue_with(|&mut (), payload| {
            assert_eq!(payload, &b"abcd"[..]);
            Ok(())
        }).is_ok());
        assert_eq!(buffer.metadata_ring.len(), 0);
    }
    // Full/empty are driven by the metadata ring (packet count), not bytes.
    #[test]
    fn test_metadata_full_empty() {
        let mut buffer = buffer();
        assert_eq!(buffer.is_empty(), true);
        assert_eq!(buffer.is_full(), false);
        assert!(buffer.enqueue(1, ()).is_ok());
        assert_eq!(buffer.is_empty(), false);
        assert!(buffer.enqueue(1, ()).is_ok());
        assert!(buffer.enqueue(1, ()).is_ok());
        assert_eq!(buffer.is_full(), false);
        assert_eq!(buffer.is_empty(), false);
        assert!(buffer.enqueue(1, ()).is_ok());
        assert_eq!(buffer.is_full(), true);
        assert_eq!(buffer.is_empty(), false);
        assert_eq!(buffer.metadata_ring.len(), 4);
        assert_eq!(buffer.enqueue(1, ()), Err(Error::Exhausted));
    }
    // Not enough total free bytes in the payload window.
    #[test]
    fn test_window_too_small() {
        let mut buffer = buffer();
        assert!(buffer.enqueue(4, ()).is_ok());
        assert!(buffer.enqueue(8, ()).is_ok());
        assert!(buffer.dequeue().is_ok());
        assert_eq!(buffer.enqueue(16, ()), Err(Error::Exhausted));
        assert_eq!(buffer.metadata_ring.len(), 1);
    }
    // Enough total bytes, but no contiguous run even after padding.
    #[test]
    fn test_contiguous_window_too_small() {
        let mut buffer = buffer();
        assert!(buffer.enqueue(4, ()).is_ok());
        assert!(buffer.enqueue(8, ()).is_ok());
        assert!(buffer.dequeue().is_ok());
        assert_eq!(buffer.enqueue(8, ()), Err(Error::Exhausted));
        assert_eq!(buffer.metadata_ring.len(), 1);
    }
    // Larger than the entire payload ring: Truncated, not Exhausted.
    #[test]
    fn test_capacity_too_small() {
        let mut buffer = buffer();
        assert_eq!(buffer.enqueue(32, ()), Err(Error::Truncated));
    }
}
1c472f38184eadca7faedd6b7c4899278ee24356 | 8,664 | use super::{
Arm, Block, Expr, ExprKind, Guard, InlineAsmOperand, Pat, PatKind, Stmt, StmtKind, Thir,
};
use rustc_middle::ty::Const;
/// A depth-first visitor over a THIR body.
///
/// Implementors override the `visit_*` hooks they care about and call the
/// matching `walk_*` function to recurse into children — which is exactly
/// what every default method does. `thir()` supplies the arena that the
/// `ExprId`/`StmtId`/`ArmId` indices stored in nodes resolve against.
pub trait Visitor<'a, 'tcx: 'a>: Sized {
    fn thir(&self) -> &'a Thir<'tcx>;
    fn visit_expr(&mut self, expr: &Expr<'tcx>) {
        walk_expr(self, expr);
    }
    fn visit_stmt(&mut self, stmt: &Stmt<'tcx>) {
        walk_stmt(self, stmt);
    }
    fn visit_block(&mut self, block: &Block) {
        walk_block(self, block);
    }
    fn visit_arm(&mut self, arm: &Arm<'tcx>) {
        walk_arm(self, arm);
    }
    fn visit_pat(&mut self, pat: &Pat<'tcx>) {
        walk_pat(self, pat);
    }
    // Default: constants are not recursed into.
    fn visit_const(&mut self, _cnst: Const<'tcx>) {}
}
/// Recurse into every child node of `expr`, dispatching through the visitor.
///
/// Child `ExprId`s are resolved via `visitor.thir()`. Non-node fields
/// (spans, operators, def-ids, mutability, …) are matched with `_` and
/// intentionally not visited.
pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Expr<'tcx>) {
    use ExprKind::*;
    match expr.kind {
        Scope { value, region_scope: _, lint_level: _ } => {
            visitor.visit_expr(&visitor.thir()[value])
        }
        Box { value } => visitor.visit_expr(&visitor.thir()[value]),
        If { cond, then, else_opt, if_then_scope: _ } => {
            visitor.visit_expr(&visitor.thir()[cond]);
            visitor.visit_expr(&visitor.thir()[then]);
            if let Some(else_expr) = else_opt {
                visitor.visit_expr(&visitor.thir()[else_expr]);
            }
        }
        Call { fun, ref args, ty: _, from_hir_call: _, fn_span: _ } => {
            visitor.visit_expr(&visitor.thir()[fun]);
            for &arg in &**args {
                visitor.visit_expr(&visitor.thir()[arg]);
            }
        }
        Deref { arg } => visitor.visit_expr(&visitor.thir()[arg]),
        Binary { lhs, rhs, op: _ } | LogicalOp { lhs, rhs, op: _ } => {
            visitor.visit_expr(&visitor.thir()[lhs]);
            visitor.visit_expr(&visitor.thir()[rhs]);
        }
        Unary { arg, op: _ } => visitor.visit_expr(&visitor.thir()[arg]),
        Cast { source } => visitor.visit_expr(&visitor.thir()[source]),
        Use { source } => visitor.visit_expr(&visitor.thir()[source]),
        NeverToAny { source } => visitor.visit_expr(&visitor.thir()[source]),
        Pointer { source, cast: _ } => visitor.visit_expr(&visitor.thir()[source]),
        Let { expr, .. } => {
            visitor.visit_expr(&visitor.thir()[expr]);
        }
        Loop { body } => visitor.visit_expr(&visitor.thir()[body]),
        Match { scrutinee, ref arms } => {
            visitor.visit_expr(&visitor.thir()[scrutinee]);
            for &arm in &**arms {
                visitor.visit_arm(&visitor.thir()[arm]);
            }
        }
        Block { ref body } => visitor.visit_block(body),
        Assign { lhs, rhs } | AssignOp { lhs, rhs, op: _ } => {
            visitor.visit_expr(&visitor.thir()[lhs]);
            visitor.visit_expr(&visitor.thir()[rhs]);
        }
        Field { lhs, name: _ } => visitor.visit_expr(&visitor.thir()[lhs]),
        Index { lhs, index } => {
            visitor.visit_expr(&visitor.thir()[lhs]);
            visitor.visit_expr(&visitor.thir()[index]);
        }
        // Variable references are leaves.
        VarRef { id: _ } | UpvarRef { closure_def_id: _, var_hir_id: _ } => {}
        Borrow { arg, borrow_kind: _ } => visitor.visit_expr(&visitor.thir()[arg]),
        AddressOf { arg, mutability: _ } => visitor.visit_expr(&visitor.thir()[arg]),
        Break { value, label: _ } => {
            if let Some(value) = value {
                visitor.visit_expr(&visitor.thir()[value])
            }
        }
        Continue { label: _ } => {}
        Return { value } => {
            if let Some(value) = value {
                visitor.visit_expr(&visitor.thir()[value])
            }
        }
        ConstBlock { value } => visitor.visit_const(value),
        Repeat { value, count } => {
            visitor.visit_expr(&visitor.thir()[value]);
            visitor.visit_const(count);
        }
        Array { ref fields } | Tuple { ref fields } => {
            for &field in &**fields {
                visitor.visit_expr(&visitor.thir()[field]);
            }
        }
        // Struct/enum-variant literal: visit each field initializer, then the
        // `..base` expression if present.
        Adt(box crate::thir::Adt {
            ref fields,
            ref base,
            adt_def: _,
            variant_index: _,
            substs: _,
            user_ty: _,
        }) => {
            for field in &**fields {
                visitor.visit_expr(&visitor.thir()[field.expr]);
            }
            if let Some(base) = base {
                visitor.visit_expr(&visitor.thir()[base.base]);
            }
        }
        PlaceTypeAscription { source, user_ty: _ } | ValueTypeAscription { source, user_ty: _ } => {
            visitor.visit_expr(&visitor.thir()[source])
        }
        // Closure bodies live in a separate THIR body and are not walked here.
        Closure { closure_id: _, substs: _, upvars: _, movability: _, fake_reads: _ } => {}
        Literal { literal, user_ty: _, const_id: _ } => visitor.visit_const(literal),
        StaticRef { alloc_id: _, ty: _, def_id: _ } => {}
        InlineAsm { ref operands, template: _, options: _, line_spans: _ } => {
            for op in &**operands {
                use InlineAsmOperand::*;
                match op {
                    In { expr, reg: _ }
                    | Out { expr: Some(expr), reg: _, late: _ }
                    | InOut { expr, reg: _, late: _ }
                    | SymFn { expr } => visitor.visit_expr(&visitor.thir()[*expr]),
                    SplitInOut { in_expr, out_expr, reg: _, late: _ } => {
                        visitor.visit_expr(&visitor.thir()[*in_expr]);
                        if let Some(out_expr) = out_expr {
                            visitor.visit_expr(&visitor.thir()[*out_expr]);
                        }
                    }
                    Out { expr: None, reg: _, late: _ }
                    | Const { value: _, span: _ }
                    | SymStatic { def_id: _ } => {}
                }
            }
        }
        ThreadLocalRef(_) => {}
        Yield { value } => visitor.visit_expr(&visitor.thir()[value]),
    }
}
/// Walks the sub-nodes of a single THIR statement: the expression of an
/// expression statement, or the optional initializer followed by the bound
/// pattern of a `let` statement.
pub fn walk_stmt<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, stmt: &Stmt<'tcx>) {
    match &stmt.kind {
        StmtKind::Expr { expr, scope: _ } => {
            visitor.visit_expr(&visitor.thir()[*expr]);
        }
        StmtKind::Let {
            initializer,
            pattern,
            remainder_scope: _,
            init_scope: _,
            lint_level: _,
        } => {
            // Visit the initializer (when present) first, then the pattern it binds.
            if let Some(init_expr) = initializer {
                visitor.visit_expr(&visitor.thir()[*init_expr]);
            }
            visitor.visit_pat(pattern);
        }
    }
}
/// Walks a THIR block: every statement in order, then the optional tail
/// expression.
pub fn walk_block<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, block: &Block) {
    for stmt_id in block.stmts.iter().copied() {
        visitor.visit_stmt(&visitor.thir()[stmt_id]);
    }
    if let Some(tail_expr) = block.expr {
        visitor.visit_expr(&visitor.thir()[tail_expr]);
    }
}
/// Walks a match arm: its guard (if any), then its pattern, then its body.
pub fn walk_arm<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, arm: &Arm<'tcx>) {
    if let Some(guard) = &arm.guard {
        match guard {
            Guard::If(cond) => visitor.visit_expr(&visitor.thir()[*cond]),
            Guard::IfLet(pat, cond) => {
                visitor.visit_pat(pat);
                visitor.visit_expr(&visitor.thir()[*cond]);
            }
        }
    }
    visitor.visit_pat(&arm.pattern);
    visitor.visit_expr(&visitor.thir()[arm.body]);
}
/// Walks every sub-pattern and embedded constant of a THIR pattern.
pub fn walk_pat<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, pat: &Pat<'tcx>) {
    use PatKind::*;
    match pat.kind.as_ref() {
        // Wrappers around exactly one sub-pattern, including a binding with a
        // sub-pattern (`x @ p`).
        AscribeUserType { subpattern, ascription: _ }
        | Deref { subpattern }
        | Binding {
            subpattern: Some(subpattern),
            mutability: _,
            mode: _,
            var: _,
            ty: _,
            is_primary: _,
            name: _,
        } => visitor.visit_pat(&subpattern),
        // A plain binding or a wildcard has nothing beneath it.
        Binding { .. } | Wild => {}
        // Enum variant / struct / tuple patterns: visit each field's pattern.
        Variant { subpatterns, adt_def: _, substs: _, variant_index: _ } | Leaf { subpatterns } => {
            for subpattern in subpatterns {
                visitor.visit_pat(&subpattern.pattern);
            }
        }
        Constant { value } => visitor.visit_const(*value),
        // Range patterns carry two constant endpoints.
        Range(range) => {
            visitor.visit_const(range.lo);
            visitor.visit_const(range.hi);
        }
        // `[a, b, rest @ .., y, z]`-style patterns: fixed prefix, optional
        // middle slice binding, fixed suffix.
        Slice { prefix, slice, suffix } | Array { prefix, slice, suffix } => {
            for subpattern in prefix {
                visitor.visit_pat(&subpattern);
            }
            if let Some(pat) = slice {
                visitor.visit_pat(pat);
            }
            for subpattern in suffix {
                visitor.visit_pat(&subpattern);
            }
        }
        // Or-patterns (`p | q`): visit every alternative.
        Or { pats } => {
            for pat in pats {
                visitor.visit_pat(&pat);
            }
        }
    };
}
| 36.868085 | 100 | 0.496999 |
75af1cc6c0b8faa1465fad196da606bb2fbae635 | 6,698 | /* Copyright (c) 2018-2021 Jeremy Davis ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software
* and associated documentation files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge, publish, distribute,
* sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
* NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
//! This module provides ways to define a static or otherwise shared variable that should be
//! initialized later and never changed again. These implementations may not be thread-safe (the
//! thread-unsafe parts are marked as `unsafe`) because they are both lockless (no mutexes) and
//! generic (no atomic values).
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicBool, Ordering};
/// This struct represents a piece of data whose value should only ever be set once. It behaves just
/// like the `Once` from the `spin` crate except that it doesn't use a spinlock. As a result, it
/// lacks protection from simultaneous writes. Its use requires an `unsafe` block and should ensure
/// that no two threads could attempt to initialize it at the same time with different values.
///
/// The motivation for making this struct without synchronization primitives is so that if one core
/// stops in the middle of the initialization to run a hypervisor, a second core will be able to do
/// the initialization itself instead of blocking, which should improve the boot time in such systems.
#[derive(Debug)]
pub struct Once<T> {
    // Holds the return value of `call_once`, or `None` if it hasn't returned yet. We use an `Option` here
    // instead of `mem::uninitialized` in order to guarantee that overwritten values are dropped.
    value: UnsafeCell<Option<T>>,
    // We use a separate AtomicBool instead of just making `value` an `Option` because we can't guarantee
    // that `Option<T>` will change from `None` to `Some` atomically.
    finished: AtomicBool
}
// SAFETY: sending a `Once<T>` moves the (possibly uninitialized) `T` along
// with it, which is no more capable than sending a `T` directly.
unsafe impl<T: Send> Send for Once<T> {}
// SAFETY: once `finished` is set, shared access only ever hands out `&T`.
// NOTE(review): `call_once` can *write* a `T` from any thread that shares
// this value, which normally calls for a `T: Send` bound as well — confirm
// this is covered by the `unsafe` contract on `call_once`.
unsafe impl<T: Sync> Sync for Once<T> {}
impl<T> Once<T> {
    /// Builds a new `Once` value.
    pub const fn new() -> Once<T> {
        Once {
            value: UnsafeCell::new(None),
            finished: AtomicBool::new(false)
        }
    }
    /// Performs an initialization routine once. The given closure will be executed if `call_once` has
    /// never finished. If it has finished before, the closure will *not* be executed.
    ///
    /// This method will *not* block the calling thread if another initialization routine is already
    /// running. Instead, both will run in parallel with no protection from concurrent writes.
    ///
    /// When this method returns, it is guaranteed that some initialization has run and completed (it
    /// may or may not be the closure specified). The returned reference will point to the result from
    /// the closure that was run.
    ///
    /// # Safety
    /// This method is marked as `unsafe` because it allows data races. If it is possible for two
    /// threads to call this method before either of them has finished the initialization, then the
    /// calling code must guarantee that both initializations will result in exactly the same value
    /// and that it will never be changed until *all* initializations are complete. Otherwise, this
    /// function's behavior is undefined, since it returns a reference to a value in an undefined
    /// state.
    pub unsafe fn call_once<F>(&self, builder: F) -> &T
        where F: FnOnce() -> T
    {
        if self.finished.load(Ordering::Acquire) {
            self.force_get() // Value has already been created
        } else {
            // Save the value for later and then return it.
            *self.value.get() = Some(builder());
            // The `Release` store pairs with the `Acquire` loads above and in
            // `try_get`, publishing the write to `value` before `finished`
            // becomes observable as `true`.
            self.finished.store(true, Ordering::Release);
            self.force_get()
        }
    }
    /// Returns a pointer to the calculated value iff the `Once` has already been initialized.
    pub fn try_get(&self) -> Option<&T> {
        if self.finished.load(Ordering::Acquire) {
            unsafe { Some(self.force_get()) }
        } else {
            None
        }
    }
    // Returns `&v`, where `self.value` contains `Some(v)`. This can produce undefined behavior if
    // `self.value` contains `None`.
    unsafe fn force_get(&self) -> &T {
        match &*self.value.get() {
            None => unreachable_debug!(
                "This function is private and is only called after a value has been created."
            ),
            Some(v) => &v
        }
    }
}
/// This macro makes a wrapper for the `Once` struct that can be given an initializer expression
/// when it's defined instead of when it's used. It fills the role of the `lazy_static` crate
/// without requiring either `std` or the `spin` crate.
///
/// Use of this macro requires an `unsafe` block. It's part of the definition. The reason is that
/// we can't prevent data races and the macro can't guarantee that the values given in the
/// initializers are actually constant.
#[macro_export]
macro_rules! lazy_static {
    ($(unsafe { $($(#[$attr:meta])* $vis:vis static ref $var:ident : $type:ty = $initializer:expr ;)* })*) => {
        $(
            $(
                // One hidden wrapper type per static; the static itself is an
                // instance of this type holding the lazily-filled `Once`.
                #[allow(non_camel_case_types)]
                #[doc(hidden)]
                $vis struct $var {
                    once: $crate::once::Once<$type>
                }
                // Dereferencing runs the initializer on first access via
                // `Once::call_once`; later accesses return the cached value.
                impl ::core::ops::Deref for $var {
                    type Target = $type;
                    fn deref(&self) -> &Self::Target {
                        unsafe { self.once.call_once(|| $initializer) }
                    }
                }
                $(#[$attr])*
                $vis static $var: $var = $var { once: $crate::once::Once::new() };
            )*
        )*
    }
}
| 48.536232 | 112 | 0.641535 |
9c4b4b46317736bd0e4d5908666b962c64adbe8c | 1,984 | /// Information requested by SQLGetInfo
#[repr(u16)]
#[allow(non_camel_case_types)]
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum InfoType {
SQL_MAX_DRIVER_CONNECTIONS = 0,
SQL_MAX_CONCURRENT_ACTIVITIES = 1,
SQL_DATA_SOURCE_NAME = 2,
// SQL_FETCH_DIRECTION = 8, Deprecated in ODBC 3
SQL_SERVER_NAME = 13,
SQL_SEARCH_PATTERN_ESCAPE = 14,
SQL_DBMS_NAME = 17,
SQL_DBMS_VER = 18,
SQL_ACCESSIBLE_TABLES = 19,
SQL_ACCESSIBLE_PROCEDURES = 20,
SQL_CURSOR_COMMIT_BEHAVIOR = 23,
SQL_DATA_SOURCE_READ_ONLY = 25,
SQL_DEFAULT_TXN_ISOLATION = 26,
SQL_IDENTIFIER_CASE = 28,
SQL_IDENTIFIER_QUOTE_CHAR = 29,
SQL_MAX_COLUMN_NAME_LEN = 30,
SQL_MAX_CURSOR_NAME_LEN = 31,
SQL_MAX_SCHEMA_NAME_LEN = 32,
SQL_MAX_CATALOG_NAME_LEN = 34,
SQL_MAX_TABLE_NAME_LEN = 35,
// SQL_SCROLL_CONCURRENCY = 43, deprecated in ODBC 3
SQL_TRANSACTION_CAPABLE = 46,
SQL_USER_NAME = 47,
SQL_TRANSACTION_ISOLATION_OPTION = 72,
SQL_INTEGRITY = 73,
SQL_GETDATA_EXTENSIONS = 81,
SQL_NULL_COLLATION = 85,
SQL_ALTER_TABLE = 86,
SQL_ORDER_BY_COLUMNS_IN_SELECT = 90,
SQL_SPECIAL_CHARACTERS = 94,
SQL_MAX_COLUMNS_IN_GROUP_BY = 97,
SQL_MAX_COLUMNS_IN_INDEX = 98,
SQL_MAX_COLUMNS_IN_ORDER_BY = 99,
SQL_MAX_COLUMNS_IN_SELECT = 100,
SQL_MAX_COLUMNS_IN_TABLE = 101,
SQL_MAX_INDEX_SIZE = 102,
SQL_MAX_ROW_SIZE = 104,
SQL_MAX_STATEMENT_LEN = 105,
SQL_MAX_TABLES_IN_SELECT = 106,
SQL_MAX_USER_NAME_LEN = 107,
SQL_OUTER_JOIN_CAPABILITIES = 115,
SQL_XOPEN_CLI_YEAR = 10000,
SQL_CURSOR_SENSITIVITY = 10001,
SQL_DESCRIBE_PARAMETER = 10002,
SQL_CATALOG_NAME = 10003,
SQL_COLLATION_SEQ = 10004,
SQL_MAX_IDENTIFIER_LEN = 10005,
SQL_ASYNC_MODE = 10021,
SQL_MAX_ASYNC_CONCURRENT_STATEMENTS = 10022,
SQL_ASYNC_DBC_FUNCTIONS = 10023,
SQL_DRIVER_AWARE_POOLING_SUPPORTED = 10024,
SQL_ASYNC_NOTIFICATION = 10025,
}
pub use self::InfoType::*;
| 33.066667 | 56 | 0.731855 |
64af9effc34fc78e87c075df9bf024a1ac59d421 | 1,289 | use super::*;
use polars::export::chrono::prelude::*;
#[test]
#[cfg(all(
    feature = "temporal",
    feature = "dtype-date",
    feature = "dynamic_groupby"
))]
fn test_groupby_dynamic_week_bounds() -> Result<()> {
    // 13 daily timestamps: Feb 1 .. Feb 14 2022, closed on the left, so
    // Feb 14 itself is excluded.
    let start = NaiveDate::from_ymd(2022, 2, 1).and_hms(0, 0, 0);
    let stop = NaiveDate::from_ymd(2022, 2, 14).and_hms(0, 0, 0);
    let range = date_range(
        "dt",
        start,
        stop,
        Duration::parse("1d"),
        ClosedWindow::Left,
        TimeUnit::Milliseconds,
    )
    .into_series();
    // One `a = 1` per day, so the windowed sums below count rows per window.
    let a = Int32Chunked::full("a", 1, range.len());
    let df = df![
        "dt" => range,
        "a" => a
    ]?;
    // Group into aligned, non-overlapping 1-week windows over the `dt` column.
    let out = df
        .lazy()
        .groupby_dynamic(
            [],
            DynamicGroupOptions {
                index_column: "dt".into(),
                every: Duration::parse("1w"),
                period: Duration::parse("1w"),
                offset: Duration::parse("0w"),
                closed_window: ClosedWindow::Left,
                truncate: false,
                include_boundaries: true,
            },
        )
        .agg([col("a").sum()])
        .collect()?;
    let a = out.column("a")?;
    // 13 rows of 1 split across two weekly windows: 7 days + 6 days.
    assert_eq!(a.get(0), AnyValue::Int32(7));
    assert_eq!(a.get(1), AnyValue::Int32(6));
    Ok(())
}
3834365c022c43a519f09843dc833e940228f4e2 | 3,639 | //! Represent `reduced` transaction, i.e. unsigned transaction where each unsigned input
//! is augmented with ReducedInput which contains a script reduction result.
use super::UnsignedTransaction;
use crate::box_coll::ErgoBoxes;
use crate::ergo_state_ctx::ErgoStateContext;
use crate::error_conversion::to_js;
use ergo_lib::chain::transaction::reduced::reduce_tx;
use ergo_lib::ergotree_ir::serialization::SigmaSerializable;
use ergo_lib::ergotree_ir::sigma_protocol::sigma_boolean::SigmaBoolean;
use wasm_bindgen::prelude::*;
/// A list of sigma propositions (public keys).
///
/// Filled via `add_proposition_from_byte` from serialized `SigmaBoolean`s.
#[wasm_bindgen]
pub struct Propositions(pub(crate) Vec<SigmaBoolean>);
#[wasm_bindgen]
impl Propositions {
    /// Create empty proposition holder
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        Propositions(vec![])
    }
    /// Parses `proposition` as a sigma-serialized `SigmaBoolean` and appends
    /// it to the list.
    ///
    /// Returns a JS error if the bytes are not a valid serialized proposition.
    pub fn add_proposition_from_byte(&mut self, proposition: Vec<u8>) -> Result<(), JsValue> {
        self.0
            .push(SigmaBoolean::sigma_parse_bytes(&proposition).map_err(to_js)?);
        Ok(())
    }
}
// A public `new()` taking no arguments should be mirrored by `Default`
// (clippy::new_without_default); delegating keeps the two in sync.
impl Default for Propositions {
    fn default() -> Self {
        Self::new()
    }
}
/// Represent `reduced` transaction, i.e. unsigned transaction where each unsigned input
/// is augmented with ReducedInput which contains a script reduction result.
/// After an unsigned transaction is reduced it can be signed without context.
/// Thus, it can be serialized and transferred for example to Cold Wallet and signed
/// in an environment where secrets are known.
/// see EIP-19 for more details -
/// <https://github.com/ergoplatform/eips/blob/f280890a4163f2f2e988a0091c078e36912fc531/eip-0019.md>
#[wasm_bindgen]
#[derive(PartialEq, Debug, Clone)]
// Newtype over the `ergo_lib` type; all methods delegate to the wrapped value.
pub struct ReducedTransaction(ergo_lib::chain::transaction::reduced::ReducedTransaction);
#[wasm_bindgen]
impl ReducedTransaction {
/// Returns `reduced` transaction, i.e. unsigned transaction where each unsigned input
/// is augmented with ReducedInput which contains a script reduction result.
pub fn from_unsigned_tx(
unsigned_tx: &UnsignedTransaction,
boxes_to_spend: &ErgoBoxes,
data_boxes: &ErgoBoxes,
state_context: &ErgoStateContext,
) -> Result<ReducedTransaction, JsValue> {
let boxes_to_spend = boxes_to_spend.clone().into();
let data_boxes = data_boxes.clone().into();
let tx_context = ergo_lib::wallet::signing::TransactionContext::new(
unsigned_tx.0.clone(),
boxes_to_spend,
data_boxes,
)
.map_err(to_js)?;
reduce_tx(tx_context, &state_context.clone().into())
.map_err(to_js)
.map(ReducedTransaction::from)
}
/// Returns serialized bytes or fails with error if cannot be serialized
pub fn sigma_serialize_bytes(&self) -> Result<Vec<u8>, JsValue> {
self.0.sigma_serialize_bytes().map_err(to_js)
}
/// Parses ReducedTransaction or fails with error
pub fn sigma_parse_bytes(data: Vec<u8>) -> Result<ReducedTransaction, JsValue> {
ergo_lib::chain::transaction::reduced::ReducedTransaction::sigma_parse_bytes(&data)
.map(ReducedTransaction)
.map_err(to_js)
}
/// Returns the unsigned transaction
pub fn unsigned_tx(&self) -> UnsignedTransaction {
self.0.unsigned_tx.clone().into()
}
}
impl From<ergo_lib::chain::transaction::reduced::ReducedTransaction> for ReducedTransaction {
fn from(t: ergo_lib::chain::transaction::reduced::ReducedTransaction) -> Self {
ReducedTransaction(t)
}
}
impl From<ReducedTransaction> for ergo_lib::chain::transaction::reduced::ReducedTransaction {
fn from(t: ReducedTransaction) -> Self {
t.0
}
}
| 37.515464 | 100 | 0.70404 |
615ca5a24ccb06c13db57a774e20e1571032ca97 | 2,243 | use serde::{Deserialize, Serialize};
use crate::{
DynamicRegistrationClientCapabilities, Range, StaticRegistrationOptions,
TextDocumentPositionParams, TextDocumentRegistrationOptions, WorkDoneProgressOptions,
WorkDoneProgressParams,
};
/// Client capabilities for the linked editing range request: only dynamic
/// registration support is signalled.
pub type LinkedEditingRangeClientCapabilities = DynamicRegistrationClientCapabilities;
/// Server options for the linked editing range request (work-done progress
/// reporting only).
#[derive(Debug, Eq, PartialEq, Clone, Default, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct LinkedEditingRangeOptions {
    #[serde(flatten)]
    pub work_done_progress_options: WorkDoneProgressOptions,
}
/// Registration options: document selector plus the base options plus a
/// static registration id, all flattened into one JSON object.
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct LinkedEditingRangeRegistrationOptions {
    #[serde(flatten)]
    pub text_document_registration_options: TextDocumentRegistrationOptions,
    #[serde(flatten)]
    pub linked_editing_range_options: LinkedEditingRangeOptions,
    #[serde(flatten)]
    pub static_registration_options: StaticRegistrationOptions,
}
/// How a server advertises linked editing range support: a plain boolean,
/// options, or full registration options (deserialized untagged, so the JSON
/// shape decides the variant).
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Serialize)]
#[serde(untagged)]
pub enum LinkedEditingRangeServerCapabilities {
    Simple(bool),
    Options(LinkedEditingRangeOptions),
    RegistrationOptions(LinkedEditingRangeRegistrationOptions),
}
/// Request parameters: a text document position plus optional work-done
/// progress token, flattened together.
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct LinkedEditingRangeParams {
    #[serde(flatten)]
    pub text_document_position_params: TextDocumentPositionParams,
    #[serde(flatten)]
    pub work_done_progress_params: WorkDoneProgressParams,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct LinkedEditingRanges {
    /// A list of ranges that can be renamed together. The ranges must have
    /// identical length and contain identical text content. The ranges cannot overlap.
    pub ranges: Vec<Range>,
    /// An optional word pattern (regular expression) that describes valid contents for
    /// the given ranges. If no pattern is provided, the client configuration's word
    /// pattern will be used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub word_pattern: Option<String>,
}
71f957a70bb60a25d9d5a4cff9775f4b5d8f20fd | 5,402 | // create tests where SslConnector is created, CA is created and checked against that CA
use openssl::ssl::SslAcceptor;
use openssl::ssl::SslMethod;
use crate::*;
use std::net::{TcpListener, TcpStream};
use std::thread;
use openssl::ssl::SslConnector;
use tempfile::tempdir;
use std::path::Path;
use std::fs::OpenOptions;
use std::io::Write;
use clap::App;
use crate::args::parser_name_builder;
use crate::args::parser_not_after_before;
use crate::args::matches_name_builder;
use crate::args::matches_not_after_before;
use crate::args::ParseError;
use crate::args::parser_csr_extensions;
use crate::args::matches_csr_extensions;
use crate::args::run_csr_extensions;
use crate::args::CsrExt;
/// Extracts the public half of a private key by round-tripping it through
/// its PEM encoding.
///
/// Panics on serialization/parse failure, which is acceptable in this
/// test-only helper.
fn priv_to_pub(server_key: &PKey<Private>) -> PKey<Public> {
    let pem = server_key
        .public_key_to_pem()
        .expect("private key should expose a PEM-encodable public key");
    PKey::<Public>::public_key_from_pem(&pem)
        .expect("PEM produced above should parse back into a public key")
}
/// Builds an X509 subject name plus a (not-before, not-after) validity pair
/// by running the CLI argument parsers on a synthetic `-N <name>` command line.
fn create_name_validity(name: &str) -> Result<(X509Name, (Option<Asn1Time>, Option<Asn1Time>)), ParseError> {
    let parser = parser_not_after_before(parser_name_builder(App::new("asd")));
    let matches = parser.get_matches_from(vec!["", "-N", name]);
    let subject = matches_name_builder(&matches)?;
    let validity = matches_not_after_before(&matches)?;
    Ok((subject, validity))
}
/// Like `create_name_validity`, but additionally requests the server CSR
/// extensions and a `localhost` DNS SAN via the synthetic command line.
fn create_server(name: &str) -> Result<(Vec<CsrExt>, X509Name, (Option<Asn1Time>, Option<Asn1Time>)), ParseError> {
    let parser =
        parser_not_after_before(parser_csr_extensions(parser_name_builder(App::new("asd"))));
    let matches = parser.get_matches_from(vec![
        "", "-N", name, "--ext-server", "--san-dns", "localhost",
    ]);
    let extensions = matches_csr_extensions(&matches)?;
    let subject = matches_name_builder(&matches)?;
    let validity = matches_not_after_before(&matches)?;
    Ok((extensions, subject, validity))
}
/// Like `create_server`, but requests the client CSR extensions instead.
fn create_client(name: &str) -> Result<(Vec<CsrExt>, X509Name, (Option<Asn1Time>, Option<Asn1Time>)), ParseError> {
    let parser =
        parser_not_after_before(parser_csr_extensions(parser_name_builder(App::new("asd"))));
    let matches = parser.get_matches_from(vec!["", "-N", name, "--ext-client"]);
    let extensions = matches_csr_extensions(&matches)?;
    let subject = matches_name_builder(&matches)?;
    let validity = matches_not_after_before(&matches)?;
    Ok((extensions, subject, validity))
}
#[test]
fn test_initialization() {
    // End-to-end smoke test: build a CA, issue a server and a client
    // certificate from it, then perform a mutually-authenticated TLS
    // exchange over loopback and push a few bytes through the stream.
    let dir = tempdir().unwrap();
    let (name, val) = create_name_validity("ca").unwrap();
    let key = build_privkey().unwrap();
    let ca = build_ca_cert(
        &key,
        &name,
        &val,
    ).unwrap();
    // Server CSR (server extensions + SAN "localhost").
    let (exts, name, _) = create_server("localhost").unwrap();
    let server_key = build_privkey().unwrap();
    let server_csr = build_ca_req(
        &server_key,
        &name,
        |cert_builder| {
            let mut extensions = Stack::<X509Extension>::new()?;
            run_csr_extensions(&exts, &mut extensions, &cert_builder)?;
            Ok(())
        }
    ).unwrap();
    // Client CSR (client extensions). Note: this rebinds `val`, so both
    // signed certificates below use the client-side validity window.
    let (exts, name, val) = create_client("localhost").unwrap();
    let client_key = build_privkey().unwrap();
    let client_csr = build_ca_req(
        &client_key,
        &name,
        |cert_builder| {
            let mut extensions = Stack::<X509Extension>::new()?;
            run_csr_extensions(&exts, &mut extensions, &cert_builder)?;
            Ok(())
        }
    ).unwrap();
    let server_key_pub = priv_to_pub(&server_key);
    let client_key_pub = priv_to_pub(&client_key);
    let server_cert = build_ca_signed_cert(
        &ca,
        &key,
        &server_key_pub,
        &server_csr,
        &val,
        |_| {Ok(())}
    ).unwrap();
    let client_cert = build_ca_signed_cert(
        &ca,
        &key,
        &client_key_pub,
        &client_csr,
        &val,
        |_| {Ok(())}
    ).unwrap();
    // Write the CA certificate to disk so both sides can use it as the
    // trust root. `write_all` (not `write`) guarantees no silent partial
    // write truncates the PEM file.
    let ca_file_name = dir.path().join(Path::new("ca"));
    OpenOptions::new()
        .write(true)
        .create_new(true)
        .open(&ca_file_name)
        .unwrap()
        .write_all(&ca.to_pem().unwrap())
        .unwrap();
    let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    acceptor.set_certificate(&server_cert).unwrap();
    acceptor.set_private_key(&server_key).unwrap();
    acceptor.set_ca_file(&ca_file_name).unwrap();
    acceptor.check_private_key().unwrap();
    let acceptor = acceptor.build();
    // Bind an ephemeral port instead of a hard-coded 8443 so parallel test
    // runs cannot collide on the address.
    let listener = TcpListener::bind("127.0.0.1:0").unwrap();
    let port = listener.local_addr().unwrap().port();
    let mut connector = SslConnector::builder(SslMethod::tls()).unwrap();
    connector.set_certificate(&client_cert.clone()).unwrap();
    connector.set_private_key(&client_key).unwrap();
    connector.set_ca_file(&ca_file_name).unwrap();
    connector.check_private_key().unwrap();
    let connector = connector.build();
    // Keep the join handle so a panic in the client thread fails the test.
    let client = thread::spawn(move || {
        let stream = TcpStream::connect(("localhost", port)).unwrap();
        stream.set_nodelay(true).unwrap();
        let mut stream = connector.connect("localhost", stream).unwrap();
        let buff = vec![1, 2, 3];
        // `write_all` guarantees all three bytes are sent; `write` may
        // legally perform a partial write.
        stream.write_all(&buff).unwrap();
    });
    for stream in listener.incoming() {
        let stream = stream.unwrap();
        stream.set_nodelay(true).unwrap();
        let mut stream = acceptor.accept(stream).unwrap();
        let mut buff = Vec::<u8>::with_capacity(128);
        let a = stream.read_to_end(&mut buff).unwrap();
        assert_eq!(a, 3);
        assert_eq!(buff, [1, 2, 3]);
        break;
    }
    client.join().unwrap();
}
0e6215874b81495ac0635c42a62ad1632ce477e8 | 30,157 | // Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use fnv::FnvHashMap;
use futures::{future, sync::oneshot, task, Async, Future, Poll, IntoFuture};
use parking_lot::Mutex;
use {Multiaddr, MuxedTransport, SwarmController, Transport};
use std::io::{Error as IoError, ErrorKind as IoErrorKind};
use std::mem;
use std::sync::{Arc, Weak, atomic::AtomicUsize, atomic::Ordering};
use transport::interruptible::Interrupt;
/// Storage for a unique connection with a remote.
pub struct UniqueConnec<T> {
    // Shared state machine: every clone of this `UniqueConnec` (and, via
    // `Weak`, every `UniqueConnecFuture`) points at the same slot.
    inner: Arc<Mutex<UniqueConnecInner<T>>>,
}
// State machine behind a `UniqueConnec`:
// Empty -> Pending (dial started) -> Full (tie_* called) or Errored.
enum UniqueConnecInner<T> {
    /// The `UniqueConnec` was created, but nothing is in it.
    Empty,
    /// We started dialing, but no response has been obtained so far.
    Pending {
        /// Tasks that need to be awakened when the content of this object is set.
        tasks_waiting: FnvHashMap<usize, task::Task>,
        /// Future that represents when `tie_*` should have been called.
        // TODO: Send + Sync bound is meh
        dial_fut: Box<Future<Item = (), Error = IoError> + Send + Sync>,
        /// Dropping this object will automatically interrupt the dial, which is very useful if
        /// we clear or drop the `UniqueConnec`.
        interrupt: Interrupt,
    },
    /// The value of this unique connec has been set.
    /// Can only transition to `Empty` when the future has expired.
    Full {
        /// Content of the object.
        value: T,
        /// Sender to trigger if the content gets cleared.
        on_clear: oneshot::Sender<()>,
    },
    /// The `dial_fut` has errored.
    Errored(IoError),
}
impl<T> UniqueConnec<T> {
/// Builds a new empty `UniqueConnec`.
#[inline]
pub fn empty() -> Self {
UniqueConnec {
inner: Arc::new(Mutex::new(UniqueConnecInner::Empty)),
}
}
/// Builds a new `UniqueConnec` that contains a value.
#[inline]
pub fn with_value(value: T) -> Self {
let (on_clear, _) = oneshot::channel();
UniqueConnec {
inner: Arc::new(Mutex::new(UniqueConnecInner::Full { value, on_clear })),
}
}
/// Instantly returns the value from the object if there is any.
pub fn poll(&self) -> Option<T>
where T: Clone,
{
let inner = self.inner.lock();
if let UniqueConnecInner::Full { ref value, .. } = &*inner {
Some(value.clone())
} else {
None
}
}
/// Loads the value from the object.
///
/// If the object is empty or has errored earlier, dials the given multiaddress with the
/// given transport.
///
/// The closure of the `swarm` is expected to call `tie_*()` on the `UniqueConnec`. Failure
/// to do so will make the `UniqueConnecFuture` produce an error.
///
/// One critical property of this method, is that if a connection incomes and `tie_*` is
/// called, then it will be returned by the returned future.
#[inline]
pub fn dial<S, F, Du>(&self, swarm: &SwarmController<S, F>, multiaddr: &Multiaddr,
transport: Du) -> UniqueConnecFuture<T>
where T: Clone + Send + 'static, // TODO: 'static :-/
Du: Transport + 'static, // TODO: 'static :-/
Du::Output: Into<S::Output>,
Du::Dial: Send,
Du::MultiaddrFuture: Send,
S: Clone + MuxedTransport,
S::Dial: Send,
S::Listener: Send,
S::ListenerUpgrade: Send,
S::Output: Send,
S::MultiaddrFuture: Send,
F: 'static,
{
self.dial_inner(swarm, multiaddr, transport, true)
}
/// Same as `dial`, except that the future will produce an error if an earlier attempt to dial
/// has errored.
#[inline]
pub fn dial_if_empty<S, F, Du>(&self, swarm: &SwarmController<S, F>, multiaddr: &Multiaddr,
transport: Du) -> UniqueConnecFuture<T>
where T: Clone + Send + 'static, // TODO: 'static :-/
Du: Transport + 'static, // TODO: 'static :-/
Du::Output: Into<S::Output>,
Du::Dial: Send,
Du::MultiaddrFuture: Send,
S: Clone + MuxedTransport,
S::Dial: Send,
S::Listener: Send,
S::ListenerUpgrade: Send,
S::Output: Send,
S::MultiaddrFuture: Send,
F: 'static,
{
self.dial_inner(swarm, multiaddr, transport, false)
}
/// Inner implementation of `dial_*`.
fn dial_inner<S, F, Du>(&self, swarm: &SwarmController<S, F>, multiaddr: &Multiaddr,
transport: Du, dial_if_err: bool) -> UniqueConnecFuture<T>
where T: Clone + Send + 'static, // TODO: 'static :-/
Du: Transport + 'static, // TODO: 'static :-/
Du::Output: Into<S::Output>,
Du::Dial: Send,
Du::MultiaddrFuture: Send,
S: Clone + MuxedTransport,
S::Dial: Send,
S::Listener: Send,
S::ListenerUpgrade: Send,
S::Output: Send,
S::MultiaddrFuture: Send,
F: 'static,
{
let mut inner = self.inner.lock();
match &*inner {
UniqueConnecInner::Empty => (),
UniqueConnecInner::Errored(_) if dial_if_err => (),
_ => return UniqueConnecFuture { inner: Arc::downgrade(&self.inner) },
};
let weak_inner = Arc::downgrade(&self.inner);
let (transport, interrupt) = transport.interruptible();
let dial_fut = swarm.dial_then(multiaddr.clone(), transport,
move |val: Result<(), IoError>| {
let inner = match weak_inner.upgrade() {
Some(i) => i,
None => return val
};
let mut inner = inner.lock();
if let UniqueConnecInner::Full { .. } = *inner {
return val;
}
let new_val = UniqueConnecInner::Errored(match val {
Ok(()) => IoError::new(IoErrorKind::ConnectionRefused,
"dialing has succeeded but tie_* hasn't been called"),
Err(ref err) => IoError::new(err.kind(), err.to_string()),
});
match mem::replace(&mut *inner, new_val) {
UniqueConnecInner::Pending { tasks_waiting, .. } => {
for task in tasks_waiting {
task.1.notify();
}
},
_ => ()
};
val
});
let dial_fut = dial_fut
.map_err(|_| IoError::new(IoErrorKind::Other, "multiaddress not supported"))
.into_future()
.flatten();
*inner = UniqueConnecInner::Pending {
tasks_waiting: Default::default(),
dial_fut: Box::new(dial_fut),
interrupt,
};
UniqueConnecFuture { inner: Arc::downgrade(&self.inner) }
}
/// Puts `value` inside the object.
/// Additionally, the `UniqueConnec` will be tied to the `until` future. When the future drops
/// or finishes, the `UniqueConnec` is automatically cleared. If the `UniqueConnec` is cleared
/// by the user, the future automatically stops.
/// The returned future is an adjusted version of that same future.
///
/// If the object already contains something, then `until` is dropped and a dummy future that
/// immediately ends is returned.
pub fn tie_or_stop<F>(&self, value: T, until: F) -> impl Future<Item = (), Error = F::Error>
where F: Future<Item = ()>
{
self.tie_inner(value, until, false)
}
/// Same as `tie_or_stop`, except that is if the object already contains something, then
/// `until` is returned immediately and can live in parallel.
pub fn tie_or_passthrough<F>(&self, value: T, until: F) -> impl Future<Item = (), Error = F::Error>
where F: Future<Item = ()>
{
self.tie_inner(value, until, true)
}
/// Inner implementation of `tie_*`.
fn tie_inner<F>(&self, value: T, until: F, pass_through: bool) -> impl Future<Item = (), Error = F::Error>
where F: Future<Item = ()>
{
let mut tasks_to_notify = Default::default();
let mut inner = self.inner.lock();
let (on_clear, on_clear_rx) = oneshot::channel();
match mem::replace(&mut *inner, UniqueConnecInner::Full { value, on_clear }) {
UniqueConnecInner::Empty => {},
UniqueConnecInner::Errored(_) => {},
UniqueConnecInner::Pending { tasks_waiting, .. } => {
tasks_to_notify = tasks_waiting;
},
old @ UniqueConnecInner::Full { .. } => {
// Keep the old value.
*inner = old;
if pass_through {
return future::Either::B(future::Either::A(until));
} else {
return future::Either::B(future::Either::B(future::ok(())));
}
},
};
drop(inner);
struct Cleaner<T>(Weak<Mutex<UniqueConnecInner<T>>>);
impl<T> Drop for Cleaner<T> {
#[inline]
fn drop(&mut self) {
if let Some(inner) = self.0.upgrade() {
*inner.lock() = UniqueConnecInner::Empty;
}
}
}
let cleaner = Cleaner(Arc::downgrade(&self.inner));
// The mutex is unlocked when we notify the pending tasks.
for task in tasks_to_notify {
task.1.notify();
}
let fut = until
.select(on_clear_rx.then(|_| Ok(())))
.map(|((), _)| ())
.map_err(|(err, _)| err)
.then(move |val| {
drop(cleaner); // Make sure that `cleaner` gets called there.
val
});
future::Either::A(fut)
}
/// Clears the content of the object.
///
/// Has no effect if the content is empty or pending.
/// If the node was full, calling `clear` will stop the future returned by `tie_*`.
pub fn clear(&self) {
let mut inner = self.inner.lock();
match mem::replace(&mut *inner, UniqueConnecInner::Empty) {
UniqueConnecInner::Empty => {},
UniqueConnecInner::Errored(_) => {},
pending @ UniqueConnecInner::Pending { .. } => {
*inner = pending;
},
UniqueConnecInner::Full { on_clear, .. } => {
// TODO: Should we really replace the `Full` with an `Empty` here? What about
// letting dropping the future clear the connection automatically? Otherwise
// it is possible that the user dials before the future gets dropped, in which
// case the future dropping will set the value to `Empty`. But on the other hand,
// it is expected that `clear()` is instantaneous and if it is followed with
// `dial()` then it should dial.
let _ = on_clear.send(());
},
};
}
/// Returns the state of the object.
///
/// Note that this can be racy, as the object can be used at the same time. In other words,
/// the returned value may no longer reflect the actual state.
pub fn state(&self) -> UniqueConnecState {
match *self.inner.lock() {
UniqueConnecInner::Empty => UniqueConnecState::Empty,
UniqueConnecInner::Errored(_) => UniqueConnecState::Errored,
UniqueConnecInner::Pending { .. } => UniqueConnecState::Pending,
UniqueConnecInner::Full { .. } => UniqueConnecState::Full,
}
}
/// Returns true if the object has a pending or active connection. Returns false if the object
/// is empty or the connection has errored earlier.
#[inline]
pub fn is_alive(&self) -> bool {
    match self.state() {
        UniqueConnecState::Pending | UniqueConnecState::Full => true,
        UniqueConnecState::Empty | UniqueConnecState::Errored => false,
    }
}
}
impl<T> Clone for UniqueConnec<T> {
    #[inline]
    fn clone(&self) -> UniqueConnec<T> {
        // Cloning only creates another handle to the same shared state.
        let inner = self.inner.clone();
        UniqueConnec { inner }
    }
}
impl<T> Default for UniqueConnec<T> {
#[inline]
fn default() -> Self {
UniqueConnec::empty()
}
}
impl<T> Drop for UniqueConnec<T> {
    fn drop(&mut self) {
        // If this is the last remaining `UniqueConnec` handle (i.e. the Arc is
        // uniquely owned), wake every task blocked on a pending dial so they
        // can observe that the object has gone away.
        if let Some(inner) = Arc::get_mut(&mut self.inner) {
            if let UniqueConnecInner::Pending { ref mut tasks_waiting, .. } = *inner.get_mut() {
                for (_, task) in tasks_waiting.drain() {
                    task.notify();
                }
            }
        }
    }
}
/// Future returned by `UniqueConnec::dial()`.
#[must_use = "futures do nothing unless polled"]
pub struct UniqueConnecFuture<T> {
    // Weak pointer to the shared state: if every `UniqueConnec` handle has been
    // dropped, `upgrade()` fails and `poll()` resolves with `ConnectionAborted`.
    inner: Weak<Mutex<UniqueConnecInner<T>>>,
}
impl<T> Future for UniqueConnecFuture<T>
    where T: Clone
{
    type Item = T;
    type Error = IoError;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        let inner = match self.inner.upgrade() {
            Some(inner) => inner,
            // All the `UniqueConnec` have been destroyed.
            None => return Err(IoErrorKind::ConnectionAborted.into()),
        };

        let mut inner = inner.lock();
        // Temporarily move the state out of the mutex so the arms below can take
        // ownership of its contents; every arm is responsible for writing the
        // appropriate state back before returning.
        match mem::replace(&mut *inner, UniqueConnecInner::Empty) {
            UniqueConnecInner::Empty => {
                // This can happen if `tie_*()` is called, and the future expires before the
                // future returned by `dial()` gets polled. This means that the connection has been
                // closed.
                Err(IoErrorKind::ConnectionAborted.into())
            },
            UniqueConnecInner::Pending { mut tasks_waiting, mut dial_fut, interrupt } => {
                match dial_fut.poll() {
                    Ok(Async::Ready(())) => {
                        // This happens if we successfully dialed a remote, but the callback
                        // doesn't call `tie_*`. This can be a logic error by the user,
                        // but could also indicate that the user decided to filter out this
                        // connection for whatever reason.
                        *inner = UniqueConnecInner::Errored(IoErrorKind::ConnectionAborted.into());
                        Err(IoErrorKind::ConnectionAborted.into())
                    },
                    Ok(Async::NotReady) => {
                        // Give each task a process-wide unique id so that re-polling from
                        // the same task overwrites its previous map entry instead of
                        // accumulating a new one per poll.
                        static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0);
                        task_local! {
                            static TASK_ID: usize = NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed)
                        }
                        tasks_waiting.insert(TASK_ID.with(|&k| k), task::current());
                        *inner = UniqueConnecInner::Pending { tasks_waiting, dial_fut, interrupt };
                        Ok(Async::NotReady)
                    }
                    Err(err) => {
                        // `std::io::Error` is not `Clone`; build an equivalent error to hand
                        // out now while storing the original for subsequent polls.
                        let tr = IoError::new(err.kind(), err.to_string());
                        *inner = UniqueConnecInner::Errored(err);
                        Err(tr)
                    },
                }
            },
            UniqueConnecInner::Full { value, on_clear } => {
                // Put the (cloned) value back in place, then yield the original.
                *inner = UniqueConnecInner::Full {
                    value: value.clone(),
                    on_clear
                };
                Ok(Async::Ready(value))
            },
            UniqueConnecInner::Errored(err) => {
                // Same non-`Clone` workaround as in the `Pending` arm above.
                let tr = IoError::new(err.kind(), err.to_string());
                *inner = UniqueConnecInner::Errored(err);
                Err(tr)
            },
        }
    }
}
/// State of a `UniqueConnec`.
///
/// Returned by `UniqueConnec::state()`; the value is a snapshot and may already
/// be outdated by the time it is observed.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum UniqueConnecState {
    /// The object is empty.
    Empty,
    /// `dial` has been called and we are waiting for `tie_*` to be called.
    Pending,
    /// `tie_*` has been called.
    Full,
    /// The future returned by the closure of `dial` has errored or has finished before
    /// `tie_*` has been called.
    Errored,
}
#[cfg(test)]
mod tests {
    use futures::{future, sync::oneshot, Future, Stream};
    use transport::DeniedTransport;
    use std::io::Error as IoError;
    use std::sync::{Arc, atomic};
    use std::time::Duration;
    use {UniqueConnec, UniqueConnecState};
    use {swarm, transport, Transport};
    use tokio::runtime::current_thread;
    use tokio_timer;

    #[test]
    fn basic_working() {
        // Checks the basic working of the `UniqueConnec`:
        // Empty -> Pending (after `dial`) -> Full (after `tie_or_stop`).
        let (tx, rx) = transport::connector();
        let unique_connec = UniqueConnec::empty();
        let unique_connec2 = unique_connec.clone();
        assert_eq!(unique_connec.state(), UniqueConnecState::Empty);
        let (swarm_ctrl, swarm_future) = swarm(rx.with_dummy_muxing(), |_, _| {
            // Note that this handles both the dial and the listen.
            assert!(unique_connec2.is_alive());
            unique_connec2.tie_or_stop(12, future::empty())
        });
        swarm_ctrl.listen_on("/memory".parse().unwrap()).unwrap();
        let dial_success = unique_connec
            .dial(&swarm_ctrl, &"/memory".parse().unwrap(), tx)
            .map(|val| { assert_eq!(val, 12); });
        assert_eq!(unique_connec.state(), UniqueConnecState::Pending);
        let future = dial_success.select(swarm_future.for_each(|_| Ok(()))).map_err(|(err, _)| err);
        current_thread::Runtime::new().unwrap().block_on(future).unwrap();
        assert_eq!(unique_connec.state(), UniqueConnecState::Full);
    }

    #[test]
    fn invalid_multiaddr_produces_error() {
        // Tests that passing an invalid multiaddress generates an error.
        let unique = UniqueConnec::empty();
        assert_eq!(unique.state(), UniqueConnecState::Empty);
        let unique2 = unique.clone();
        let (swarm_ctrl, _swarm_fut) = swarm(DeniedTransport, |_, _| {
            unique2.tie_or_stop((), future::empty())
        });
        let fut = unique.dial(&swarm_ctrl, &"/ip4/1.2.3.4".parse().unwrap(), DeniedTransport);
        assert!(fut.wait().is_err());
        assert_eq!(unique.state(), UniqueConnecState::Errored);
    }

    #[test]
    fn tie_or_stop_stops() {
        // Tests that `tie_or_stop` destroys additional futures passed to it.
        let (tx, rx) = transport::connector();
        let unique_connec = UniqueConnec::empty();
        let unique_connec2 = unique_connec.clone();
        // This channel is used to detect whether the future has been dropped:
        // `msg_tx.send()` errors out once the receiving future is gone.
        let (msg_tx, msg_rx) = oneshot::channel();
        let mut num_connec = 0;
        let mut msg_rx = Some(msg_rx);
        let (swarm_ctrl1, swarm_future1) = swarm(rx.with_dummy_muxing(), move |_, _| {
            num_connec += 1;
            if num_connec == 1 {
                unique_connec2.tie_or_stop(12, future::Either::A(future::empty()))
            } else {
                let fut = msg_rx.take().unwrap().map_err(|_| panic!());
                unique_connec2.tie_or_stop(13, future::Either::B(fut))
            }
        });
        swarm_ctrl1.listen_on("/memory".parse().unwrap()).unwrap();
        let (swarm_ctrl2, swarm_future2) = swarm(tx.clone().with_dummy_muxing(), move |_, _| {
            future::empty()
        });
        let dial_success = unique_connec
            .dial(&swarm_ctrl2, &"/memory".parse().unwrap(), tx.clone())
            .map(|val| { assert_eq!(val, 12); })
            .inspect({
                let c = unique_connec.clone();
                move |_| { assert!(c.is_alive()); }
            })
            .and_then(|_| {
                tokio_timer::sleep(Duration::from_secs(1))
                    .map_err(|_| unreachable!())
            })
            .and_then(move |_| {
                swarm_ctrl2.dial("/memory".parse().unwrap(), tx)
                    .unwrap_or_else(|_| panic!())
            })
            .inspect({
                let c = unique_connec.clone();
                move |_| {
                    assert_eq!(c.poll(), Some(12)); // Not 13
                    assert!(msg_tx.send(()).is_err());
                }
            });
        let future = dial_success
            .select(swarm_future2.for_each(|_| Ok(()))).map(|_| ()).map_err(|(err, _)| err)
            .select(swarm_future1.for_each(|_| Ok(()))).map(|_| ()).map_err(|(err, _)| err);
        current_thread::Runtime::new().unwrap().block_on(future).unwrap();
        assert!(unique_connec.is_alive());
    }

    #[test]
    fn tie_or_passthrough_passes_through() {
        // Tests that `tie_or_passthrough` doesn't delete additional futures passed to it when
        // it is already full, and doesn't gets its value modified when that happens.
        let (tx, rx) = transport::connector();
        let unique_connec = UniqueConnec::empty();
        let unique_connec2 = unique_connec.clone();
        let mut num = 12;
        let (swarm_ctrl, swarm_future) = swarm(rx.with_dummy_muxing(), move |_, _| {
            // Note that this handles both the dial and the listen.
            let fut = future::empty().then(|_: Result<(), ()>| -> Result<(), IoError> { panic!() });
            num += 1;
            unique_connec2.tie_or_passthrough(num, fut)
        });
        swarm_ctrl.listen_on("/memory".parse().unwrap()).unwrap();
        let dial_success = unique_connec
            .dial(&swarm_ctrl, &"/memory".parse().unwrap(), tx.clone())
            .map(|val| { assert_eq!(val, 13); });
        swarm_ctrl.dial("/memory".parse().unwrap(), tx)
            .unwrap();
        let future = dial_success.select(swarm_future.for_each(|_| Ok(()))).map_err(|(err, _)| err);
        current_thread::Runtime::new().unwrap().block_on(future).unwrap();
        assert_eq!(unique_connec.poll(), Some(13));
    }

    #[test]
    fn cleared_when_future_drops() {
        // Tests that the `UniqueConnec` gets cleared when the future we associate with it gets
        // destroyed.
        let (tx, rx) = transport::connector();
        let unique_connec = UniqueConnec::empty();
        let unique_connec2 = unique_connec.clone();
        // Completing `msg_tx` makes the tied future finish, which must clear the connec.
        let (msg_tx, msg_rx) = oneshot::channel();
        let mut msg_rx = Some(msg_rx);
        let (swarm_ctrl1, swarm_future1) = swarm(rx.with_dummy_muxing(), move |_, _| {
            future::empty()
        });
        swarm_ctrl1.listen_on("/memory".parse().unwrap()).unwrap();
        let (swarm_ctrl2, swarm_future2) = swarm(tx.clone().with_dummy_muxing(), move |_, _| {
            let fut = msg_rx.take().unwrap().map_err(|_| -> IoError { unreachable!() });
            unique_connec2.tie_or_stop(12, fut)
        });
        let dial_success = unique_connec
            .dial(&swarm_ctrl2, &"/memory".parse().unwrap(), tx)
            .map(|val| { assert_eq!(val, 12); })
            .inspect({
                let c = unique_connec.clone();
                move |_| { assert!(c.is_alive()); }
            })
            .and_then(|_| {
                msg_tx.send(()).unwrap();
                tokio_timer::sleep(Duration::from_secs(1))
                    .map_err(|_| unreachable!())
            })
            .inspect({
                let c = unique_connec.clone();
                move |_| { assert!(!c.is_alive()); }
            });
        let future = dial_success
            .select(swarm_future1.for_each(|_| Ok(()))).map(|_| ()).map_err(|(err, _)| err)
            .select(swarm_future2.for_each(|_| Ok(()))).map(|_| ()).map_err(|(err, _)| err);
        current_thread::Runtime::new().unwrap().block_on(future).unwrap();
        assert!(!unique_connec.is_alive());
    }

    #[test]
    fn future_drops_when_cleared() {
        // Tests that the future returned by `tie_or_*` ends when the `UniqueConnec` get cleared.
        let (tx, rx) = transport::connector();
        let unique_connec = UniqueConnec::empty();
        let unique_connec2 = unique_connec.clone();
        let (swarm_ctrl1, swarm_future1) = swarm(rx.with_dummy_muxing(), move |_, _| {
            future::empty()
        });
        swarm_ctrl1.listen_on("/memory".parse().unwrap()).unwrap();
        // Set to true once the future returned by `tie_or_stop` has completed.
        let finished = Arc::new(atomic::AtomicBool::new(false));
        let finished2 = finished.clone();
        let (swarm_ctrl2, swarm_future2) = swarm(tx.clone().with_dummy_muxing(), move |_, _| {
            let finished2 = finished2.clone();
            unique_connec2.tie_or_stop(12, future::empty()).then(move |v| {
                finished2.store(true, atomic::Ordering::Relaxed);
                v
            })
        });
        let dial_success = unique_connec
            .dial(&swarm_ctrl2, &"/memory".parse().unwrap(), tx)
            .map(|val| { assert_eq!(val, 12); })
            .inspect({
                let c = unique_connec.clone();
                move |_| {
                    assert!(c.is_alive());
                    c.clear();
                    assert!(!c.is_alive());
                }
            })
            .and_then(|_| {
                tokio_timer::sleep(Duration::from_secs(1))
                    .map_err(|_| unreachable!())
            })
            .inspect({
                let c = unique_connec.clone();
                move |_| {
                    assert!(finished.load(atomic::Ordering::Relaxed));
                    assert!(!c.is_alive());
                }
            });
        let future = dial_success
            .select(swarm_future1.for_each(|_| Ok(()))).map(|_| ()).map_err(|(err, _)| err)
            .select(swarm_future2.for_each(|_| Ok(()))).map(|_| ()).map_err(|(err, _)| err);
        current_thread::Runtime::new().unwrap().block_on(future).unwrap();
        assert!(!unique_connec.is_alive());
    }

    #[test]
    fn future_drops_when_destroyed() {
        // Tests that the future returned by `tie_or_*` ends when the `UniqueConnec` get dropped.
        let (tx, rx) = transport::connector();
        let unique_connec = UniqueConnec::empty();
        let mut unique_connec2 = Some(unique_connec.clone());
        let (swarm_ctrl1, swarm_future1) = swarm(rx.with_dummy_muxing(), move |_, _| {
            future::empty()
        });
        swarm_ctrl1.listen_on("/memory".parse().unwrap()).unwrap();
        let finished = Arc::new(atomic::AtomicBool::new(false));
        let finished2 = finished.clone();
        let (swarm_ctrl2, swarm_future2) = swarm(tx.clone().with_dummy_muxing(), move |_, _| {
            let finished2 = finished2.clone();
            unique_connec2.take().unwrap().tie_or_stop(12, future::empty()).then(move |v| {
                finished2.store(true, atomic::Ordering::Relaxed);
                v
            })
        });
        let dial_success = unique_connec
            .dial(&swarm_ctrl2, &"/memory".parse().unwrap(), tx)
            .map(|val| { assert_eq!(val, 12); })
            .inspect(move |_| {
                assert!(unique_connec.is_alive());
                drop(unique_connec);
            })
            .and_then(|_| {
                tokio_timer::sleep(Duration::from_secs(1))
                    .map_err(|_| unreachable!())
            })
            .inspect(move |_| {
                assert!(finished.load(atomic::Ordering::Relaxed));
            });
        let future = dial_success
            .select(swarm_future1.for_each(|_| Ok(()))).map(|_| ()).map_err(|(err, _)| err)
            .select(swarm_future2.for_each(|_| Ok(()))).map(|_| ()).map_err(|(err, _)| err);
        current_thread::Runtime::new().unwrap().block_on(future).unwrap();
    }

    #[test]
    fn error_if_unique_connec_destroyed_before_future() {
        // Tests that the future returned by `dial` returns an error if the `UniqueConnec` no
        // longer exists.
        let (tx, rx) = transport::connector();
        let (swarm_ctrl, swarm_future) = swarm(rx.with_dummy_muxing(), move |_, _| {
            future::empty()
        });
        swarm_ctrl.listen_on("/memory".parse().unwrap()).unwrap();
        let unique_connec = UniqueConnec::empty();
        let dial_success = unique_connec
            .dial(&swarm_ctrl, &"/memory".parse().unwrap(), tx)
            .then(|val: Result<(), IoError>| {
                assert!(val.is_err());
                Ok(())
            });
        drop(unique_connec);
        let future = dial_success
            .select(swarm_future.for_each(|_| Ok(()))).map(|_| ()).map_err(|(err, _)| err);
        current_thread::Runtime::new().unwrap().block_on(future).unwrap();
    }

    // TODO: test that dialing is interrupted when UniqueConnec is cleared
    // TODO: test that dialing is interrupted when UniqueConnec is dropped
}
| 39.680263 | 110 | 0.547568 |
0aa795eb63659530394a859429d60b7ea0c9f418 | 1,215 | use super::vm;
use crate::types::{ElementType, Table};
/// Backing storage for a table's elements, specialized per element type.
#[derive(Debug, Clone)]
pub enum TableElements {
    /// This is intended to be a caller-checked Anyfunc.
    Anyfunc(Vec<vm::Anyfunc>),
}
/// Owned storage backing a single table instance.
#[derive(Debug)]
pub struct TableBacking {
    /// The element buffer itself.
    pub elements: TableElements,
    /// The declared maximum element count, if the table specified one.
    pub max: Option<u32>,
}
impl TableBacking {
pub fn new(table: &Table) -> Self {
match table.ty {
ElementType::Anyfunc => {
let initial_table_backing_len = match table.max {
Some(max) => max,
None => table.min,
} as usize;
Self {
elements: TableElements::Anyfunc(vec![
vm::Anyfunc::null();
initial_table_backing_len
]),
max: table.max,
}
}
}
}
pub fn into_vm_table(&mut self) -> vm::LocalTable {
match self.elements {
TableElements::Anyfunc(ref mut funcs) => vm::LocalTable {
base: funcs.as_mut_ptr() as *mut u8,
current_elements: funcs.len(),
capacity: funcs.capacity(),
},
}
}
}
| 26.413043 | 69 | 0.488889 |
017479d267074aee1c1f3b350fd466665dc086f4 | 87,625 | pub(crate) use self::as_encoding_agnostic_metadata_key::AsEncodingAgnosticMetadataKey;
pub(crate) use self::as_metadata_key::AsMetadataKey;
pub(crate) use self::into_metadata_key::IntoMetadataKey;
use super::encoding::{Ascii, Binary, ValueEncoding};
use super::key::{InvalidMetadataKey, MetadataKey};
use super::value::MetadataValue;
use std::marker::PhantomData;
/// A set of gRPC custom metadata entries.
///
/// # Examples
///
/// Basic usage
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
///
/// map.insert("x-host", "example.com".parse().unwrap());
/// map.insert("x-number", "123".parse().unwrap());
/// map.insert_bin("trace-proto-bin", MetadataValue::from_bytes(b"[binary data]"));
///
/// assert!(map.contains_key("x-host"));
/// assert!(!map.contains_key("x-location"));
///
/// assert_eq!(map.get("x-host").unwrap(), "example.com");
///
/// map.remove("x-host");
///
/// assert!(!map.contains_key("x-host"));
/// ```
#[derive(Clone, Debug, Default)]
pub struct MetadataMap {
    // All entries, ascii and binary alike, live in one HTTP header map; the
    // typed accessors re-interpret entries based on the "-bin" key suffix.
    headers: http::HeaderMap,
}
/// `MetadataMap` entry iterator.
///
/// Yields `KeyAndValueRef` values. The same header name may be yielded
/// more than once if it has more than one associated value.
#[derive(Debug)]
pub struct Iter<'a> {
    // Wraps the underlying HTTP header iterator; entries are re-classified
    // as ascii or binary when yielded.
    inner: http::header::Iter<'a, http::header::HeaderValue>,
}

/// Reference to a key and an associated value in a `MetadataMap`. It can point
/// to either an ascii or a binary ("*-bin") key.
#[derive(Debug)]
pub enum KeyAndValueRef<'a> {
    /// An ascii metadata key and value.
    Ascii(&'a MetadataKey<Ascii>, &'a MetadataValue<Ascii>),
    /// A binary metadata key and value.
    Binary(&'a MetadataKey<Binary>, &'a MetadataValue<Binary>),
}

/// Reference to a key and an associated value in a `MetadataMap`. It can point
/// to either an ascii or a binary ("*-bin") key.
#[derive(Debug)]
pub enum KeyAndMutValueRef<'a> {
    /// An ascii metadata key and value.
    Ascii(&'a MetadataKey<Ascii>, &'a mut MetadataValue<Ascii>),
    /// A binary metadata key and value.
    Binary(&'a MetadataKey<Binary>, &'a mut MetadataValue<Binary>),
}

/// `MetadataMap` entry iterator.
///
/// Yields `(&MetadataKey, &mut value)` tuples. The same header name may be yielded
/// more than once if it has more than one associated value.
#[derive(Debug)]
pub struct IterMut<'a> {
    // Mutable counterpart of `Iter`, backed by the HTTP header map.
    inner: http::header::IterMut<'a, http::header::HeaderValue>,
}
/// A drain iterator of all values associated with a single metadata key.
#[derive(Debug)]
pub struct ValueDrain<'a, VE: ValueEncoding> {
    inner: http::header::ValueDrain<'a, http::header::HeaderValue>,
    // Zero-sized marker pinning the iterator to one value encoding.
    phantom: PhantomData<VE>,
}

/// An iterator over `MetadataMap` keys.
///
/// Yields `KeyRef` values. Each header name is yielded only once, even if it
/// has more than one associated value.
#[derive(Debug)]
pub struct Keys<'a> {
    inner: http::header::Keys<'a, http::header::HeaderValue>,
}

/// Reference to a key in a `MetadataMap`. It can point
/// to either an ascii or a binary ("*-bin") key.
#[derive(Debug)]
pub enum KeyRef<'a> {
    /// An ascii metadata key and value.
    Ascii(&'a MetadataKey<Ascii>),
    /// A binary metadata key and value.
    Binary(&'a MetadataKey<Binary>),
}

/// `MetadataMap` value iterator.
///
/// Yields `ValueRef` values. Each value contained in the `MetadataMap` will be
/// yielded.
#[derive(Debug)]
pub struct Values<'a> {
    // Need to use http::header::Iter and not http::header::Values to be able
    // to know if a value is binary or not.
    inner: http::header::Iter<'a, http::header::HeaderValue>,
}

/// Reference to a value in a `MetadataMap`. It can point
/// to either an ascii or a binary ("*-bin" key) value.
#[derive(Debug)]
pub enum ValueRef<'a> {
    /// An ascii metadata key and value.
    Ascii(&'a MetadataValue<Ascii>),
    /// A binary metadata key and value.
    Binary(&'a MetadataValue<Binary>),
}
/// `MetadataMap` value iterator.
///
/// Each value contained in the `MetadataMap` will be yielded.
#[derive(Debug)]
pub struct ValuesMut<'a> {
    // Need to use http::header::IterMut and not http::header::ValuesMut to be
    // able to know if a value is binary or not.
    inner: http::header::IterMut<'a, http::header::HeaderValue>,
}

/// Reference to a value in a `MetadataMap`. It can point
/// to either an ascii or a binary ("*-bin" key) value.
#[derive(Debug)]
pub enum ValueRefMut<'a> {
    /// An ascii metadata key and value.
    Ascii(&'a mut MetadataValue<Ascii>),
    /// A binary metadata key and value.
    Binary(&'a mut MetadataValue<Binary>),
}

/// An iterator of all values associated with a single metadata key.
#[derive(Debug)]
pub struct ValueIter<'a, VE: ValueEncoding> {
    // `None` represents "no values", allowing a missed lookup to produce an
    // empty iterator without allocating.
    inner: Option<http::header::ValueIter<'a, http::header::HeaderValue>>,
    phantom: PhantomData<VE>,
}

/// An iterator of all values associated with a single metadata key.
#[derive(Debug)]
pub struct ValueIterMut<'a, VE: ValueEncoding> {
    inner: http::header::ValueIterMut<'a, http::header::HeaderValue>,
    phantom: PhantomData<VE>,
}

/// A view to all values stored in a single entry.
///
/// This struct is returned by `MetadataMap::get_all` and
/// `MetadataMap::get_all_bin`.
#[derive(Debug)]
pub struct GetAll<'a, VE: ValueEncoding> {
    // `None` means the key was absent (or of the wrong encoding); iterating
    // such a view yields nothing.
    inner: Option<http::header::GetAll<'a, http::header::HeaderValue>>,
    phantom: PhantomData<VE>,
}
/// A view into a single location in a `MetadataMap`, which may be vacant or
/// occupied.
#[derive(Debug)]
pub enum Entry<'a, VE: ValueEncoding> {
    /// An occupied entry
    Occupied(OccupiedEntry<'a, VE>),
    /// A vacant entry
    Vacant(VacantEntry<'a, VE>),
}

/// A view into a single empty location in a `MetadataMap`.
///
/// This struct is returned as part of the `Entry` enum.
#[derive(Debug)]
pub struct VacantEntry<'a, VE: ValueEncoding> {
    inner: http::header::VacantEntry<'a, http::header::HeaderValue>,
    // Zero-sized marker tying the entry to its value encoding.
    phantom: PhantomData<VE>,
}

/// A view into a single occupied location in a `MetadataMap`.
///
/// This struct is returned as part of the `Entry` enum.
#[derive(Debug)]
pub struct OccupiedEntry<'a, VE: ValueEncoding> {
    inner: http::header::OccupiedEntry<'a, http::header::HeaderValue>,
    phantom: PhantomData<VE>,
}

// Name of the header the gRPC protocol uses to convey the request deadline.
pub(crate) const GRPC_TIMEOUT_HEADER: &str = "grpc-timeout";
// ===== impl MetadataMap =====
impl MetadataMap {
// Headers reserved by the gRPC protocol.
//
// `into_sanitized_headers` strips these from the map before the headers are
// handed back to the transport, so users cannot override protocol headers.
pub(crate) const GRPC_RESERVED_HEADERS: [&'static str; 6] = [
    "te",
    "user-agent",
    "content-type",
    "grpc-message",
    "grpc-message-type",
    "grpc-status",
];
/// Create an empty `MetadataMap`.
///
/// The map will be created without any capacity. This function will not
/// allocate.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let map = MetadataMap::new();
///
/// assert!(map.is_empty());
/// assert_eq!(0, map.capacity());
/// ```
pub fn new() -> Self {
MetadataMap::with_capacity(0)
}
/// Convert an HTTP HeaderMap to a MetadataMap
pub fn from_headers(headers: http::HeaderMap) -> Self {
MetadataMap { headers }
}
/// Convert a MetadataMap into a HTTP HeaderMap, handing back the underlying
/// storage unchanged.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("x-host", "example.com".parse().unwrap());
///
/// let http_map = map.into_headers();
///
/// assert_eq!(http_map.get("x-host").unwrap(), "example.com");
/// ```
pub fn into_headers(self) -> http::HeaderMap {
    self.headers
}
/// Converts into an HTTP `HeaderMap` after removing every header reserved
/// by the gRPC protocol (see `GRPC_RESERVED_HEADERS`).
pub(crate) fn into_sanitized_headers(mut self) -> http::HeaderMap {
    for reserved in Self::GRPC_RESERVED_HEADERS.iter() {
        self.headers.remove(*reserved);
    }
    self.headers
}
/// Create an empty `MetadataMap` pre-sized for about `capacity` entries.
///
/// The size is a best-effort hint: certain usage patterns may still trigger
/// reallocation before `capacity` entries are stored, and more room than
/// requested may be reserved.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let map: MetadataMap = MetadataMap::with_capacity(10);
///
/// assert!(map.is_empty());
/// assert!(map.capacity() >= 10);
/// ```
pub fn with_capacity(capacity: usize) -> MetadataMap {
    let headers = http::HeaderMap::with_capacity(capacity);
    MetadataMap { headers }
}
/// Returns the total number of values (ascii and binary) stored in the map.
///
/// Because a single key may carry several values, this count can exceed the
/// number of keys reported by `keys_len`.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// assert_eq!(0, map.len());
///
/// map.insert("x-host-ip", "127.0.0.1".parse().unwrap());
/// map.insert_bin("x-host-name-bin", MetadataValue::from_bytes(b"localhost"));
/// assert_eq!(2, map.len());
///
/// map.append("x-host-ip", "text/html".parse().unwrap());
/// assert_eq!(3, map.len());
/// ```
pub fn len(&self) -> usize {
    self.headers.len()
}
/// Returns the number of distinct keys (ascii and binary) stored in the map.
///
/// Always less than or equal to `len()`, since one key may hold several
/// values.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// assert_eq!(0, map.keys_len());
///
/// map.insert("x-host-ip", "127.0.0.1".parse().unwrap());
/// map.insert_bin("x-host-name-bin", MetadataValue::from_bytes(b"localhost"));
/// map.append("x-host-ip", "text/html".parse().unwrap());
///
/// assert_eq!(2, map.keys_len());
/// ```
pub fn keys_len(&self) -> usize {
    self.headers.keys_len()
}
/// Returns true if the map holds no entries at all.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// assert!(map.is_empty());
///
/// map.insert("x-host", "hello.world".parse().unwrap());
/// assert!(!map.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
    self.headers.is_empty()
}
/// Removes every key-value pair from the map while keeping the already
/// allocated storage around for reuse.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("x-host", "hello.world".parse().unwrap());
///
/// map.clear();
/// assert!(map.is_empty());
/// assert!(map.capacity() > 0);
/// ```
pub fn clear(&mut self) {
    self.headers.clear();
}
/// Returns how many entries the map can hold without reallocating.
///
/// This is an approximation: some usage patterns can trigger additional
/// allocations before the returned capacity is filled.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// assert_eq!(0, map.capacity());
///
/// map.insert("x-host", "hello.world".parse().unwrap());
/// assert_eq!(6, map.capacity());
/// ```
pub fn capacity(&self) -> usize {
    self.headers.capacity()
}
/// Reserves capacity for at least `additional` further entries.
///
/// Like `with_capacity`, this is a best-effort hint: the map may reserve
/// more space to avoid frequent reallocations, and certain usage patterns
/// can still allocate before `additional` entries have been inserted.
///
/// # Panics
///
/// Panics if the new allocation size overflows `usize`.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.reserve(10);
/// # map.insert("x-host", "bar".parse().unwrap());
/// ```
pub fn reserve(&mut self, additional: usize) {
    self.headers.reserve(additional);
}
/// Returns a reference to the value associated with an ascii key (one whose
/// name does not end in "-bin"). For binary entries, use `get_bin`.
///
/// If several values share the key, the first one is returned; use
/// `get_all` to see them all. Lookups using a binary key or an invalid key
/// string simply find nothing and yield `None`.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// assert!(map.get("x-host").is_none());
///
/// map.insert("x-host", "hello".parse().unwrap());
/// map.append("x-host", "world".parse().unwrap());
/// assert_eq!(map.get("x-host").unwrap(), &"hello");
///
/// // Binary keys and malformed key strings are simply not found.
/// map.append_bin("host-bin", MetadataValue::from_bytes(b"world"));
/// assert!(map.get("host-bin").is_none());
/// assert!(map.get("host{}bin").is_none());
/// ```
pub fn get<K>(&self, key: K) -> Option<&MetadataValue<Ascii>>
where
    K: AsMetadataKey<Ascii>,
{
    key.get(self)
}
/// Like `get`, but for binary keys (for example "trace-proto-bin").
///
/// If several values share the key, the first one is returned. Lookups
/// using an ascii key or an invalid key string find nothing and yield
/// `None`.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// assert!(map.get_bin("trace-proto-bin").is_none());
///
/// map.insert_bin("trace-proto-bin", MetadataValue::from_bytes(b"hello"));
/// map.append_bin("trace-proto-bin", MetadataValue::from_bytes(b"world"));
/// assert_eq!(map.get_bin("trace-proto-bin").unwrap(), &"hello");
///
/// // Ascii keys and malformed key strings are simply not found.
/// map.append("host", "world".parse().unwrap());
/// assert!(map.get_bin("host").is_none());
/// assert!(map.get_bin("host{}-bin").is_none());
/// ```
pub fn get_bin<K>(&self, key: K) -> Option<&MetadataValue<Binary>>
where
    K: AsMetadataKey<Binary>,
{
    key.get(self)
}
/// Returns a mutable reference to the value associated with an ascii key
/// (one whose name does not end in "-bin"). For binary entries, use
/// `get_bin_mut`.
///
/// If several values share the key, the first one is returned; use `entry`
/// to reach the others. Lookups using a binary key or an invalid key string
/// find nothing and yield `None`.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::default();
/// map.insert("x-host", "hello".parse().unwrap());
/// map.get_mut("x-host").unwrap().set_sensitive(true);
///
/// assert!(map.get("x-host").unwrap().is_sensitive());
/// ```
pub fn get_mut<K>(&mut self, key: K) -> Option<&mut MetadataValue<Ascii>>
where
    K: AsMetadataKey<Ascii>,
{
    key.get_mut(self)
}
/// Like `get_mut`, but for binary keys (for example "trace-proto-bin").
///
/// Lookups using an ascii key or an invalid key string find nothing and
/// yield `None`.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::default();
/// map.insert_bin("trace-proto-bin", MetadataValue::from_bytes(b"hello"));
/// map.get_bin_mut("trace-proto-bin").unwrap().set_sensitive(true);
///
/// assert!(map.get_bin("trace-proto-bin").unwrap().is_sensitive());
/// ```
pub fn get_bin_mut<K>(&mut self, key: K) -> Option<&mut MetadataValue<Binary>>
where
    K: AsMetadataKey<Binary>,
{
    key.get_mut(self)
}
/// Returns a view of all values associated with an ascii key (one whose
/// name does not end in "-bin"). For binary entries, use `get_all_bin`.
///
/// The returned view incurs no allocation and iterates the values in the
/// order they were added; it is empty when the key is absent, binary, or
/// not a valid key string. See [`GetAll`] for more details.
///
/// [`GetAll`]: struct.GetAll.html
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
///
/// map.insert("x-host", "hello".parse().unwrap());
/// map.append("x-host", "goodbye".parse().unwrap());
///
/// let view = map.get_all("x-host");
/// let mut iter = view.iter();
/// assert_eq!(&"hello", iter.next().unwrap());
/// assert_eq!(&"goodbye", iter.next().unwrap());
/// assert!(iter.next().is_none());
/// ```
pub fn get_all<K>(&self, key: K) -> GetAll<'_, Ascii>
where
    K: AsMetadataKey<Ascii>,
{
    let inner = key.get_all(self);
    GetAll { inner, phantom: PhantomData }
}
/// Like get_all, but for Binary keys (for example "trace-proto-bin").
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
///
/// map.insert_bin("trace-proto-bin", MetadataValue::from_bytes(b"hello"));
/// map.append_bin("trace-proto-bin", MetadataValue::from_bytes(b"goodbye"));
///
/// {
/// let view = map.get_all_bin("trace-proto-bin");
///
/// let mut iter = view.iter();
/// assert_eq!(&"hello", iter.next().unwrap());
/// assert_eq!(&"goodbye", iter.next().unwrap());
/// assert!(iter.next().is_none());
/// }
///
/// // Attempting to read a key of the wrong type fails by not
/// // finding anything.
/// map.append("host", "world".parse().unwrap());
/// assert!(map.get_all_bin("host").iter().next().is_none());
/// assert!(map.get_all_bin("host".to_string()).iter().next().is_none());
/// assert!(map.get_all_bin(&("host".to_string())).iter().next().is_none());
///
/// // Attempting to read an invalid key string fails by not
/// // finding anything.
/// assert!(map.get_all_bin("host{}-bin").iter().next().is_none());
/// assert!(map.get_all_bin("host{}-bin".to_string()).iter().next().is_none());
/// assert!(map.get_all_bin(&("host{}-bin".to_string())).iter().next().is_none());
/// ```
pub fn get_all_bin<K>(&self, key: K) -> GetAll<'_, Binary>
where
K: AsMetadataKey<Binary>,
{
GetAll {
inner: key.get_all(self),
phantom: PhantomData,
}
}
/// Returns true if the map contains a value for the specified key. This
/// method works for both ascii and binary entries.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// assert!(!map.contains_key("x-host"));
///
/// map.append_bin("host-bin", MetadataValue::from_bytes(b"world"));
/// map.insert("x-host", "world".parse().unwrap());
///
/// // contains_key works for both Binary and Ascii keys:
/// assert!(map.contains_key("x-host"));
/// assert!(map.contains_key("host-bin"));
///
/// // contains_key returns false for invalid keys:
/// assert!(!map.contains_key("x{}host"));
/// ```
pub fn contains_key<K>(&self, key: K) -> bool
where
K: AsEncodingAgnosticMetadataKey,
{
key.contains_key(self)
}
/// An iterator visiting all key-value pairs (both ascii and binary).
///
/// The iteration order is arbitrary, but consistent across platforms for
/// the same crate version. Each key will be yielded once per associated
/// value. So, if a key has 3 associated values, it will be yielded 3 times.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
///
/// map.insert("x-word", "hello".parse().unwrap());
/// map.append("x-word", "goodbye".parse().unwrap());
/// map.insert("x-number", "123".parse().unwrap());
///
/// for key_and_value in map.iter() {
/// match key_and_value {
/// KeyAndValueRef::Ascii(ref key, ref value) =>
/// println!("Ascii: {:?}: {:?}", key, value),
/// KeyAndValueRef::Binary(ref key, ref value) =>
/// println!("Binary: {:?}: {:?}", key, value),
/// }
/// }
/// ```
pub fn iter(&self) -> Iter<'_> {
Iter {
inner: self.headers.iter(),
}
}
/// An iterator visiting all key-value pairs, with mutable value references.
///
/// The iterator order is arbitrary, but consistent across platforms for the
/// same crate version. Each key will be yielded once per associated value,
/// so if a key has 3 associated values, it will be yielded 3 times.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
///
/// map.insert("x-word", "hello".parse().unwrap());
/// map.append("x-word", "goodbye".parse().unwrap());
/// map.insert("x-number", "123".parse().unwrap());
///
/// for key_and_value in map.iter_mut() {
/// match key_and_value {
/// KeyAndMutValueRef::Ascii(key, mut value) =>
/// value.set_sensitive(true),
/// KeyAndMutValueRef::Binary(key, mut value) =>
/// value.set_sensitive(false),
/// }
/// }
/// ```
pub fn iter_mut(&mut self) -> IterMut<'_> {
IterMut {
inner: self.headers.iter_mut(),
}
}
/// An iterator visiting all keys.
///
/// The iteration order is arbitrary, but consistent across platforms for
/// the same crate version. Each key will be yielded only once even if it
/// has multiple associated values.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
///
/// map.insert("x-word", "hello".parse().unwrap());
/// map.append("x-word", "goodbye".parse().unwrap());
/// map.insert_bin("x-number-bin", MetadataValue::from_bytes(b"123"));
///
/// for key in map.keys() {
/// match key {
/// KeyRef::Ascii(ref key) =>
/// println!("Ascii key: {:?}", key),
/// KeyRef::Binary(ref key) =>
/// println!("Binary key: {:?}", key),
/// }
/// println!("{:?}", key);
/// }
/// ```
pub fn keys(&self) -> Keys<'_> {
Keys {
inner: self.headers.keys(),
}
}
/// An iterator visiting all values (both ascii and binary).
///
/// The iteration order is arbitrary, but consistent across platforms for
/// the same crate version.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
///
/// map.insert("x-word", "hello".parse().unwrap());
/// map.append("x-word", "goodbye".parse().unwrap());
/// map.insert_bin("x-number-bin", MetadataValue::from_bytes(b"123"));
///
/// for value in map.values() {
/// match value {
/// ValueRef::Ascii(ref value) =>
/// println!("Ascii value: {:?}", value),
/// ValueRef::Binary(ref value) =>
/// println!("Binary value: {:?}", value),
/// }
/// println!("{:?}", value);
/// }
/// ```
pub fn values(&self) -> Values<'_> {
Values {
inner: self.headers.iter(),
}
}
/// An iterator visiting all values mutably.
///
/// The iteration order is arbitrary, but consistent across platforms for
/// the same crate version.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::default();
///
/// map.insert("x-word", "hello".parse().unwrap());
/// map.append("x-word", "goodbye".parse().unwrap());
/// map.insert("x-number", "123".parse().unwrap());
///
/// for value in map.values_mut() {
/// match value {
/// ValueRefMut::Ascii(mut value) =>
/// value.set_sensitive(true),
/// ValueRefMut::Binary(mut value) =>
/// value.set_sensitive(false),
/// }
/// }
/// ```
pub fn values_mut(&mut self) -> ValuesMut<'_> {
ValuesMut {
inner: self.headers.iter_mut(),
}
}
/// Gets the given ascii key's corresponding entry in the map for in-place
/// manipulation. For binary keys, use `entry_bin`.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::default();
///
/// let headers = &[
/// "content-length",
/// "x-hello",
/// "Content-Length",
/// "x-world",
/// ];
///
/// for &header in headers {
/// let counter = map.entry(header).unwrap().or_insert("".parse().unwrap());
/// *counter = format!("{}{}", counter.to_str().unwrap(), "1").parse().unwrap();
/// }
///
/// assert_eq!(map.get("content-length").unwrap(), "11");
/// assert_eq!(map.get("x-hello").unwrap(), "1");
///
/// // Gracefully handles parting invalid key strings
/// assert!(!map.entry("a{}b").is_ok());
///
/// // Attempting to read a key of the wrong type fails by not
/// // finding anything.
/// map.append_bin("host-bin", MetadataValue::from_bytes(b"world"));
/// assert!(!map.entry("host-bin").is_ok());
/// assert!(!map.entry("host-bin".to_string()).is_ok());
/// assert!(!map.entry(&("host-bin".to_string())).is_ok());
///
/// // Attempting to read an invalid key string fails by not
/// // finding anything.
/// assert!(!map.entry("host{}").is_ok());
/// assert!(!map.entry("host{}".to_string()).is_ok());
/// assert!(!map.entry(&("host{}".to_string())).is_ok());
/// ```
pub fn entry<K>(&mut self, key: K) -> Result<Entry<'_, Ascii>, InvalidMetadataKey>
where
K: AsMetadataKey<Ascii>,
{
self.generic_entry::<Ascii, K>(key)
}
/// Gets the given Binary key's corresponding entry in the map for in-place
/// manipulation.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// # use std::str;
/// let mut map = MetadataMap::default();
///
/// let headers = &[
/// "content-length-bin",
/// "x-hello-bin",
/// "Content-Length-bin",
/// "x-world-bin",
/// ];
///
/// for &header in headers {
/// let counter = map.entry_bin(header).unwrap().or_insert(MetadataValue::from_bytes(b""));
/// *counter = MetadataValue::from_bytes(format!("{}{}", str::from_utf8(counter.to_bytes().unwrap().as_ref()).unwrap(), "1").as_bytes());
/// }
///
/// assert_eq!(map.get_bin("content-length-bin").unwrap(), "11");
/// assert_eq!(map.get_bin("x-hello-bin").unwrap(), "1");
///
/// // Attempting to read a key of the wrong type fails by not
/// // finding anything.
/// map.append("host", "world".parse().unwrap());
/// assert!(!map.entry_bin("host").is_ok());
/// assert!(!map.entry_bin("host".to_string()).is_ok());
/// assert!(!map.entry_bin(&("host".to_string())).is_ok());
///
/// // Attempting to read an invalid key string fails by not
/// // finding anything.
/// assert!(!map.entry_bin("host{}-bin").is_ok());
/// assert!(!map.entry_bin("host{}-bin".to_string()).is_ok());
/// assert!(!map.entry_bin(&("host{}-bin".to_string())).is_ok());
/// ```
pub fn entry_bin<K>(&mut self, key: K) -> Result<Entry<'_, Binary>, InvalidMetadataKey>
where
K: AsMetadataKey<Binary>,
{
self.generic_entry::<Binary, K>(key)
}
fn generic_entry<VE: ValueEncoding, K>(
&mut self,
key: K,
) -> Result<Entry<'_, VE>, InvalidMetadataKey>
where
K: AsMetadataKey<VE>,
{
match key.entry(self) {
Ok(entry) => Ok(match entry {
http::header::Entry::Occupied(e) => Entry::Occupied(OccupiedEntry {
inner: e,
phantom: PhantomData,
}),
http::header::Entry::Vacant(e) => Entry::Vacant(VacantEntry {
inner: e,
phantom: PhantomData,
}),
}),
Err(err) => Err(err),
}
}
/// Inserts an ascii key-value pair into the map. To insert a binary entry,
/// use `insert_bin`.
///
/// This method panics when the given key is a string and it cannot be
/// converted to a MetadataKey<Ascii>.
///
/// If the map did not previously have this key present, then `None` is
/// returned.
///
/// If the map did have this key present, the new value is associated with
/// the key and all previous values are removed. **Note** that only a single
/// one of the previous values is returned. If there are multiple values
/// that have been previously associated with the key, then the first one is
/// returned. See `insert_mult` on `OccupiedEntry` for an API that returns
/// all values.
///
/// The key is not updated, though; this matters for types that can be `==`
/// without being identical.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// assert!(map.insert("x-host", "world".parse().unwrap()).is_none());
/// assert!(!map.is_empty());
///
/// let mut prev = map.insert("x-host", "earth".parse().unwrap()).unwrap();
/// assert_eq!("world", prev);
/// ```
///
/// ```should_panic
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// // Trying to insert a key that is not valid panics.
/// map.insert("x{}host", "world".parse().unwrap());
/// ```
///
/// ```should_panic
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// // Trying to insert a key that is binary panics (use insert_bin).
/// map.insert("x-host-bin", "world".parse().unwrap());
/// ```
pub fn insert<K>(&mut self, key: K, val: MetadataValue<Ascii>) -> Option<MetadataValue<Ascii>>
where
K: IntoMetadataKey<Ascii>,
{
key.insert(self, val)
}
/// Like insert, but for Binary keys (for example "trace-proto-bin").
///
/// This method panics when the given key is a string and it cannot be
/// converted to a MetadataKey<Binary>.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// assert!(map.insert_bin("trace-proto-bin", MetadataValue::from_bytes(b"world")).is_none());
/// assert!(!map.is_empty());
///
/// let mut prev = map.insert_bin("trace-proto-bin", MetadataValue::from_bytes(b"earth")).unwrap();
/// assert_eq!("world", prev);
/// ```
///
/// ```should_panic
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::default();
/// // Attempting to add a binary metadata entry with an invalid name
/// map.insert_bin("trace-proto", MetadataValue::from_bytes(b"hello")); // This line panics!
/// ```
///
/// ```should_panic
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// // Trying to insert a key that is not valid panics.
/// map.insert_bin("x{}host-bin", MetadataValue::from_bytes(b"world")); // This line panics!
/// ```
pub fn insert_bin<K>(
&mut self,
key: K,
val: MetadataValue<Binary>,
) -> Option<MetadataValue<Binary>>
where
K: IntoMetadataKey<Binary>,
{
key.insert(self, val)
}
/// Inserts an ascii key-value pair into the map. To insert a binary entry,
/// use `append_bin`.
///
/// This method panics when the given key is a string and it cannot be
/// converted to a MetadataKey<Ascii>.
///
/// If the map did not previously have this key present, then `false` is
/// returned.
///
/// If the map did have this key present, the new value is pushed to the end
/// of the list of values currently associated with the key. The key is not
/// updated, though; this matters for types that can be `==` without being
/// identical.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// assert!(map.insert("x-host", "world".parse().unwrap()).is_none());
/// assert!(!map.is_empty());
///
/// map.append("x-host", "earth".parse().unwrap());
///
/// let values = map.get_all("x-host");
/// let mut i = values.iter();
/// assert_eq!("world", *i.next().unwrap());
/// assert_eq!("earth", *i.next().unwrap());
/// ```
///
/// ```should_panic
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// // Trying to append a key that is not valid panics.
/// map.append("x{}host", "world".parse().unwrap()); // This line panics!
/// ```
///
/// ```should_panic
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// // Trying to append a key that is binary panics (use append_bin).
/// map.append("x-host-bin", "world".parse().unwrap()); // This line panics!
/// ```
pub fn append<K>(&mut self, key: K, value: MetadataValue<Ascii>) -> bool
where
K: IntoMetadataKey<Ascii>,
{
key.append(self, value)
}
/// Like append, but for binary keys (for example "trace-proto-bin").
///
/// This method panics when the given key is a string and it cannot be
/// converted to a MetadataKey<Binary>.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// assert!(map.insert_bin("trace-proto-bin", MetadataValue::from_bytes(b"world")).is_none());
/// assert!(!map.is_empty());
///
/// map.append_bin("trace-proto-bin", MetadataValue::from_bytes(b"earth"));
///
/// let values = map.get_all_bin("trace-proto-bin");
/// let mut i = values.iter();
/// assert_eq!("world", *i.next().unwrap());
/// assert_eq!("earth", *i.next().unwrap());
/// ```
///
/// ```should_panic
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// // Trying to append a key that is not valid panics.
/// map.append_bin("x{}host-bin", MetadataValue::from_bytes(b"world")); // This line panics!
/// ```
///
/// ```should_panic
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// // Trying to append a key that is ascii panics (use append).
/// map.append_bin("x-host", MetadataValue::from_bytes(b"world")); // This line panics!
/// ```
pub fn append_bin<K>(&mut self, key: K, value: MetadataValue<Binary>) -> bool
where
K: IntoMetadataKey<Binary>,
{
key.append(self, value)
}
/// Removes an ascii key from the map, returning the value associated with
/// the key. To remove a binary key, use `remove_bin`.
///
/// Returns `None` if the map does not contain the key. If there are
/// multiple values associated with the key, then the first one is returned.
/// See `remove_entry_mult` on `OccupiedEntry` for an API that yields all
/// values.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("x-host", "hello.world".parse().unwrap());
///
/// let prev = map.remove("x-host").unwrap();
/// assert_eq!("hello.world", prev);
///
/// assert!(map.remove("x-host").is_none());
///
/// // Attempting to remove a key of the wrong type fails by not
/// // finding anything.
/// map.append_bin("host-bin", MetadataValue::from_bytes(b"world"));
/// assert!(map.remove("host-bin").is_none());
/// assert!(map.remove("host-bin".to_string()).is_none());
/// assert!(map.remove(&("host-bin".to_string())).is_none());
///
/// // Attempting to remove an invalid key string fails by not
/// // finding anything.
/// assert!(map.remove("host{}").is_none());
/// assert!(map.remove("host{}".to_string()).is_none());
/// assert!(map.remove(&("host{}".to_string())).is_none());
/// ```
pub fn remove<K>(&mut self, key: K) -> Option<MetadataValue<Ascii>>
where
K: AsMetadataKey<Ascii>,
{
key.remove(self)
}
/// Like remove, but for Binary keys (for example "trace-proto-bin").
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert_bin("trace-proto-bin", MetadataValue::from_bytes(b"hello.world"));
///
/// let prev = map.remove_bin("trace-proto-bin").unwrap();
/// assert_eq!("hello.world", prev);
///
/// assert!(map.remove_bin("trace-proto-bin").is_none());
///
/// // Attempting to remove a key of the wrong type fails by not
/// // finding anything.
/// map.append("host", "world".parse().unwrap());
/// assert!(map.remove_bin("host").is_none());
/// assert!(map.remove_bin("host".to_string()).is_none());
/// assert!(map.remove_bin(&("host".to_string())).is_none());
///
/// // Attempting to remove an invalid key string fails by not
/// // finding anything.
/// assert!(map.remove_bin("host{}-bin").is_none());
/// assert!(map.remove_bin("host{}-bin".to_string()).is_none());
/// assert!(map.remove_bin(&("host{}-bin".to_string())).is_none());
/// ```
pub fn remove_bin<K>(&mut self, key: K) -> Option<MetadataValue<Binary>>
where
K: AsMetadataKey<Binary>,
{
key.remove(self)
}
    /// Merges every entry of `other` into this map.
    ///
    /// NOTE(review): duplicate-key semantics are whatever `http::HeaderMap`'s
    /// `Extend` impl does — confirm whether existing values are replaced or
    /// retained before relying on either behavior.
    pub(crate) fn merge(&mut self, other: MetadataMap) {
        self.headers.extend(other.headers);
    }
}
// ===== impl Iter =====
impl<'a> Iterator for Iter<'a> {
type Item = KeyAndValueRef<'a>;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|item| {
let (ref name, value) = item;
if Ascii::is_valid_key(name.as_str()) {
KeyAndValueRef::Ascii(
MetadataKey::unchecked_from_header_name_ref(name),
MetadataValue::unchecked_from_header_value_ref(value),
)
} else {
KeyAndValueRef::Binary(
MetadataKey::unchecked_from_header_name_ref(name),
MetadataValue::unchecked_from_header_value_ref(value),
)
}
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
// ===== impl IterMut =====
impl<'a> Iterator for IterMut<'a> {
    type Item = KeyAndMutValueRef<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        let (name, value) = self.inner.next()?;
        // Same ascii/binary split as `Iter`, but handing out mutable values.
        let item = if Ascii::is_valid_key(name.as_str()) {
            KeyAndMutValueRef::Ascii(
                MetadataKey::unchecked_from_header_name_ref(name),
                MetadataValue::unchecked_from_mut_header_value_ref(value),
            )
        } else {
            KeyAndMutValueRef::Binary(
                MetadataKey::unchecked_from_header_name_ref(name),
                MetadataValue::unchecked_from_mut_header_value_ref(value),
            )
        };
        Some(item)
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
// ===== impl ValueDrain =====
impl<'a, VE: ValueEncoding> Iterator for ValueDrain<'a, VE> {
    type Item = MetadataValue<VE>;
    fn next(&mut self) -> Option<Self::Item> {
        // Drain the next raw header value and wrap it in the typed value.
        let value = self.inner.next()?;
        Some(MetadataValue::unchecked_from_header_value(value))
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
// ===== impl Keys =====
impl<'a> Iterator for Keys<'a> {
    type Item = KeyRef<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        let name = self.inner.next()?;
        // Names valid as ascii keys are Ascii; the rest are binary keys.
        let key = if Ascii::is_valid_key(name.as_str()) {
            KeyRef::Ascii(MetadataKey::unchecked_from_header_name_ref(name))
        } else {
            KeyRef::Binary(MetadataKey::unchecked_from_header_name_ref(name))
        };
        Some(key)
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
// Marker impl: relies on `Keys::size_hint` (forwarded from the inner
// iterator, see above) reporting an exact length.
impl<'a> ExactSizeIterator for Keys<'a> {}
// ===== impl Values ====
impl<'a> Iterator for Values<'a> {
type Item = ValueRef<'a>;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|item| {
let (ref name, value) = item;
if Ascii::is_valid_key(name.as_str()) {
ValueRef::Ascii(MetadataValue::unchecked_from_header_value_ref(value))
} else {
ValueRef::Binary(MetadataValue::unchecked_from_header_value_ref(value))
}
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
// ===== impl ValuesMut =====
// (Section comment previously mislabeled this impl as `Values`.)
impl<'a> Iterator for ValuesMut<'a> {
    type Item = ValueRefMut<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        let (name, value) = self.inner.next()?;
        // The key's name decides which encoding variant wraps the value.
        let value_ref = if Ascii::is_valid_key(name.as_str()) {
            ValueRefMut::Ascii(MetadataValue::unchecked_from_mut_header_value_ref(value))
        } else {
            ValueRefMut::Binary(MetadataValue::unchecked_from_mut_header_value_ref(value))
        };
        Some(value_ref)
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
// ===== impl ValueIter =====
impl<'a, VE: ValueEncoding> Iterator for ValueIter<'a, VE>
where
    VE: 'a,
{
    type Item = &'a MetadataValue<VE>;
    fn next(&mut self) -> Option<Self::Item> {
        // `inner` is `None` when the key had no entries at all; `as_mut()?`
        // short-circuits that case.
        self.inner
            .as_mut()?
            .next()
            .map(MetadataValue::unchecked_from_header_value_ref)
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        match &self.inner {
            Some(inner) => inner.size_hint(),
            None => (0, Some(0)),
        }
    }
}
impl<'a, VE: ValueEncoding> DoubleEndedIterator for ValueIter<'a, VE>
where
    VE: 'a,
{
    fn next_back(&mut self) -> Option<Self::Item> {
        // Same None-means-empty convention as `next`.
        self.inner
            .as_mut()?
            .next_back()
            .map(MetadataValue::unchecked_from_header_value_ref)
    }
}
// ===== impl ValueIterMut =====
impl<'a, VE: ValueEncoding> Iterator for ValueIterMut<'a, VE>
where
    VE: 'a,
{
    type Item = &'a mut MetadataValue<VE>;
    fn next(&mut self) -> Option<Self::Item> {
        let value = self.inner.next()?;
        Some(MetadataValue::unchecked_from_mut_header_value_ref(value))
    }
}
impl<'a, VE: ValueEncoding> DoubleEndedIterator for ValueIterMut<'a, VE>
where
    VE: 'a,
{
    fn next_back(&mut self) -> Option<Self::Item> {
        let value = self.inner.next_back()?;
        Some(MetadataValue::unchecked_from_mut_header_value_ref(value))
    }
}
// ===== impl Entry =====
impl<'a, VE: ValueEncoding> Entry<'a, VE> {
/// Ensures a value is in the entry by inserting the default if empty.
///
/// Returns a mutable reference to the **first** value in the entry.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map: MetadataMap = MetadataMap::default();
///
/// let keys = &[
/// "content-length",
/// "x-hello",
/// "Content-Length",
/// "x-world",
/// ];
///
/// for &key in keys {
/// let counter = map.entry(key)
/// .expect("valid key names")
/// .or_insert("".parse().unwrap());
/// *counter = format!("{}{}", counter.to_str().unwrap(), "1").parse().unwrap();
/// }
///
/// assert_eq!(map.get("content-length").unwrap(), "11");
/// assert_eq!(map.get("x-hello").unwrap(), "1");
/// ```
pub fn or_insert(self, default: MetadataValue<VE>) -> &'a mut MetadataValue<VE> {
use self::Entry::*;
match self {
Occupied(e) => e.into_mut(),
Vacant(e) => e.insert(default),
}
}
/// Ensures a value is in the entry by inserting the result of the default
/// function if empty.
///
/// The default function is not called if the entry exists in the map.
/// Returns a mutable reference to the **first** value in the entry.
///
/// # Examples
///
/// Basic usage.
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
///
/// let res = map.entry("x-hello").unwrap()
/// .or_insert_with(|| "world".parse().unwrap());
///
/// assert_eq!(res, "world");
/// ```
///
/// The default function is not called if the entry exists in the map.
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("host", "world".parse().unwrap());
///
/// let res = map.entry("host")
/// .expect("host is a valid string")
/// .or_insert_with(|| unreachable!());
///
///
/// assert_eq!(res, "world");
/// ```
pub fn or_insert_with<F: FnOnce() -> MetadataValue<VE>>(
self,
default: F,
) -> &'a mut MetadataValue<VE> {
use self::Entry::*;
match self {
Occupied(e) => e.into_mut(),
Vacant(e) => e.insert(default()),
}
}
/// Returns a reference to the entry's key
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
///
/// assert_eq!(map.entry("x-hello").unwrap().key(), "x-hello");
/// ```
pub fn key(&self) -> &MetadataKey<VE> {
use self::Entry::*;
MetadataKey::unchecked_from_header_name_ref(match *self {
Vacant(ref e) => e.inner.key(),
Occupied(ref e) => e.inner.key(),
})
}
}
// ===== impl VacantEntry =====
impl<'a, VE: ValueEncoding> VacantEntry<'a, VE> {
    /// Returns a reference to the entry's key
    ///
    /// # Examples
    ///
    /// ```
    /// # use tonic::metadata::*;
    /// let mut map = MetadataMap::new();
    ///
    /// assert_eq!(map.entry("x-hello").unwrap().key(), "x-hello");
    /// ```
    pub fn key(&self) -> &MetadataKey<VE> {
        MetadataKey::unchecked_from_header_name_ref(self.inner.key())
    }
    /// Take ownership of the key
    ///
    /// # Examples
    ///
    /// ```
    /// # use tonic::metadata::*;
    /// let mut map = MetadataMap::new();
    ///
    /// if let Entry::Vacant(v) = map.entry("x-hello").unwrap() {
    ///     assert_eq!(v.into_key().as_str(), "x-hello");
    /// }
    /// ```
    pub fn into_key(self) -> MetadataKey<VE> {
        MetadataKey::unchecked_from_header_name(self.inner.into_key())
    }
    /// Insert the value into the entry.
    ///
    /// The value will be associated with this entry's key. A mutable reference
    /// to the inserted value will be returned.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tonic::metadata::*;
    /// let mut map = MetadataMap::new();
    ///
    /// if let Entry::Vacant(v) = map.entry("x-hello").unwrap() {
    ///     v.insert("world".parse().unwrap());
    /// }
    ///
    /// assert_eq!(map.get("x-hello").unwrap(), "world");
    /// ```
    pub fn insert(self, value: MetadataValue<VE>) -> &'a mut MetadataValue<VE> {
        MetadataValue::unchecked_from_mut_header_value_ref(self.inner.insert(value.inner))
    }
    /// Insert the value into the entry.
    ///
    /// The value will be associated with this entry's key. The new
    /// `OccupiedEntry` is returned, allowing for further manipulation.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tonic::metadata::*;
    /// let mut map = MetadataMap::new();
    ///
    /// if let Entry::Vacant(v) = map.entry("x-hello").unwrap() {
    ///     let mut e = v.insert_entry("world".parse().unwrap());
    ///     e.insert("world2".parse().unwrap());
    /// }
    ///
    /// assert_eq!(map.get("x-hello").unwrap(), "world2");
    /// ```
    // Fix: the returned entry must keep this entry's encoding `VE`;
    // previously it was hard-coded to `Ascii`, so a Binary vacant entry
    // produced a wrongly-typed occupied entry.
    pub fn insert_entry(self, value: MetadataValue<VE>) -> OccupiedEntry<'a, VE> {
        OccupiedEntry {
            inner: self.inner.insert_entry(value.inner),
            phantom: PhantomData,
        }
    }
}
// ===== impl OccupiedEntry =====
impl<'a, VE: ValueEncoding> OccupiedEntry<'a, VE> {
/// Returns a reference to the entry's key.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("host", "world".parse().unwrap());
///
/// if let Entry::Occupied(e) = map.entry("host").unwrap() {
/// assert_eq!("host", e.key());
/// }
/// ```
pub fn key(&self) -> &MetadataKey<VE> {
MetadataKey::unchecked_from_header_name_ref(self.inner.key())
}
/// Get a reference to the first value in the entry.
///
/// Values are stored in insertion order.
///
/// # Panics
///
/// `get` panics if there are no values associated with the entry.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("host", "hello.world".parse().unwrap());
///
/// if let Entry::Occupied(mut e) = map.entry("host").unwrap() {
/// assert_eq!(e.get(), &"hello.world");
///
/// e.append("hello.earth".parse().unwrap());
///
/// assert_eq!(e.get(), &"hello.world");
/// }
/// ```
pub fn get(&self) -> &MetadataValue<VE> {
MetadataValue::unchecked_from_header_value_ref(self.inner.get())
}
/// Get a mutable reference to the first value in the entry.
///
/// Values are stored in insertion order.
///
/// # Panics
///
/// `get_mut` panics if there are no values associated with the entry.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::default();
/// map.insert("host", "hello.world".parse().unwrap());
///
/// if let Entry::Occupied(mut e) = map.entry("host").unwrap() {
/// e.get_mut().set_sensitive(true);
/// assert_eq!(e.get(), &"hello.world");
/// assert!(e.get().is_sensitive());
/// }
/// ```
pub fn get_mut(&mut self) -> &mut MetadataValue<VE> {
MetadataValue::unchecked_from_mut_header_value_ref(self.inner.get_mut())
}
/// Converts the `OccupiedEntry` into a mutable reference to the **first**
/// value.
///
/// The lifetime of the returned reference is bound to the original map.
///
/// # Panics
///
/// `into_mut` panics if there are no values associated with the entry.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::default();
/// map.insert("host", "hello.world".parse().unwrap());
/// map.append("host", "hello.earth".parse().unwrap());
///
/// if let Entry::Occupied(e) = map.entry("host").unwrap() {
/// e.into_mut().set_sensitive(true);
/// }
///
/// assert!(map.get("host").unwrap().is_sensitive());
/// ```
pub fn into_mut(self) -> &'a mut MetadataValue<VE> {
MetadataValue::unchecked_from_mut_header_value_ref(self.inner.into_mut())
}
/// Sets the value of the entry.
///
/// All previous values associated with the entry are removed and the first
/// one is returned. See `insert_mult` for an API that returns all values.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("host", "hello.world".parse().unwrap());
///
/// if let Entry::Occupied(mut e) = map.entry("host").unwrap() {
/// let mut prev = e.insert("earth".parse().unwrap());
/// assert_eq!("hello.world", prev);
/// }
///
/// assert_eq!("earth", map.get("host").unwrap());
/// ```
pub fn insert(&mut self, value: MetadataValue<VE>) -> MetadataValue<VE> {
let header_value = self.inner.insert(value.inner);
MetadataValue::unchecked_from_header_value(header_value)
}
/// Sets the value of the entry.
///
/// This function does the same as `insert` except it returns an iterator
/// that yields all values previously associated with the key.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("host", "world".parse().unwrap());
/// map.append("host", "world2".parse().unwrap());
///
/// if let Entry::Occupied(mut e) = map.entry("host").unwrap() {
/// let mut prev = e.insert_mult("earth".parse().unwrap());
/// assert_eq!("world", prev.next().unwrap());
/// assert_eq!("world2", prev.next().unwrap());
/// assert!(prev.next().is_none());
/// }
///
/// assert_eq!("earth", map.get("host").unwrap());
/// ```
pub fn insert_mult(&mut self, value: MetadataValue<VE>) -> ValueDrain<'_, VE> {
ValueDrain {
inner: self.inner.insert_mult(value.inner),
phantom: PhantomData,
}
}
/// Insert the value into the entry.
///
/// The new value is appended to the end of the entry's value list. All
/// previous values associated with the entry are retained.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("host", "world".parse().unwrap());
///
/// if let Entry::Occupied(mut e) = map.entry("host").unwrap() {
/// e.append("earth".parse().unwrap());
/// }
///
/// let values = map.get_all("host");
/// let mut i = values.iter();
/// assert_eq!("world", *i.next().unwrap());
/// assert_eq!("earth", *i.next().unwrap());
/// ```
pub fn append(&mut self, value: MetadataValue<VE>) {
self.inner.append(value.inner)
}
/// Remove the entry from the map.
///
/// All values associated with the entry are removed and the first one is
/// returned. See `remove_entry_mult` for an API that returns all values.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("host", "world".parse().unwrap());
///
/// if let Entry::Occupied(e) = map.entry("host").unwrap() {
/// let mut prev = e.remove();
/// assert_eq!("world", prev);
/// }
///
/// assert!(!map.contains_key("host"));
/// ```
pub fn remove(self) -> MetadataValue<VE> {
let value = self.inner.remove();
MetadataValue::unchecked_from_header_value(value)
}
/// Remove the entry from the map.
///
/// The key and all values associated with the entry are removed and the
/// first one is returned. See `remove_entry_mult` for an API that returns
/// all values.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("host", "world".parse().unwrap());
///
/// if let Entry::Occupied(e) = map.entry("host").unwrap() {
/// let (key, mut prev) = e.remove_entry();
/// assert_eq!("host", key.as_str());
/// assert_eq!("world", prev);
/// }
///
/// assert!(!map.contains_key("host"));
/// ```
pub fn remove_entry(self) -> (MetadataKey<VE>, MetadataValue<VE>) {
let (name, value) = self.inner.remove_entry();
(
MetadataKey::unchecked_from_header_name(name),
MetadataValue::unchecked_from_header_value(value),
)
}
/// Remove the entry from the map.
///
/// The key and all values associated with the entry are removed and
/// returned.
pub fn remove_entry_mult(self) -> (MetadataKey<VE>, ValueDrain<'a, VE>) {
let (name, value_drain) = self.inner.remove_entry_mult();
(
MetadataKey::unchecked_from_header_name(name),
ValueDrain {
inner: value_drain,
phantom: PhantomData,
},
)
}
/// Returns an iterator visiting all values associated with the entry.
///
/// Values are iterated in insertion order.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::new();
/// map.insert("host", "world".parse().unwrap());
/// map.append("host", "earth".parse().unwrap());
///
/// if let Entry::Occupied(e) = map.entry("host").unwrap() {
/// let mut iter = e.iter();
/// assert_eq!(&"world", iter.next().unwrap());
/// assert_eq!(&"earth", iter.next().unwrap());
/// assert!(iter.next().is_none());
/// }
/// ```
pub fn iter(&self) -> ValueIter<'_, VE> {
ValueIter {
inner: Some(self.inner.iter()),
phantom: PhantomData,
}
}
/// Returns an iterator mutably visiting all values associated with the
/// entry.
///
/// Values are iterated in insertion order.
///
/// # Examples
///
/// ```
/// # use tonic::metadata::*;
/// let mut map = MetadataMap::default();
/// map.insert("host", "world".parse().unwrap());
/// map.append("host", "earth".parse().unwrap());
///
/// if let Entry::Occupied(mut e) = map.entry("host").unwrap() {
/// for e in e.iter_mut() {
/// e.set_sensitive(true);
/// }
/// }
///
/// let mut values = map.get_all("host");
/// let mut i = values.iter();
/// assert!(i.next().unwrap().is_sensitive());
/// assert!(i.next().unwrap().is_sensitive());
/// ```
pub fn iter_mut(&mut self) -> ValueIterMut<'_, VE> {
ValueIterMut {
inner: self.inner.iter_mut(),
phantom: PhantomData,
}
}
}
// Consuming iteration over an occupied entry yields mutable references to
// each of its values, in insertion order.
impl<'a, VE: ValueEncoding + 'a> IntoIterator for OccupiedEntry<'a, VE> {
    type Item = &'a mut MetadataValue<VE>;
    type IntoIter = ValueIterMut<'a, VE>;
    fn into_iter(self) -> ValueIterMut<'a, VE> {
        let values = self.inner.into_iter();
        ValueIterMut {
            inner: values,
            phantom: PhantomData,
        }
    }
}
impl<'a, 'b: 'a, VE: ValueEncoding> IntoIterator for &'b OccupiedEntry<'a, VE> {
type Item = &'a MetadataValue<VE>;
type IntoIter = ValueIter<'a, VE>;
fn into_iter(self) -> ValueIter<'a, VE> {
self.iter()
}
}
impl<'a, 'b: 'a, VE: ValueEncoding> IntoIterator for &'b mut OccupiedEntry<'a, VE> {
type Item = &'a mut MetadataValue<VE>;
type IntoIter = ValueIterMut<'a, VE>;
fn into_iter(self) -> ValueIterMut<'a, VE> {
self.iter_mut()
}
}
// ===== impl GetAll =====
impl<'a, VE: ValueEncoding> GetAll<'a, VE> {
    /// Returns an iterator visiting all values associated with the entry.
    ///
    /// Iteration order matches insertion order. When the key is absent the
    /// iterator is simply empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tonic::metadata::*;
    /// let mut map = MetadataMap::new();
    /// map.insert("x-host", "hello.world".parse().unwrap());
    /// map.append("x-host", "hello.earth".parse().unwrap());
    ///
    /// let values = map.get_all("x-host");
    /// let mut iter = values.iter();
    /// assert_eq!(&"hello.world", iter.next().unwrap());
    /// assert_eq!(&"hello.earth", iter.next().unwrap());
    /// assert!(iter.next().is_none());
    /// ```
    pub fn iter(&self) -> ValueIter<'a, VE> {
        let values = self.inner.as_ref().map(|values| values.iter());
        ValueIter {
            inner: values,
            phantom: PhantomData,
        }
    }
}
// Two `GetAll` views are equal when they yield equal value sequences; an
// absent entry (`None`) iterates as empty, so it equals another absent entry.
impl<'a, VE: ValueEncoding> PartialEq for GetAll<'a, VE> {
    fn eq(&self, other: &Self) -> bool {
        let lhs = self.inner.iter();
        let rhs = other.inner.iter();
        lhs.eq(rhs)
    }
}
// Consuming iteration over a `GetAll` view; an absent entry yields nothing.
impl<'a, VE: ValueEncoding + 'a> IntoIterator for GetAll<'a, VE> {
    type Item = &'a MetadataValue<VE>;
    type IntoIter = ValueIter<'a, VE>;
    fn into_iter(self) -> ValueIter<'a, VE> {
        let values = self.inner.map(|values| values.into_iter());
        ValueIter {
            inner: values,
            phantom: PhantomData,
        }
    }
}
// Borrowing iteration over a `GetAll` view; an absent entry yields nothing.
impl<'a, 'b: 'a, VE: ValueEncoding> IntoIterator for &'b GetAll<'a, VE> {
    type Item = &'a MetadataValue<VE>;
    type IntoIter = ValueIter<'a, VE>;
    fn into_iter(self) -> ValueIter<'a, VE> {
        ValueIter {
            // `Option::as_ref` auto-borrows `self.inner`; the explicit
            // `(&self.inner)` borrow was redundant.
            inner: self.inner.as_ref().map(|inner| inner.into_iter()),
            phantom: PhantomData,
        }
    }
}
// ===== impl IntoMetadataKey / AsMetadataKey =====
mod into_metadata_key {
    use super::{MetadataMap, MetadataValue, ValueEncoding};
    use crate::metadata::key::MetadataKey;
    /// A marker trait used to identify values that can be used as insert keys
    /// to a `MetadataMap`.
    pub trait IntoMetadataKey<VE: ValueEncoding>: Sealed<VE> {}
    // All methods are on this pub(super) trait, instead of `IntoMetadataKey`,
    // so that they aren't publicly exposed to the world.
    //
    // Being on the `IntoMetadataKey` trait would mean users could call
    // `"host".insert(&mut map, "localhost")`.
    //
    // Ultimately, this allows us to adjust the signatures of these methods
    // without breaking any external crate.
    pub trait Sealed<VE: ValueEncoding> {
        /// Insert `val` under this key, returning the replaced first value.
        #[doc(hidden)]
        fn insert(self, map: &mut MetadataMap, val: MetadataValue<VE>)
            -> Option<MetadataValue<VE>>;
        /// Append `val` under this key; returns whether the key already existed.
        #[doc(hidden)]
        fn append(self, map: &mut MetadataMap, val: MetadataValue<VE>) -> bool;
    }
    // ==== impls ====
    //
    // Note: function paths implement the `Fn` traits directly, so they are
    // passed to `Option::map` without the redundant `&` borrow.
    impl<VE: ValueEncoding> Sealed<VE> for MetadataKey<VE> {
        #[doc(hidden)]
        #[inline]
        fn insert(
            self,
            map: &mut MetadataMap,
            val: MetadataValue<VE>,
        ) -> Option<MetadataValue<VE>> {
            map.headers
                .insert(self.inner, val.inner)
                .map(MetadataValue::unchecked_from_header_value)
        }
        #[doc(hidden)]
        #[inline]
        fn append(self, map: &mut MetadataMap, val: MetadataValue<VE>) -> bool {
            map.headers.append(self.inner, val.inner)
        }
    }
    impl<VE: ValueEncoding> IntoMetadataKey<VE> for MetadataKey<VE> {}
    impl<'a, VE: ValueEncoding> Sealed<VE> for &'a MetadataKey<VE> {
        #[doc(hidden)]
        #[inline]
        fn insert(
            self,
            map: &mut MetadataMap,
            val: MetadataValue<VE>,
        ) -> Option<MetadataValue<VE>> {
            map.headers
                .insert(&self.inner, val.inner)
                .map(MetadataValue::unchecked_from_header_value)
        }
        #[doc(hidden)]
        #[inline]
        fn append(self, map: &mut MetadataMap, val: MetadataValue<VE>) -> bool {
            map.headers.append(&self.inner, val.inner)
        }
    }
    impl<'a, VE: ValueEncoding> IntoMetadataKey<VE> for &'a MetadataKey<VE> {}
    impl<VE: ValueEncoding> Sealed<VE> for &'static str {
        #[doc(hidden)]
        #[inline]
        fn insert(
            self,
            map: &mut MetadataMap,
            val: MetadataValue<VE>,
        ) -> Option<MetadataValue<VE>> {
            // Perform name validation
            let key = MetadataKey::<VE>::from_static(self);
            map.headers
                .insert(key.inner, val.inner)
                .map(MetadataValue::unchecked_from_header_value)
        }
        #[doc(hidden)]
        #[inline]
        fn append(self, map: &mut MetadataMap, val: MetadataValue<VE>) -> bool {
            // Perform name validation
            let key = MetadataKey::<VE>::from_static(self);
            map.headers.append(key.inner, val.inner)
        }
    }
    impl<VE: ValueEncoding> IntoMetadataKey<VE> for &'static str {}
}
mod as_metadata_key {
    use super::{MetadataMap, MetadataValue, ValueEncoding};
    use crate::metadata::key::{InvalidMetadataKey, MetadataKey};
    use http::header::{Entry, GetAll, HeaderValue};
    /// A marker trait used to identify values that can be used as search keys
    /// to a `MetadataMap`.
    pub trait AsMetadataKey<VE: ValueEncoding>: Sealed<VE> {}
    // All methods are on this pub(super) trait, instead of `AsMetadataKey`,
    // so that they aren't publicly exposed to the world.
    //
    // Being on the `AsMetadataKey` trait would mean users could call
    // `"host".find(&map)`.
    //
    // Ultimately, this allows us to adjust the signatures of these methods
    // without breaking any external crate.
    pub trait Sealed<VE: ValueEncoding> {
        /// Look up the first value for this key, if any.
        #[doc(hidden)]
        fn get(self, map: &MetadataMap) -> Option<&MetadataValue<VE>>;
        /// Look up the first value for this key mutably, if any.
        #[doc(hidden)]
        fn get_mut(self, map: &mut MetadataMap) -> Option<&mut MetadataValue<VE>>;
        /// View every value stored for this key.
        #[doc(hidden)]
        fn get_all(self, map: &MetadataMap) -> Option<GetAll<'_, HeaderValue>>;
        /// Obtain the map entry for this key, validating the key name.
        #[doc(hidden)]
        fn entry(self, map: &mut MetadataMap)
            -> Result<Entry<'_, HeaderValue>, InvalidMetadataKey>;
        /// Remove every value for this key, returning the first one.
        #[doc(hidden)]
        fn remove(self, map: &mut MetadataMap) -> Option<MetadataValue<VE>>;
    }
    // ==== impls ====
    //
    // Note: function paths implement the `Fn` traits directly, so they are
    // passed to `Option::map` without the redundant `&` borrow.
    //
    // String-typed keys are checked with `VE::is_valid_key` first, so a key
    // belonging to the other encoding behaves as if it were absent.
    impl<VE: ValueEncoding> Sealed<VE> for MetadataKey<VE> {
        #[doc(hidden)]
        #[inline]
        fn get(self, map: &MetadataMap) -> Option<&MetadataValue<VE>> {
            map.headers
                .get(self.inner)
                .map(MetadataValue::unchecked_from_header_value_ref)
        }
        #[doc(hidden)]
        #[inline]
        fn get_mut(self, map: &mut MetadataMap) -> Option<&mut MetadataValue<VE>> {
            map.headers
                .get_mut(self.inner)
                .map(MetadataValue::unchecked_from_mut_header_value_ref)
        }
        #[doc(hidden)]
        #[inline]
        fn get_all(self, map: &MetadataMap) -> Option<GetAll<'_, HeaderValue>> {
            Some(map.headers.get_all(self.inner))
        }
        #[doc(hidden)]
        #[inline]
        fn entry(
            self,
            map: &mut MetadataMap,
        ) -> Result<Entry<'_, HeaderValue>, InvalidMetadataKey> {
            // A `MetadataKey` is validated at construction, so this is infallible.
            Ok(map.headers.entry(self.inner))
        }
        #[doc(hidden)]
        #[inline]
        fn remove(self, map: &mut MetadataMap) -> Option<MetadataValue<VE>> {
            map.headers
                .remove(self.inner)
                .map(MetadataValue::unchecked_from_header_value)
        }
    }
    impl<VE: ValueEncoding> AsMetadataKey<VE> for MetadataKey<VE> {}
    impl<'a, VE: ValueEncoding> Sealed<VE> for &'a MetadataKey<VE> {
        #[doc(hidden)]
        #[inline]
        fn get(self, map: &MetadataMap) -> Option<&MetadataValue<VE>> {
            map.headers
                .get(&self.inner)
                .map(MetadataValue::unchecked_from_header_value_ref)
        }
        #[doc(hidden)]
        #[inline]
        fn get_mut(self, map: &mut MetadataMap) -> Option<&mut MetadataValue<VE>> {
            map.headers
                .get_mut(&self.inner)
                .map(MetadataValue::unchecked_from_mut_header_value_ref)
        }
        #[doc(hidden)]
        #[inline]
        fn get_all(self, map: &MetadataMap) -> Option<GetAll<'_, HeaderValue>> {
            Some(map.headers.get_all(&self.inner))
        }
        #[doc(hidden)]
        #[inline]
        fn entry(
            self,
            map: &mut MetadataMap,
        ) -> Result<Entry<'_, HeaderValue>, InvalidMetadataKey> {
            // A `MetadataKey` is validated at construction, so this is infallible.
            Ok(map.headers.entry(&self.inner))
        }
        #[doc(hidden)]
        #[inline]
        fn remove(self, map: &mut MetadataMap) -> Option<MetadataValue<VE>> {
            map.headers
                .remove(&self.inner)
                .map(MetadataValue::unchecked_from_header_value)
        }
    }
    impl<'a, VE: ValueEncoding> AsMetadataKey<VE> for &'a MetadataKey<VE> {}
    impl<'a, VE: ValueEncoding> Sealed<VE> for &'a str {
        #[doc(hidden)]
        #[inline]
        fn get(self, map: &MetadataMap) -> Option<&MetadataValue<VE>> {
            if !VE::is_valid_key(self) {
                return None;
            }
            map.headers
                .get(self)
                .map(MetadataValue::unchecked_from_header_value_ref)
        }
        #[doc(hidden)]
        #[inline]
        fn get_mut(self, map: &mut MetadataMap) -> Option<&mut MetadataValue<VE>> {
            if !VE::is_valid_key(self) {
                return None;
            }
            map.headers
                .get_mut(self)
                .map(MetadataValue::unchecked_from_mut_header_value_ref)
        }
        #[doc(hidden)]
        #[inline]
        fn get_all(self, map: &MetadataMap) -> Option<GetAll<'_, HeaderValue>> {
            if !VE::is_valid_key(self) {
                return None;
            }
            Some(map.headers.get_all(self))
        }
        #[doc(hidden)]
        #[inline]
        fn entry(
            self,
            map: &mut MetadataMap,
        ) -> Result<Entry<'_, HeaderValue>, InvalidMetadataKey> {
            if !VE::is_valid_key(self) {
                return Err(InvalidMetadataKey::new());
            }
            let key = http::header::HeaderName::from_bytes(self.as_bytes())
                .map_err(|_| InvalidMetadataKey::new())?;
            Ok(map.headers.entry(key))
        }
        #[doc(hidden)]
        #[inline]
        fn remove(self, map: &mut MetadataMap) -> Option<MetadataValue<VE>> {
            if !VE::is_valid_key(self) {
                return None;
            }
            map.headers
                .remove(self)
                .map(MetadataValue::unchecked_from_header_value)
        }
    }
    impl<'a, VE: ValueEncoding> AsMetadataKey<VE> for &'a str {}
    impl<VE: ValueEncoding> Sealed<VE> for String {
        #[doc(hidden)]
        #[inline]
        fn get(self, map: &MetadataMap) -> Option<&MetadataValue<VE>> {
            if !VE::is_valid_key(self.as_str()) {
                return None;
            }
            map.headers
                .get(self.as_str())
                .map(MetadataValue::unchecked_from_header_value_ref)
        }
        #[doc(hidden)]
        #[inline]
        fn get_mut(self, map: &mut MetadataMap) -> Option<&mut MetadataValue<VE>> {
            if !VE::is_valid_key(self.as_str()) {
                return None;
            }
            map.headers
                .get_mut(self.as_str())
                .map(MetadataValue::unchecked_from_mut_header_value_ref)
        }
        #[doc(hidden)]
        #[inline]
        fn get_all(self, map: &MetadataMap) -> Option<GetAll<'_, HeaderValue>> {
            if !VE::is_valid_key(self.as_str()) {
                return None;
            }
            Some(map.headers.get_all(self.as_str()))
        }
        #[doc(hidden)]
        #[inline]
        fn entry(
            self,
            map: &mut MetadataMap,
        ) -> Result<Entry<'_, HeaderValue>, InvalidMetadataKey> {
            if !VE::is_valid_key(self.as_str()) {
                return Err(InvalidMetadataKey::new());
            }
            let key = http::header::HeaderName::from_bytes(self.as_bytes())
                .map_err(|_| InvalidMetadataKey::new())?;
            Ok(map.headers.entry(key))
        }
        #[doc(hidden)]
        #[inline]
        fn remove(self, map: &mut MetadataMap) -> Option<MetadataValue<VE>> {
            if !VE::is_valid_key(self.as_str()) {
                return None;
            }
            map.headers
                .remove(self.as_str())
                .map(MetadataValue::unchecked_from_header_value)
        }
    }
    impl<VE: ValueEncoding> AsMetadataKey<VE> for String {}
    impl<'a, VE: ValueEncoding> Sealed<VE> for &'a String {
        #[doc(hidden)]
        #[inline]
        fn get(self, map: &MetadataMap) -> Option<&MetadataValue<VE>> {
            if !VE::is_valid_key(self) {
                return None;
            }
            map.headers
                .get(self.as_str())
                .map(MetadataValue::unchecked_from_header_value_ref)
        }
        #[doc(hidden)]
        #[inline]
        fn get_mut(self, map: &mut MetadataMap) -> Option<&mut MetadataValue<VE>> {
            if !VE::is_valid_key(self) {
                return None;
            }
            map.headers
                .get_mut(self.as_str())
                .map(MetadataValue::unchecked_from_mut_header_value_ref)
        }
        #[doc(hidden)]
        #[inline]
        fn get_all(self, map: &MetadataMap) -> Option<GetAll<'_, HeaderValue>> {
            if !VE::is_valid_key(self) {
                return None;
            }
            Some(map.headers.get_all(self.as_str()))
        }
        #[doc(hidden)]
        #[inline]
        fn entry(
            self,
            map: &mut MetadataMap,
        ) -> Result<Entry<'_, HeaderValue>, InvalidMetadataKey> {
            if !VE::is_valid_key(self) {
                return Err(InvalidMetadataKey::new());
            }
            let key = http::header::HeaderName::from_bytes(self.as_bytes())
                .map_err(|_| InvalidMetadataKey::new())?;
            Ok(map.headers.entry(key))
        }
        #[doc(hidden)]
        #[inline]
        fn remove(self, map: &mut MetadataMap) -> Option<MetadataValue<VE>> {
            if !VE::is_valid_key(self) {
                return None;
            }
            map.headers
                .remove(self.as_str())
                .map(MetadataValue::unchecked_from_header_value)
        }
    }
    impl<'a, VE: ValueEncoding> AsMetadataKey<VE> for &'a String {}
}
mod as_encoding_agnostic_metadata_key {
    use super::{MetadataMap, ValueEncoding};
    use crate::metadata::key::MetadataKey;
    /// A marker trait used to identify values that can be used as search keys
    /// to a `MetadataMap`, for operations that don't expose the actual value.
    pub trait AsEncodingAgnosticMetadataKey: Sealed {}
    // All methods are on this pub(super) trait, instead of
    // `AsEncodingAgnosticMetadataKey`, so that they aren't publicly exposed to
    // the world.
    //
    // Being on the `AsEncodingAgnosticMetadataKey` trait would mean users could
    // call `"host".contains_key(&map)`.
    //
    // Ultimately, this allows us to adjust the signatures of these methods
    // without breaking any external crate.
    pub trait Sealed {
        /// Reports whether any value is stored under this key.
        #[doc(hidden)]
        fn contains_key(&self, map: &MetadataMap) -> bool;
    }
    // ==== impls ====
    //
    // Unlike `AsMetadataKey`, string keys are NOT filtered through
    // `VE::is_valid_key` here: containment is answered regardless of whether
    // the key names an ASCII or a binary entry.
    impl<VE: ValueEncoding> Sealed for MetadataKey<VE> {
        #[doc(hidden)]
        #[inline]
        fn contains_key(&self, map: &MetadataMap) -> bool {
            map.headers.contains_key(&self.inner)
        }
    }
    impl<VE: ValueEncoding> AsEncodingAgnosticMetadataKey for MetadataKey<VE> {}
    impl<'a, VE: ValueEncoding> Sealed for &'a MetadataKey<VE> {
        #[doc(hidden)]
        #[inline]
        fn contains_key(&self, map: &MetadataMap) -> bool {
            map.headers.contains_key(&self.inner)
        }
    }
    impl<'a, VE: ValueEncoding> AsEncodingAgnosticMetadataKey for &'a MetadataKey<VE> {}
    impl<'a> Sealed for &'a str {
        #[doc(hidden)]
        #[inline]
        fn contains_key(&self, map: &MetadataMap) -> bool {
            // `self` is `&&str` here (the method takes `&self`), so one
            // dereference yields the `&str` the header map expects.
            map.headers.contains_key(*self)
        }
    }
    impl<'a> AsEncodingAgnosticMetadataKey for &'a str {}
    impl Sealed for String {
        #[doc(hidden)]
        #[inline]
        fn contains_key(&self, map: &MetadataMap) -> bool {
            map.headers.contains_key(self.as_str())
        }
    }
    impl AsEncodingAgnosticMetadataKey for String {}
    impl<'a> Sealed for &'a String {
        #[doc(hidden)]
        #[inline]
        fn contains_key(&self, map: &MetadataMap) -> bool {
            map.headers.contains_key(self.as_str())
        }
    }
    impl<'a> AsEncodingAgnosticMetadataKey for &'a String {}
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_from_headers_takes_http_headers() {
        let mut http_map = http::HeaderMap::new();
        http_map.insert("x-host", "example.com".parse().unwrap());
        let map = MetadataMap::from_headers(http_map);
        assert_eq!(map.get("x-host").unwrap(), "example.com");
    }
    #[test]
    fn test_to_headers_encoding() {
        use crate::Code;
        use crate::Status;
        let special_char_message = "Beyond ascii \t\n\r🌶️💉💧🐮🍺";
        let s1 = Status::new(Code::Unknown, special_char_message);
        assert_eq!(s1.message(), special_char_message);
        // Round-trip the status through its header representation.
        let s1_map = s1.to_header_map().unwrap();
        let s2 = Status::from_header_map(&s1_map).unwrap();
        assert_eq!(s1.message(), s2.message());
    }
    #[test]
    fn test_iter_categorizes_ascii_entries() {
        let mut map = MetadataMap::new();
        map.insert("x-word", "hello".parse().unwrap());
        map.append_bin("x-word-bin", MetadataValue::from_bytes(b"goodbye"));
        map.insert_bin("x-number-bin", MetadataValue::from_bytes(b"123"))
;
        let mut found_x_word = false;
        for key_and_value in map.iter() {
            if let KeyAndValueRef::Ascii(ref key, ref _value) = key_and_value {
                if key.as_str() == "x-word" {
                    found_x_word = true;
                } else {
                    panic!("Unexpected key");
                }
            }
        }
        assert!(found_x_word);
    }
    #[test]
    fn test_iter_categorizes_binary_entries() {
        let mut map = MetadataMap::new();
        map.insert("x-word", "hello".parse().unwrap());
        map.append_bin("x-word-bin", MetadataValue::from_bytes(b"goodbye"));
        let mut found_x_word_bin = false;
        for key_and_value in map.iter() {
            if let KeyAndValueRef::Binary(ref key, ref _value) = key_and_value {
                if key.as_str() == "x-word-bin" {
                    found_x_word_bin = true;
                } else {
                    panic!("Unexpected key");
                }
            }
        }
        assert!(found_x_word_bin);
    }
    #[test]
    fn test_iter_mut_categorizes_ascii_entries() {
        let mut map = MetadataMap::new();
        map.insert("x-word", "hello".parse().unwrap());
        map.append_bin("x-word-bin", MetadataValue::from_bytes(b"goodbye"));
        map.insert_bin("x-number-bin", MetadataValue::from_bytes(b"123"));
        let mut found_x_word = false;
        for key_and_value in map.iter_mut() {
            if let KeyAndMutValueRef::Ascii(ref key, ref _value) = key_and_value {
                if key.as_str() == "x-word" {
                    found_x_word = true;
                } else {
                    panic!("Unexpected key");
                }
            }
        }
        assert!(found_x_word);
    }
    #[test]
    fn test_iter_mut_categorizes_binary_entries() {
        let mut map = MetadataMap::new();
        map.insert("x-word", "hello".parse().unwrap());
        map.append_bin("x-word-bin", MetadataValue::from_bytes(b"goodbye"));
        let mut found_x_word_bin = false;
        for key_and_value in map.iter_mut() {
            if let KeyAndMutValueRef::Binary(ref key, ref _value) = key_and_value {
                if key.as_str() == "x-word-bin" {
                    found_x_word_bin = true;
                } else {
                    panic!("Unexpected key");
                }
            }
        }
        assert!(found_x_word_bin);
    }
    #[test]
    fn test_keys_categorizes_ascii_entries() {
        let mut map = MetadataMap::new();
        map.insert("x-word", "hello".parse().unwrap());
        map.append_bin("x-word-bin", MetadataValue::from_bytes(b"goodbye"));
        map.insert_bin("x-number-bin", MetadataValue::from_bytes(b"123"));
        let mut found_x_word = false;
        for key in map.keys() {
            if let KeyRef::Ascii(key) = key {
                if key.as_str() == "x-word" {
                    found_x_word = true;
                } else {
                    panic!("Unexpected key");
                }
            }
        }
        assert!(found_x_word);
    }
    #[test]
    fn test_keys_categorizes_binary_entries() {
        let mut map = MetadataMap::new();
        map.insert("x-word", "hello".parse().unwrap());
        map.insert_bin("x-number-bin", MetadataValue::from_bytes(b"123"));
        let mut found_x_number_bin = false;
        for key in map.keys() {
            if let KeyRef::Binary(key) = key {
                if key.as_str() == "x-number-bin" {
                    found_x_number_bin = true;
                } else {
                    panic!("Unexpected key");
                }
            }
        }
        assert!(found_x_number_bin);
    }
    #[test]
    fn test_values_categorizes_ascii_entries() {
        let mut map = MetadataMap::new();
        map.insert("x-word", "hello".parse().unwrap());
        map.append_bin("x-word-bin", MetadataValue::from_bytes(b"goodbye"));
        map.insert_bin("x-number-bin", MetadataValue::from_bytes(b"123"));
        let mut found_x_word = false;
        for value in map.values() {
            if let ValueRef::Ascii(value) = value {
                if *value == "hello" {
                    found_x_word = true;
                } else {
                    // This arm inspects values, not keys, so the panic
                    // message says so (previously "Unexpected key").
                    panic!("Unexpected value");
                }
            }
        }
        assert!(found_x_word);
    }
    #[test]
    fn test_values_categorizes_binary_entries() {
        let mut map = MetadataMap::new();
        map.insert("x-word", "hello".parse().unwrap());
        map.append_bin("x-word-bin", MetadataValue::from_bytes(b"goodbye"));
        let mut found_x_word_bin = false;
        for value_ref in map.values() {
            if let ValueRef::Binary(value) = value_ref {
                assert_eq!(*value, "goodbye");
                found_x_word_bin = true;
            }
        }
        assert!(found_x_word_bin);
    }
    #[test]
    fn test_values_mut_categorizes_ascii_entries() {
        let mut map = MetadataMap::new();
        map.insert("x-word", "hello".parse().unwrap());
        map.append_bin("x-word-bin", MetadataValue::from_bytes(b"goodbye"));
        map.insert_bin("x-number-bin", MetadataValue::from_bytes(b"123"));
        let mut found_x_word = false;
        for value_ref in map.values_mut() {
            if let ValueRefMut::Ascii(value) = value_ref {
                assert_eq!(*value, "hello");
                found_x_word = true;
            }
        }
        assert!(found_x_word);
    }
    #[test]
    fn test_values_mut_categorizes_binary_entries() {
        let mut map = MetadataMap::new();
        map.insert("x-word", "hello".parse().unwrap());
        map.append_bin("x-word-bin", MetadataValue::from_bytes(b"goodbye"));
        let mut found_x_word_bin = false;
        for value in map.values_mut() {
            if let ValueRefMut::Binary(value) = value {
                assert_eq!(*value, "goodbye");
                found_x_word_bin = true;
            }
        }
        assert!(found_x_word_bin);
    }
    // Compile-time check that the iterator/drain types stay Send + Sync.
    #[allow(dead_code)]
    fn value_drain_is_send_sync() {
        fn is_send_sync<T: Send + Sync>() {}
        is_send_sync::<Iter<'_>>();
        is_send_sync::<IterMut<'_>>();
        is_send_sync::<ValueDrain<'_, Ascii>>();
        is_send_sync::<ValueDrain<'_, Binary>>();
        is_send_sync::<ValueIterMut<'_, Ascii>>();
        is_send_sync::<ValueIterMut<'_, Binary>>();
    }
}
| 32.179581 | 145 | 0.548462 |
f5a2682f15e237b29b95fa55ac9559d89241c23b | 55 | mod entropic_packument;
pub use entropic_packument::*;
| 18.333333 | 30 | 0.818182 |
3810593490208386b2649e4924af0380f3fbbecc | 14,071 | //! Version 2 of the SERCOM pads module
//!
//! This module implements the [`Pad`] type, which represents a [`Pin`]
//! configured to act as a SERCOM pad. A [`Pad`] is parameterized by three
//! types. The first two types identify the pad by its [`Sercom`] and
//! [`PadNum`]. However, each SERCOM pad can usually be mapped to several
//! possible GPIO pins. The third type must implement the [`Map`] trait, which
//! identifies a corresponding [`PinId`] and [`PinMode`]. The [`PinMode`] is
//! usually [`AlternateC`] or [`AlternateD`].
//!
//! To create a [`Pad`], use the [`From`]/[`Into`] traits. Upon creation, the
//! [`Pad`] takes ownership of the [`Pin`]. The conversion from [`Pin`] to
//! [`Pad`] is potentially many-valued, so it usually must be constrained. On
//! the other hand, the conversion from [`Pad`] to [`Pin`] is always unique,
//! because the [`Pad`] always knows which [`Pin`] it contains.
//!
//! ```rust
//! let pad: Pad<Sercom0, Pad0, IoSet1> = pins.pa08.into();
//! let pin: Pin<_, _> = pad.into();
//! ```
//!
//! Because of differences in the way pins are mapped to SERCOM pads, the
//! [`Map`] trait is implemented on different types, depending on the chip
//! series. See the [`Map`] documentation for more details.
//!
//! As a result, the actual implementations of [`Map`] are not found in this
//! module. They are included in the [`pad_map`] module.
//!
//! [`pad_map`]: crate::sercom::v2::pad_map
use core::ops::Deref;
use crate::paste::paste;
use crate::target_device::sercom0;
use crate::target_device::{SERCOM0, SERCOM1};
#[cfg(any(feature = "samd21", feature = "min-samd51g"))]
use crate::target_device::{SERCOM2, SERCOM3};
#[cfg(any(feature = "min-samd21g", feature = "min-samd51g"))]
use crate::target_device::{SERCOM4, SERCOM5};
#[cfg(feature = "min-samd51n")]
use crate::target_device::{SERCOM6, SERCOM7};
use crate::gpio::v2::*;
use crate::typelevel::*;
//==============================================================================
// Sercom
//==============================================================================
/// Type-level `enum` representing a Serial Communication Interface (SERCOM)
///
/// Instances (e.g. `Sercom0`) are uninhabited marker types; the `Sealed`
/// supertrait keeps downstream crates from adding their own.
pub trait Sercom: Sealed {
    /// Corresponding [PAC](crate::target_device) SERCOM type
    type SERCOM: Deref<Target = sercom0::RegisterBlock>;
}
/// Type alias to extract the correct [PAC](crate::target_device) SERCOM type
/// from the [`Sercom`] instance
pub type SERCOM<S> = <S as Sercom>::SERCOM;
// For each identifier, define an uninhabited marker type and wire it to the
// matching PAC register-block type. Which SERCOM instances exist depends on
// the chip variant, hence the `cfg` gates on the invocations below.
macro_rules! sercom {
    ( $($Sercom:ident),+ ) => {
        paste! {
            $(
                /// Represents the corresponding SERCOM instance
                pub enum $Sercom {}
                impl Sealed for $Sercom {}
                // `paste!` upper-cases the identifier (`Sercom0` -> `SERCOM0`)
                // to name the PAC type imported at the top of the file.
                impl Sercom for $Sercom { type SERCOM = [<$Sercom:upper>]; }
            )+
        }
    };
}
sercom!(Sercom0, Sercom1);
#[cfg(any(feature = "samd21", feature = "min-samd51g"))]
sercom!(Sercom2, Sercom3);
#[cfg(any(feature = "min-samd21g", feature = "min-samd51g"))]
sercom!(Sercom4, Sercom5);
#[cfg(feature = "min-samd51n")]
sercom!(Sercom6, Sercom7);
//==============================================================================
// PadNum
//==============================================================================
/// Type-level `enum` representing a SERCOM pad number
pub trait PadNum: Sealed {}
// Define an uninhabited marker type for each pad number.
macro_rules! padnum {
    ( $( $PadNum:ident),+ ) => {
        $(
            /// Represents the corresponding SERCOM pad number
            pub enum $PadNum {}
            impl Sealed for $PadNum {}
            impl PadNum for $PadNum {}
        )+
    };
}
// Pads 0 through 3 — see the four-element `Pads` tuple struct below.
padnum!(Pad0, Pad1, Pad2, Pad3);
//==============================================================================
// IoSet
//==============================================================================
/// Type-level `enum` representing a SERCOM IOSET configuration
#[cfg(feature = "min-samd51g")]
pub trait IoSet: Sealed {}
#[cfg(feature = "min-samd51g")]
macro_rules! ioset {
    ( $($IoSet:ident),+ ) => {
        $(
            /// Represents the corresponding IOSET
            pub enum $IoSet {}
            impl Sealed for $IoSet {}
            impl IoSet for $IoSet {}
        )+
    };
}
// IOSETs only exist on SAMD51/SAME5x parts; which numbers are available is
// gated by the chip-variant features below.
#[cfg(feature = "min-samd51g")]
ioset!(IoSet1, IoSet2, IoSet3, IoSet4);
#[cfg(feature = "min-samd51j")]
ioset!(IoSet5);
#[cfg(feature = "min-samd51g")]
ioset!(IoSet6);
//==============================================================================
// Four-pad tuple struct
//==============================================================================
/// Tuple struct containing all four [`Pad`]s for a given [`Sercom`] and
/// [`IoSet`]
#[cfg(feature = "min-samd51g")]
pub struct Pads<S, I>(
    pub Pad<S, Pad0, I>,
    pub Pad<S, Pad1, I>,
    pub Pad<S, Pad2, I>,
    pub Pad<S, Pad3, I>,
)
where
    S: Sercom,
    // The `IoSet` must provide a pin mapping for every one of the four pads.
    I: IoSet + Map<S, Pad0> + Map<S, Pad1> + Map<S, Pad2> + Map<S, Pad3>;
//==============================================================================
// Pin-to-pad mapping
//==============================================================================
/// Type-level function mapping [`Pad`]s to [`Pin`]s
///
/// This trait acts as a type-level function. It takes two types as arguments,
/// the [`Sercom`] and [`PadNum`] of a [`Pad`], and returns the [`PinId`] and
/// [`PinMode`] for the corresponding [`Pin`].
///
/// For the SAMD51 and SAME5x series chips, all pins for a given SERCOM must
/// come from the same IOSET. To account for this, we introduce a new
#[cfg_attr(feature = "min-samd51g", doc = "[`IoSet`]")]
#[cfg_attr(not(feature = "min-samd51g"), doc = "`IoSet`")]
/// meta-type and implement [`Map`] on its instances. For a given [`Sercom`] and
/// [`PadNum`], the
#[cfg_attr(feature = "min-samd51g", doc = "[`IoSet`]")]
#[cfg_attr(not(feature = "min-samd51g"), doc = "`IoSet`")]
/// uniquely identifies a corresponding [`PinId`] and
/// [`PinMode`].
///
/// The SAMD11 and SAMD21 series chips are not limited by IOSET. Any combination
/// of valid pins for a given SERCOM is acceptable. Thus, the [`Map`] trait is
/// implemented directly on [`PinId`]s. Because the same [`Pin`] can often be
/// used for two different [`Pad`]s, the [`Map`] trait acts to map a
/// [`Sercom`]/[`PadNum`] pair to the correct [`PinMode`] for the [`PinId`].
///
/// The concrete implementations live in the `pad_map` module (see the
/// module-level docs above).
pub trait Map<S, P>
where
    S: Sercom,
    P: PadNum,
{
    /// The [`PinId`] for the corresponding pin
    type Id: PinId;
    /// The [`PinMode`] for the corresponding pin
    type Mode: PinMode;
}
//==============================================================================
// Pad struct
//==============================================================================
/// Represents a SERCOM Pad configured to use a particular pin
///
/// Each [`Pad`] is parameterized by a [`Sercom`], a [`PadNum`], and a third
/// type that implements [`Map`], which is used to determine the corresponding
/// [`Pin`] and its configuration.
///
/// For the SAMD51 and SAME5x chips, [`Map`] is implemented on instances of
#[cfg_attr(feature = "min-samd51g", doc = "[`IoSet`]")]
#[cfg_attr(not(feature = "min-samd51g"), doc = "`IoSet`")]
/// . The SAMD11 and SAMD21 do not have any concept of IOSET, so
/// [`Map`] is instead implemented directly on the corresponding [`PinId`].
///
/// Each [`Pad`] takes ownership of the corresponding [`Pin`] for the duration
/// of its lifetime. [`Pad`]s can be converted to and from [`Pin`]s using the
/// [`Into`] and [`From`] traits.
pub struct Pad<S, P, M>
where
    S: Sercom,
    P: PadNum,
    M: Map<S, P>,
{
    // The owned pin, configured in the mode dictated by `M`; it is released
    // again by `Pad::free`.
    pub(crate) pin: Pin<M::Id, M::Mode>,
}
impl<S, P, M> Pad<S, P, M>
where
    S: Sercom,
    P: PadNum,
    M: Map<S, P>,
{
    /// Create a new SERCOM [`Pad`] from a [`Pin`]
    ///
    /// The specified [`Map`] type must map the specified [`Sercom`] and
    /// [`PadNum`] to the given [`Pin`]
    #[inline]
    pub fn new<O: PinMode>(pin: Pin<M::Id, O>) -> Self
    where
        // The `Into` bound performs the conversion to the pin mode required
        // by the `Map` implementation.
        Pin<M::Id, O>: Into<Pin<M::Id, M::Mode>>,
    {
        Pad { pin: pin.into() }
    }
    /// Consume the [`Pad`] and release the corresponding [`Pin`]
    #[inline]
    pub fn free(self) -> Pin<M::Id, M::Mode> {
        self.pin
    }
    /// Convert a [`Pad`] to a type that implements [`AnyPad`]
    ///
    /// Even though there is a one-to-one mapping between `Pad<S, P, M>` and
    /// `AnyPad<Sercom = S, PadNum = P, Map = M>`, the compiler doesn't know
    /// that. This method provides a way to convert from a [`Pad`] to an
    /// [`AnyPad`]. See the [`AnyPad`] trait for more details.
    #[inline]
    pub fn as_any<T>(self) -> T
    where
        T: AnyPad<Sercom = S, PadNum = P, Map = M>,
    {
        // SAFETY:
        // core::ptr::read performs a bitwise copy, regardless of whether the
        // type implements `Copy`. The returned value is a copy, so we must
        // dispose of self. Because self contains no resources or allocations,
        // we can simply drop it.
        //
        // NOTE(review): this relies on `T` having the same layout as
        // `Pad<S, P, M>` — guaranteed only by the one-to-one `AnyPad`
        // mapping described above; confirm against the `AnyPad` impls.
        unsafe { core::ptr::read(&self as *const _ as *const T) }
    }
}
// Mark `Pad` as `Sealed` so it can participate in this crate's sealed traits.
impl<S, P, M> Sealed for Pad<S, P, M>
where
    S: Sercom,
    P: PadNum,
    M: Map<S, P>,
{
}
//==============================================================================
// AnyPad meta-type
//==============================================================================
/// Type alias to convert from an implementation of [`AnyPad`] to the
/// corresponding concrete [`Pad`]
///
/// The full type is recovered from the trait's three associated types.
pub type ConcretePad<P> = Pad<<P as AnyPad>::Sercom, <P as AnyPad>::PadNum, <P as AnyPad>::Map>;
/// Meta-type representing any [`Pad`]
///
/// All instances of [`Pad`] implement this trait. When used as a trait bound,
/// it acts to encapsulate a [`Pad`]. Without this trait, a completely generic
/// [`Pad`] requires three type parameters, i.e. `Pad<S, P, M>`. But when using
/// this trait, only one type parameter is required, i.e. `P: AnyPad`. However,
/// even though we have dropped type parameters, no information is lost,
/// because the [`Sercom`], [`PadNum`] and [`Map`] type parameters are stored as
/// associated types in the trait. The implementation of [`AnyPad`] looks
/// something like this:
///
/// ```rust
/// impl<S: Sercom, P: PadNum, M: Map<S, P>> AnyPad for Pad<S, P, M> {
/// type Sercom = S;
/// type PadNum = P;
/// type Map = M;
/// // ...
/// }
/// ```
///
/// Thus, there is a one-to-one mapping between `Pad<S, P, M>` and
/// `AnyPad<Sercom = S, PadNum = P, Map = M>`, so you can always recover the
/// full, concrete type from an implementation of [`AnyPad`]. The type alias
/// [`ConcretePad`] is provided for just this purpose.
///
/// ## `AnyPad` as a trait bound
///
/// When using [`AnyPad`] as a trait bound, you can constrain the associated
/// types to restrict the acceptable [`Pad`]s. For example, you could restrict
/// a function to accept a particular pad number.
///
/// ```rust
/// fn example<P>(pad: P)
/// where
/// P: AnyPad<PadNum = Pad2>
/// {
/// }
/// ```
///
/// Or you could accept any pad number, as long as it's in the desired SERCOM.
///
/// ```rust
/// fn example<P>(pad: P)
/// where
/// P: AnyPad<Sercom = Sercom4>
/// {
/// }
/// ```
///
/// You can also apply more complex bounds.
///
/// ```rust
/// fn example<P>(pad: P)
/// where
/// P: AnyPad,
/// P::PadNum: UserTrait,
/// {
/// }
/// ```
///
/// ## Generic `AnyPad`s
///
/// Working with a generic type constrained by [`AnyPad`] is slightly different
/// than working with a concrete [`Pad`]. When compiling a generic function, the
/// compiler cannot assume anything about the specific concrete type. It can
/// only use what it knows about the [`AnyPad`] trait. To cast a generic type to
/// a concrete type, use the [`as_concrete`](AnyPad::as_concrete) method. To
/// cast back to the generic type, use the [`Pad`] method
/// [`as_any`](Pad::as_any).
pub trait AnyPad: Sealed {
    /// The [`Sercom`] this pad belongs to
    type Sercom: Sercom;
    /// The pad number within the SERCOM
    type PadNum: PadNum;
    /// The [`Map`] tying this SERCOM/pad pair to a concrete pin
    type Map: Map<Self::Sercom, Self::PadNum>;
    /// Convert a type that implements [`AnyPad`] to a concrete [`Pad`]
    ///
    /// Even though there is a one-to-one mapping between `Pad<S, P, M>` and
    /// `AnyPad<Sercom = S, PadNum = P, Map = M>`, the compiler doesn't know
    /// that. This method provides a way to convert from an [`AnyPad`] to a
    /// concrete [`Pad`].
    fn as_concrete(self) -> ConcretePad<Self>;
}
impl<S, P, M> AnyPad for Pad<S, P, M>
where
    S: Sercom,
    P: PadNum,
    M: Map<S, P>,
{
    type Sercom = S;
    type PadNum = P;
    type Map = M;
    // A `Pad` is already the concrete type, so this is a no-op.
    #[inline]
    fn as_concrete(self) -> ConcretePad<Self> {
        self
    }
}
//==============================================================================
// Optional pads
//==============================================================================
/// Meta-type representing an optional [`Pad`].
///
/// This trait is implemented for every [`Pad`], as well as for [`NoneT`].
pub trait OptionalPad {}
// NOTE(review): these two impls assume `NoneT` does not implement `AnyPad`;
// otherwise they would overlap — confirm in the `NoneT` definition.
impl OptionalPad for NoneT {}
impl<P: AnyPad> OptionalPad for P {}
/// Meta-type representing a valid [`Pad`].
///
/// When used as a bound, this trait allows you to exclude [`NoneT`] and limit
/// the type to valid [`Pad`]s.
pub trait SomePad: OptionalPad + AnyPad {}
impl<P: AnyPad> SomePad for P {}
//==============================================================================
// Convert between pin and pad
//==============================================================================
impl<S, P, M> From<Pad<S, P, M>> for Pin<M::Id, M::Mode>
where
    S: Sercom,
    P: PadNum,
    M: Map<S, P>,
{
    /// Convert from a [`Pad`] to its corresponding [`Pin`].
    ///
    /// This transformation is unique for a given [`Pad`].
    #[inline]
    fn from(pad: Pad<S, P, M>) -> Self {
        // Releasing the owned pin; equivalent to `Pad::free`.
        pad.pin
    }
}
impl<S, P, M, O> From<Pin<M::Id, O>> for Pad<S, P, M>
where
    S: Sercom,
    P: PadNum,
    M: Map<S, P>,
    O: PinMode,
    Pin<M::Id, O>: Into<Pin<M::Id, M::Mode>>,
{
    /// Convert from a [`Pin`] to its corresponding [`Pad`].
    ///
    /// This conversion is not necessarily unique for a given [`Pin`]
    #[inline]
    fn from(pin: Pin<M::Id, O>) -> Self {
        // Delegates to `Pad::new`, which performs the pin-mode conversion.
        Pad::new(pin)
    }
}
| 33.582339 | 96 | 0.546017 |
50b59f8c6a1c18f98b38192f0067de1df58bbfde | 5,129 | use cosmwasm_std::{Coin, Decimal, Uint128};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use tg_utils::{Duration, Expiration};
pub use crate::claim::Claim;
use tg4::Member;
/// Serde default for `InstantiateMsg::auto_return_limit`: automatically
/// return at most this many claims at the end of a block.
const fn default_auto_return_limit() -> u64 {
    20u64
}
#[derive(Serialize, Deserialize, Clone, PartialEq, JsonSchema, Debug)]
pub struct InstantiateMsg {
    /// denom of the token to stake
    pub denom: String,
    /// Number of staked tokens that count as one membership point
    /// (see the `TotalPoints` query).
    pub tokens_per_point: Uint128,
    /// Minimum bond amount. NOTE(review): presumably stakes below this earn
    /// no points — confirm against the contract logic.
    pub min_bond: Uint128,
    /// unbonding period in seconds
    pub unbonding_period: u64,
    // admin can only add/remove hooks and slashers, not change other parameters
    pub admin: Option<String>,
    // or you can simply pre-authorize a number of hooks (to be done in following messages)
    #[serde(default)]
    pub preauths_hooks: u64,
    // and you can pre-authorize a number of slashers the same way
    #[serde(default)]
    pub preauths_slashing: u64,
    /// Limits how many claims would be automatically returned at end of block, 20 by default.
    /// Setting this to 0 disables auto returning claims.
    #[serde(default = "default_auto_return_limit")]
    pub auto_return_limit: u64,
}
#[derive(Serialize, Deserialize, Clone, PartialEq, JsonSchema, Debug)]
#[serde(rename_all = "snake_case")]
pub enum ExecuteMsg {
    /// Bond will bond all staking tokens sent with the message and update membership points.
    /// The optional `vesting_tokens` will be staked (delegated) as well, if set.
    Bond { vesting_tokens: Option<Coin> },
    /// Unbond will start the unbonding process for the given number of tokens.
    /// The sender immediately loses points from these tokens, and can claim them
    /// back to his wallet after `unbonding_period`.
    /// Tokens will be unbonded from the liquid stake first, and then from the vesting stake
    /// if available.
    Unbond { tokens: Coin },
    /// Claim is used to claim your native tokens that you previously "unbonded"
    /// after the contract-defined waiting period (eg. 1 week)
    Claim {},
    /// Change the admin
    UpdateAdmin { admin: Option<String> },
    /// Add a new hook to be informed of all membership changes. Must be called by Admin
    AddHook { addr: String },
    /// Remove a hook. Must be called by Admin
    RemoveHook { addr: String },
    /// Add a new slasher. Must be called by Admin
    AddSlasher { addr: String },
    /// Remove a slasher. Must be called by Admin
    RemoveSlasher { addr: String },
    /// Slash the stake of `addr` by the given portion.
    /// NOTE(review): presumably restricted to registered slashers — confirm
    /// in the contract implementation.
    Slash {
        addr: String,
        // between (0.0, 1.0]
        portion: Decimal,
    },
}
#[derive(Serialize, Deserialize, Clone, PartialEq, JsonSchema, Debug)]
#[serde(rename_all = "snake_case")]
pub enum QueryMsg {
    /// Returns config
    Configuration {},
    /// Claims shows the tokens in process of unbonding for this address
    Claims {
        address: String,
        limit: Option<u32>,
        start_after: Option<Expiration>,
    },
    /// Show the number of tokens currently staked by this address.
    Staked {
        address: String,
    },
    /// Returns the unbonding period of this contract
    UnbondingPeriod {},
    /// Return AdminResponse
    Admin {},
    /// Return TotalPointsResponse. This is the amount of tokens bonded divided by
    /// tokens_per_point.
    TotalPoints {},
    /// Returns MemberListResponse
    ListMembers {
        start_after: Option<String>,
        limit: Option<u32>,
    },
    /// Returns MemberListResponse, sorted by points descending.
    ListMembersByPoints {
        start_after: Option<Member>,
        limit: Option<u32>,
    },
    /// Returns MemberResponse
    Member {
        addr: String,
        at_height: Option<u64>,
    },
    /// Shows all registered hooks. Returns HooksResponse.
    Hooks {},
    /// Return the current number of preauths. Returns PreauthResponse.
    Preauths {},
    /// Returns information (bool) whether given address is an active slasher
    IsSlasher {
        addr: String,
    },
    /// Returns all active slashers as vector of addresses
    ListSlashers {},
}
/// Response to `QueryMsg::Staked`: liquid and vesting amounts bonded by the
/// queried address.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
pub struct StakedResponse {
    pub liquid: Coin,
    pub vesting: Coin,
}
/// Response to `QueryMsg::Preauths`: remaining hook pre-authorizations.
#[derive(Serialize, Deserialize, Clone, PartialEq, JsonSchema, Debug)]
pub struct PreauthResponse {
    pub preauths_hooks: u64,
}
/// Response to `QueryMsg::UnbondingPeriod`.
#[derive(Serialize, Deserialize, Clone, PartialEq, JsonSchema, Debug)]
pub struct UnbondingPeriodResponse {
    pub unbonding_period: Duration,
}
/// Response to `QueryMsg::Claims`: unbonding claims for an address.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
pub struct ClaimsResponse {
    pub claims: Vec<Claim>,
}
/// Response to `QueryMsg::TotalPoints`.
#[derive(Serialize, Deserialize, Clone, PartialEq, JsonSchema, Debug)]
pub struct TotalPointsResponse {
    pub points: u64,
    pub denom: String,
}
#[cfg(test)]
mod tests {
    use super::*;
    use cosmwasm_std::to_vec;
    use tg_utils::Duration;
    /// `Duration` must serialize as a bare number of seconds so the
    /// `unbonding_period` field stays stable in the public JSON API.
    #[test]
    fn unbonding_period_serializes_in_seconds() {
        let res = UnbondingPeriodResponse {
            unbonding_period: Duration::new(12345),
        };
        let json = to_vec(&res).unwrap();
        assert_eq!(&json, br#"{"unbonding_period":12345}"#);
    }
}
| 31.660494 | 94 | 0.672256 |
1a1234a84209347da66e667774dc1b5be0c2d5d3 | 13,808 | use ark_ec::msm::FixedBaseMSM;
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::PrimeField;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError};
use ark_std::{rand::Rng, One, UniformRand};
use sha2::{Digest, Sha256};
use std::io::{Read, Write};
use std::clone::Clone;
use super::commitment::{VKey, WKey};
use crate::Error;
use std::ops::MulAssign;
/// Maximum size of the generic SRS constructed from Filecoin and Zcash power of
/// taus.
///
/// https://github.com/nikkolasg/taupipp/blob/baca1426266bf39416c45303e35c966d69f4f8b4/src/bin/assemble.rs#L12
pub const MAX_SRS_SIZE: usize = (2 << 19) + 1; // = 2^20 + 1
/// It contains the maximum number of raw elements of the SRS needed to
/// aggregate and verify Groth16 proofs. One can derive specialized prover and
/// verifier key for _specific_ size of aggregations by calling
/// `srs.specialize(n)`. The specialized prover key also contains precomputed
/// tables that drastically increase prover's performance. This GenericSRS is
/// usually formed from the transcript of two distinct power of taus ceremony,
/// in other words from two distinct Groth16 CRS.
/// See [there](https://github.com/nikkolasg/taupipp) a way on how to generate
/// this GenericSRS.
#[derive(Clone, Debug)]
pub struct GenericSRS<E: PairingEngine> {
    /// $\{g^a^i\}_{i=0}^{N}$ where N is the smallest size of the two Groth16 CRS.
    pub g_alpha_powers: Vec<E::G1Affine>,
    /// $\{h^a^i\}_{i=0}^{N}$ where N is the smallest size of the two Groth16 CRS.
    pub h_alpha_powers: Vec<E::G2Affine>,
    /// $\{g^b^i\}_{i=0}^{N}$ where N is the smallest size of the two Groth16 CRS.
    pub g_beta_powers: Vec<E::G1Affine>,
    /// $\{h^b^i\}_{i=0}^{N}$ where N is the smallest size of the two Groth16 CRS.
    pub h_beta_powers: Vec<E::G2Affine>,
}
/// ProverSRS is the specialized SRS version for the prover for a specific number of proofs to
/// aggregate. It contains as well the commitment keys for this specific size.
/// Note the size must be a power of two for the moment - if it is not, padding must be
/// applied.
///
/// Constructed via [`GenericSRS::specialize`].
#[derive(Clone, Debug)]
pub struct ProverSRS<E: PairingEngine> {
    /// number of proofs to aggregate
    pub n: usize,
    /// $\{g^a^i\}_{i=0}^{2n-1}$ where n is the number of proofs to be aggregated
    /// We take all powers instead of only ones from n -> 2n-1 (w commitment key
    /// is formed from these powers) since the prover will create a shifted
    /// polynomial of degree 2n-1 when doing the KZG opening proof.
    pub g_alpha_powers_table: Vec<E::G1Affine>,
    /// $\{h^a^i\}_{i=0}^{n-1}$ - here we don't need to go to 2n-1 since v
    /// commitment key only goes up to n-1 exponent.
    pub h_alpha_powers_table: Vec<E::G2Affine>,
    /// $\{g^b^i\}_{i=0}^{2n-1}$
    pub g_beta_powers_table: Vec<E::G1Affine>,
    /// $\{h^b^i\}_{i=0}^{n-1}$
    pub h_beta_powers_table: Vec<E::G2Affine>,
    /// commitment key using in MIPP and TIPP
    pub vkey: VKey<E>,
    /// commitment key using in TIPP
    pub wkey: WKey<E>,
}
/// Contains the necessary elements to verify an aggregated Groth16 proof; it is of fixed size
/// regardless of the number of proofs aggregated. However, a verifier SRS will be determined by
/// the number of proofs being aggregated.
#[derive(Clone, Debug)]
pub struct VerifierSRS<E: PairingEngine> {
    /// Number of proofs the matching prover SRS was specialized for
    pub n: usize,
    /// G1 generator (`g_alpha_powers[0]`)
    pub g: E::G1Projective,
    /// G2 generator (`h_alpha_powers[0]`)
    pub h: E::G2Projective,
    /// $g^a$ (`g_alpha_powers[1]`)
    pub g_alpha: E::G1Projective,
    /// $g^b$ (`g_beta_powers[1]`)
    pub g_beta: E::G1Projective,
    /// $h^a$ (`h_alpha_powers[1]`)
    pub h_alpha: E::G2Projective,
    /// $h^b$ (`h_beta_powers[1]`)
    pub h_beta: E::G2Projective,
}
impl<E: PairingEngine> PartialEq for GenericSRS<E> {
    /// Two generic SRS are equal iff all four power tables match element-wise.
    fn eq(&self, other: &Self) -> bool {
        let lhs = (
            &self.g_alpha_powers,
            &self.g_beta_powers,
            &self.h_alpha_powers,
            &self.h_beta_powers,
        );
        let rhs = (
            &other.g_alpha_powers,
            &other.g_beta_powers,
            &other.h_alpha_powers,
            &other.h_beta_powers,
        );
        lhs == rhs
    }
}
impl<E: PairingEngine> PartialEq for VerifierSRS<E> {
    /// Equality over all group elements; the specialization size `n` is
    /// deliberately not compared (matching the original implementation).
    fn eq(&self, other: &Self) -> bool {
        let lhs = (
            &self.g,
            &self.h,
            &self.g_alpha,
            &self.g_beta,
            &self.h_alpha,
            &self.h_beta,
        );
        let rhs = (
            &other.g,
            &other.h,
            &other.g_alpha,
            &other.g_beta,
            &other.h_alpha,
            &other.h_beta,
        );
        lhs == rhs
    }
}
impl<E: PairingEngine> ProverSRS<E> {
    /// Returns true if commitment keys have the exact required length.
    /// It is necessary for the IPP scheme to work that commitment
    /// key have the exact same number of arguments as the number of proofs to
    /// aggregate.
    pub fn has_correct_len(&self, n: usize) -> bool {
        // Both the v and w commitment keys must match `n` exactly.
        self.vkey.has_correct_len(n) && self.wkey.has_correct_len(n)
    }
}
impl<E: PairingEngine> GenericSRS<E> {
/// specializes returns the prover and verifier SRS for a specific number of
/// proofs to aggregate. The number of proofs MUST BE a power of two, it
/// panics otherwise. The number of proofs must be inferior to half of the
/// size of the generic srs otherwise it panics.
pub fn specialize(&self, num_proofs: usize) -> (ProverSRS<E>, VerifierSRS<E>) {
assert!(num_proofs.is_power_of_two());
let tn = 2 * num_proofs; // size of the CRS we need
assert!(self.g_alpha_powers.len() >= tn);
assert!(self.h_alpha_powers.len() >= tn);
assert!(self.g_beta_powers.len() >= tn);
assert!(self.h_beta_powers.len() >= tn);
let n = num_proofs;
// when doing the KZG opening we need _all_ coefficients from 0
// to 2n-1 because the polynomial is of degree 2n-1.
let g_low = 0;
let g_up = tn;
let h_low = 0;
let h_up = h_low + n;
// TODO precompute window
let g_alpha_powers_table = self.g_alpha_powers[g_low..g_up].to_vec();
let g_beta_powers_table = self.g_beta_powers[g_low..g_up].to_vec();
let h_alpha_powers_table = self.h_alpha_powers[h_low..h_up].to_vec();
let h_beta_powers_table = self.h_beta_powers[h_low..h_up].to_vec();
println!(
"\nPROVER SRS -- nun_proofs {}, tn {}, alpha_power_table {}\n",
num_proofs,
tn,
g_alpha_powers_table.len()
);
let v1 = self.h_alpha_powers[h_low..h_up].to_vec();
let v2 = self.h_beta_powers[h_low..h_up].to_vec();
let vkey = VKey::<E> { a: v1, b: v2 };
assert!(vkey.has_correct_len(n));
// however, here we only need the "right" shifted bases for the
// commitment scheme.
let w1 = self.g_alpha_powers[n..g_up].to_vec();
let w2 = self.g_beta_powers[n..g_up].to_vec();
let wkey = WKey::<E> { a: w1, b: w2 };
assert!(wkey.has_correct_len(n));
let pk = ProverSRS::<E> {
g_alpha_powers_table,
g_beta_powers_table,
h_alpha_powers_table,
h_beta_powers_table,
vkey,
wkey,
n,
};
let vk = VerifierSRS::<E> {
n: n,
g: self.g_alpha_powers[0].into_projective(),
h: self.h_alpha_powers[0].into_projective(),
g_alpha: self.g_alpha_powers[1].into_projective(),
g_beta: self.g_beta_powers[1].into_projective(),
h_alpha: self.h_alpha_powers[1].into_projective(),
h_beta: self.h_beta_powers[1].into_projective(),
};
(pk, vk)
}
pub fn write<W: Write>(&self, mut writer: W) -> Result<(), Error> {
(self.g_alpha_powers.len() as u32).serialize(&mut writer)?;
write_vec(
&mut writer,
&self
.g_alpha_powers
.iter()
.map(|e| e.into_projective())
.collect::<Vec<E::G1Projective>>(),
)?;
write_vec(
&mut writer,
&self
.g_beta_powers
.iter()
.map(|e| e.into_projective())
.collect::<Vec<E::G1Projective>>(),
)?;
write_vec(
&mut writer,
&self
.h_alpha_powers
.iter()
.map(|e| e.into_projective())
.collect::<Vec<E::G2Projective>>(),
)?;
write_vec(
&mut writer,
&self
.h_beta_powers
.iter()
.map(|e| e.into_projective())
.collect::<Vec<E::G2Projective>>(),
)?;
Ok(())
}
/// Returns the hash over all powers of this generic srs.
pub fn hash(&self) -> Vec<u8> {
let mut v = Vec::new();
self.write(&mut v).expect("failed to compute hash");
Sha256::digest(&v).to_vec()
}
pub fn read<R: Read>(mut reader: R) -> Result<Self, Error> {
let len = u32::deserialize(&mut reader).map_err(|e| Error::Serialization(e))?;
if len > MAX_SRS_SIZE as u32 {
return Err(Error::InvalidSRS("SRS len > maximum".to_string()));
}
let g_alpha_powers = read_vec(len, &mut reader).map_err(|e| Error::Serialization(e))?;
let g_beta_powers = read_vec(len, &mut reader).map_err(|e| Error::Serialization(e))?;
let h_alpha_powers = read_vec(len, &mut reader).map_err(|e| Error::Serialization(e))?;
let h_beta_powers = read_vec(len, &mut reader).map_err(|e| Error::Serialization(e))?;
Ok(Self {
g_alpha_powers,
g_beta_powers,
h_alpha_powers,
h_beta_powers,
})
}
}
/// Generates a SRS of the given size. It must NOT be used in production, only
/// in testing, as this is insecure given we know the secret exponent of the SRS.
pub fn setup_fake_srs<E: PairingEngine, R: Rng>(rng: &mut R, size: usize) -> GenericSRS<E> {
let alpha = E::Fr::rand(rng);
let beta = E::Fr::rand(rng);
let g = E::G1Projective::prime_subgroup_generator();
let h = E::G2Projective::prime_subgroup_generator();
let mut g_alpha_powers = Vec::new();
let mut g_beta_powers = Vec::new();
let mut h_alpha_powers = Vec::new();
let mut h_beta_powers = Vec::new();
rayon::scope(|s| {
let alpha = α
let h = &h;
let g = &g;
let beta = β
let g_alpha_powers = &mut g_alpha_powers;
s.spawn(move |_| {
*g_alpha_powers = structured_generators_scalar_power(2 * size, g, alpha);
});
let g_beta_powers = &mut g_beta_powers;
s.spawn(move |_| {
*g_beta_powers = structured_generators_scalar_power(2 * size, g, beta);
});
let h_alpha_powers = &mut h_alpha_powers;
s.spawn(move |_| {
*h_alpha_powers = structured_generators_scalar_power(2 * size, h, alpha);
});
let h_beta_powers = &mut h_beta_powers;
s.spawn(move |_| {
*h_beta_powers = structured_generators_scalar_power(2 * size, h, beta);
});
});
debug_assert!(h_alpha_powers[0] == E::G2Affine::prime_subgroup_generator());
debug_assert!(h_beta_powers[0] == E::G2Affine::prime_subgroup_generator());
debug_assert!(g_alpha_powers[0] == E::G1Affine::prime_subgroup_generator());
debug_assert!(g_beta_powers[0] == E::G1Affine::prime_subgroup_generator());
GenericSRS {
g_alpha_powers,
g_beta_powers,
h_alpha_powers,
h_beta_powers,
}
}
/// Computes `num` powers of the generator, `[g^(s^0), g^(s^1), ..., g^(s^(num-1))]`,
/// returned in affine form.
///
/// Uses a fixed-base multi-scalar multiplication window table for efficiency.
///
/// # Panics
///
/// Panics if `num == 0`.
pub(crate) fn structured_generators_scalar_power<G: ProjectiveCurve>(
    num: usize,
    g: &G,
    s: &G::ScalarField,
) -> Vec<G::Affine> {
    assert!(num > 0);
    // Build the scalar sequence [s^0, s^1, ..., s^(num-1)].
    let mut powers_of_scalar = Vec::with_capacity(num);
    let mut pow_s = G::ScalarField::one();
    for _ in 0..num {
        powers_of_scalar.push(pow_s);
        pow_s.mul_assign(s);
    }
    let scalar_bits = G::ScalarField::size_in_bits();
    let window_size = FixedBaseMSM::get_mul_window_size(num);
    let g_table = FixedBaseMSM::get_window_table::<G>(scalar_bits, window_size, g.clone());
    let powers_of_g = FixedBaseMSM::multi_scalar_mul::<G>(
        scalar_bits,
        window_size,
        &g_table,
        &powers_of_scalar[..],
    );
    powers_of_g.into_iter().map(|v| v.into_affine()).collect()
}
/// Serializes every group element of `v`, in order, into `w`.
fn write_vec<G: ProjectiveCurve, W: Write>(mut w: W, v: &[G]) -> Result<(), SerializationError> {
    v.iter().try_for_each(|p| p.serialize(&mut w))
}
/// Deserializes exactly `len` group elements from `r`, in order, failing on
/// the first malformed element.
fn read_vec<G: CanonicalDeserialize, R: Read>(
    len: u32,
    mut r: R,
) -> Result<Vec<G>, SerializationError> {
    let mut out = Vec::with_capacity(len as usize);
    for _ in 0..len {
        out.push(G::deserialize(&mut r)?);
    }
    Ok(out)
}
#[cfg(test)]
mod test {
    use super::*;
    use ark_bls12_381::Bls12_381 as Bls12;
    use rand_core::SeedableRng;
    use std::io::Cursor;
    /// `GenericSRS::read` must reject a header whose declared length exceeds
    /// `MAX_SRS_SIZE`, even when the rest of the payload is well formed.
    #[test]
    fn test_srs_invalid_length() {
        let mut rng = rand_chacha::ChaChaRng::seed_from_u64(0u64);
        let size = 8;
        let srs = setup_fake_srs::<Bls12, _>(&mut rng, size);
        let vec_len = srs.g_alpha_powers.len();
        let mut buffer = Vec::new();
        srs.write(&mut buffer).expect("writing to buffer failed");
        // trying out normal operations
        GenericSRS::<Bls12>::read(&mut Cursor::new(&buffer)).expect("can't read the srs");
        // trying to read the first size
        let read_size = u32::deserialize(Cursor::new(&buffer)).unwrap() as usize;
        assert_eq!(vec_len, read_size);
        // remove the previous size from the buffer - u32 = 4 bytes
        // and replace the size by appending the rest
        let mut new_buffer = Vec::new();
        let invalid_size = MAX_SRS_SIZE + 1;
        (invalid_size as u32)
            .serialize(&mut new_buffer)
            .expect("failed to write invalid size");
        buffer.drain(0..4);
        new_buffer.append(&mut buffer);
        GenericSRS::<Bls12>::read(&mut Cursor::new(&new_buffer))
            .expect_err("this should have failed");
    }
}
| 38.143646 | 110 | 0.610443 |
08ccd5802c2de35192fdf54ae7a5abbe75a77c0a | 10,141 | use crate::{addr::Endpoint, auth::*, core::*, error::*, Ctx, CtxHandle};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
/// A `Client` socket is used for advanced request-reply messaging.
///
/// `Client` sockets are threadsafe and can be used from multiple threads at the
/// same time. Note that replies from a `Server` socket will go to the first
/// client thread that calls `recv`. If you need to get replies back to the
/// originating thread, use one `Client` socket per thread.
///
/// When a `Client` socket is connected to multiple sockets, outgoing
/// messages are distributed between connected peers on a round-robin basis.
/// Likewise, the `Client` socket receives messages fairly from each connected peer.
///
/// # Mute State
/// When `Client` socket enters the mute state due to having reached the high water
/// mark, or if there are no peers at all, then any send operations on the
/// socket shall block until the mute state ends or at least one peer becomes
/// available for sending; messages are not discarded.
///
/// # Summary of Characteristics
/// | Characteristic | Value |
/// |:-------------------------:|:----------------------:|
/// | Compatible peer sockets | [`Server`] |
/// | Direction | Bidirectional |
/// | Send/receive pattern | Unrestricted |
/// | Outgoing routing strategy | Round-robin |
/// | Incoming routing strategy | Fair-queued |
/// | Action in mute state | Block |
///
/// # Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::{prelude::*, *};
///
/// // Use a system assigned port.
/// let addr: TcpAddr = "127.0.0.1:*".try_into()?;
///
/// let server = ServerBuilder::new()
/// .bind(addr)
/// .build()?;
///
/// // Retrieve the addr that was assigned.
/// let bound = server.last_endpoint()?;
///
/// let client = ClientBuilder::new()
/// .connect(bound)
/// .build()?;
///
/// // Send a string request.
/// client.send("tell me something")?;
///
/// // Receive the client request.
/// let msg = server.recv_msg()?;
/// let id = msg.routing_id().unwrap();
///
/// // Reply to the client.
/// server.route("it takes 224 bits to store a i32 in java", id)?;
///
/// // We send as many replies as we want.
/// server.route("also don't talk to me", id)?;
///
/// // Retrieve the first reply.
/// let mut msg = client.recv_msg()?;
/// // And the second.
/// client.recv(&mut msg)?;
/// #
/// # Ok(())
/// # }
/// ```
///
/// [`Server`]: struct.Server.html
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Client {
    // Shared handle to the underlying raw socket; cloning a `Client` is
    // cheap and all clones refer to the same socket.
    inner: Arc<RawSocket>,
}
// Constructors and context accessor for `Client`.
impl Client {
    /// Create a `Client` socket from the [`global context`]
    ///
    /// # Returned Error Variants
    /// * [`InvalidCtx`]
    /// * [`SocketLimit`]
    ///
    /// [`InvalidCtx`]: enum.ErrorKind.html#variant.InvalidCtx
    /// [`SocketLimit`]: enum.ErrorKind.html#variant.SocketLimit
    /// [`global context`]: struct.Ctx.html#method.global
    pub fn new() -> Result<Self, Error> {
        let inner = Arc::new(RawSocket::new(RawSocketType::Client)?);
        Ok(Self { inner })
    }
    /// Create a `Client` socket associated with a specific context
    /// from a `CtxHandle`.
    ///
    /// # Returned Error Variants
    /// * [`InvalidCtx`]
    /// * [`SocketLimit`]
    ///
    /// [`InvalidCtx`]: enum.ErrorKind.html#variant.InvalidCtx
    /// [`SocketLimit`]: enum.ErrorKind.html#variant.SocketLimit
    pub fn with_ctx(handle: CtxHandle) -> Result<Self, Error> {
        let inner =
            Arc::new(RawSocket::with_ctx(RawSocketType::Client, handle)?);
        Ok(Self { inner })
    }
    /// Returns the handle to the `Ctx` of the socket.
    pub fn ctx(&self) -> CtxHandle {
        self.inner.ctx()
    }
}
impl GetRawSocket for Client {
    fn raw_socket(&self) -> &RawSocket {
        &self.inner
    }
}
// Marker impls opting `Client` into the generic socket extension traits.
impl Heartbeating for Client {}
impl Socket for Client {}
impl SendMsg for Client {}
impl RecvMsg for Client {}
// SAFETY(review): `Client` only wraps an `Arc<RawSocket>`, and the type-level
// docs above state client sockets are threadsafe; these impls assume the
// underlying ZMQ client socket upholds that — confirm against libzmq.
unsafe impl Send for Client {}
unsafe impl Sync for Client {}
/// A configuration for a `Client`.
///
/// Especially helpful in config files.
// We can't derive and use #[serde(flatten)] because of this issue:
// https://github.com/serde-rs/serde/issues/1346.
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(into = "FlatClientConfig")]
#[serde(from = "FlatClientConfig")]
pub struct ClientConfig {
    socket_config: SocketConfig,
    send_config: SendConfig,
    recv_config: RecvConfig,
    heartbeat_config: HeartbeatingConfig,
}
impl ClientConfig {
    /// Creates a config with every option at its default value.
    pub fn new() -> Self {
        Self::default()
    }
    /// Builds and configures a `Client` in the global context.
    pub fn build(&self) -> Result<Client, Error> {
        self.with_ctx(Ctx::global())
    }
    /// Builds and configures a `Client` in the context behind `handle`.
    pub fn with_ctx(&self, handle: CtxHandle) -> Result<Client, Error> {
        let client = Client::with_ctx(handle)?;
        self.apply(&client)?;
        Ok(client)
    }
    /// Applies all sub-configurations to an existing `Client`.
    pub fn apply(&self, client: &Client) -> Result<(), Error> {
        self.send_config.apply(client)?;
        self.recv_config.apply(client)?;
        self.heartbeat_config.apply(client)?;
        self.socket_config.apply(client)?;
        Ok(())
    }
}
// Flattened, serialization-only mirror of `ClientConfig`: since
// #[serde(flatten)] can't be used (see comment above `ClientConfig`),
// the nested sub-configs are manually flattened into this struct.
#[derive(Clone, Serialize, Deserialize)]
struct FlatClientConfig {
    connect: Option<Vec<Endpoint>>,
    bind: Option<Vec<Endpoint>>,
    heartbeat: Option<Heartbeat>,
    send_hwm: HighWaterMark,
    send_timeout: Period,
    recv_hwm: HighWaterMark,
    recv_timeout: Period,
    mechanism: Option<Mechanism>,
}
impl From<ClientConfig> for FlatClientConfig {
    // Flatten the nested sub-configs into the serializable form.
    fn from(config: ClientConfig) -> Self {
        let socket_config = config.socket_config;
        let send_config = config.send_config;
        let recv_config = config.recv_config;
        let heartbeat_config = config.heartbeat_config;
        Self {
            connect: socket_config.connect,
            bind: socket_config.bind,
            heartbeat: heartbeat_config.heartbeat,
            mechanism: socket_config.mechanism,
            send_hwm: send_config.send_hwm,
            send_timeout: send_config.send_timeout,
            recv_hwm: recv_config.recv_hwm,
            recv_timeout: recv_config.recv_timeout,
        }
    }
}
impl From<FlatClientConfig> for ClientConfig {
    // Rebuild the nested sub-configs from the flat serialized form; inverse
    // of `From<ClientConfig> for FlatClientConfig`.
    fn from(flat: FlatClientConfig) -> Self {
        let socket_config = SocketConfig {
            connect: flat.connect,
            bind: flat.bind,
            mechanism: flat.mechanism,
        };
        let send_config = SendConfig {
            send_hwm: flat.send_hwm,
            send_timeout: flat.send_timeout,
        };
        let recv_config = RecvConfig {
            recv_hwm: flat.recv_hwm,
            recv_timeout: flat.recv_timeout,
        };
        let heartbeat_config = HeartbeatingConfig {
            heartbeat: flat.heartbeat,
        };
        Self {
            socket_config,
            send_config,
            recv_config,
            heartbeat_config,
        }
    }
}
// Delegate the config-trait accessors to the embedded sub-configs so the
// generic Configure* extension traits work on `ClientConfig`.
impl GetSocketConfig for ClientConfig {
    fn socket_config(&self) -> &SocketConfig {
        &self.socket_config
    }
    fn socket_config_mut(&mut self) -> &mut SocketConfig {
        &mut self.socket_config
    }
}
impl ConfigureSocket for ClientConfig {}
impl GetRecvConfig for ClientConfig {
    fn recv_config(&self) -> &RecvConfig {
        &self.recv_config
    }
    fn recv_config_mut(&mut self) -> &mut RecvConfig {
        &mut self.recv_config
    }
}
impl ConfigureRecv for ClientConfig {}
impl GetSendConfig for ClientConfig {
    fn send_config(&self) -> &SendConfig {
        &self.send_config
    }
    fn send_config_mut(&mut self) -> &mut SendConfig {
        &mut self.send_config
    }
}
impl ConfigureSend for ClientConfig {}
impl GetHeartbeatingConfig for ClientConfig {
    fn heartbeat_config(&self) -> &HeartbeatingConfig {
        &self.heartbeat_config
    }
    fn heartbeat_config_mut(&mut self) -> &mut HeartbeatingConfig {
        &mut self.heartbeat_config
    }
}
impl ConfigureHeartbeating for ClientConfig {}
/// A builder for a `Client`.
///
/// Allows for ergonomic one line socket configuration.
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct ClientBuilder {
    // All builder state lives in the underlying config.
    inner: ClientConfig,
}
impl ClientBuilder {
    /// Creates a builder with default settings.
    pub fn new() -> Self {
        Self::default()
    }
    /// Builds the configured `Client` in the global context.
    pub fn build(&self) -> Result<Client, Error> {
        self.inner.build()
    }
    /// Builds the configured `Client` in the context behind `handle`.
    pub fn with_ctx(&self, handle: CtxHandle) -> Result<Client, Error> {
        self.inner.with_ctx(handle)
    }
}
// Delegate the builder's config-trait plumbing to the inner `ClientConfig`
// so the Build* extension traits can be chained on `ClientBuilder`.
impl GetSocketConfig for ClientBuilder {
    fn socket_config(&self) -> &SocketConfig {
        self.inner.socket_config()
    }
    fn socket_config_mut(&mut self) -> &mut SocketConfig {
        self.inner.socket_config_mut()
    }
}
impl BuildSocket for ClientBuilder {}
impl GetSendConfig for ClientBuilder {
    fn send_config(&self) -> &SendConfig {
        self.inner.send_config()
    }
    fn send_config_mut(&mut self) -> &mut SendConfig {
        self.inner.send_config_mut()
    }
}
impl BuildSend for ClientBuilder {}
impl GetRecvConfig for ClientBuilder {
    fn recv_config(&self) -> &RecvConfig {
        self.inner.recv_config()
    }
    fn recv_config_mut(&mut self) -> &mut RecvConfig {
        self.inner.recv_config_mut()
    }
}
impl BuildRecv for ClientBuilder {}
impl GetHeartbeatingConfig for ClientBuilder {
    fn heartbeat_config(&self) -> &HeartbeatingConfig {
        self.inner.heartbeat_config()
    }
    fn heartbeat_config_mut(&mut self) -> &mut HeartbeatingConfig {
        self.inner.heartbeat_config_mut()
    }
}
impl BuildHeartbeating for ClientBuilder {}
#[cfg(test)]
mod test {
    use super::*;
    use crate::{prelude::TryInto, InprocAddr};
    /// Round-trips a `ClientConfig` through serialization to verify that
    /// serialize and deserialize are inverses.
    #[test]
    fn test_ser_de() {
        let addr: InprocAddr = "test".try_into().unwrap();
        let mut config = ClientConfig::new();
        config.set_connect(Some(&addr));
        // The intermediate representation is YAML (the local was previously
        // misleadingly named `ron`, which is a different format).
        let yaml = serde_yaml::to_string(&config).unwrap();
        let de: ClientConfig = serde_yaml::from_str(&yaml).unwrap();
        assert_eq!(config, de);
    }
}
| 27.482385 | 84 | 0.618874 |
759697a8efd867b072cace25d99fe2d67633864e | 7,442 | //! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
//!
use crate::packet::{self, send_to, Packets, PacketsRecycler, PACKETS_PER_BATCH};
use crate::recvmmsg::NUM_RCVMMSGS;
use solana_measure::thread_mem_usage;
use solana_sdk::timing::{duration_as_ms, timestamp};
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError, SendError, Sender};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::{Duration, Instant};
use thiserror::Error;
/// Receiving half of the packet-batch channel.
pub type PacketReceiver = Receiver<Packets>;
/// Sending half of the packet-batch channel.
pub type PacketSender = Sender<Packets>;
/// Errors surfaced by the streamer services.
#[derive(Error, Debug)]
pub enum StreamerError {
    #[error("I/O error")]
    IO(#[from] std::io::Error),
    #[error("receive timeout error")]
    RecvTimeoutError(#[from] RecvTimeoutError),
    #[error("send packets error")]
    SendError(#[from] SendError<Packets>),
}
/// Convenience alias used throughout this module.
pub type Result<T> = std::result::Result<T, StreamerError>;
/// Reads packets from `sock` into freshly recycled `Packets` batches and
/// forwards every non-empty batch on `channel` until `exit` is set.
///
/// Returns `Ok(())` when `exit` is observed; propagates channel send errors.
/// Emits a `datapoint_debug` once more than 1024 packets have accumulated
/// since the last report.
fn recv_loop(
    sock: &UdpSocket,
    exit: Arc<AtomicBool>,
    channel: &PacketSender,
    recycler: &PacketsRecycler,
    name: &'static str,
) -> Result<()> {
    let mut recv_count = 0;
    let mut call_count = 0;
    let mut now = Instant::now();
    let mut num_max_received = 0; // Number of times maximum packets were received
    loop {
        let mut msgs = Packets::new_with_recycler(recycler.clone(), PACKETS_PER_BATCH, name);
        loop {
            // Check for exit signal, even if socket is busy
            // (for instance the leader transaction socket)
            if exit.load(Ordering::Relaxed) {
                return Ok(());
            }
            if let Ok(len) = packet::recv_from(&mut msgs, sock, 1) {
                if len == NUM_RCVMMSGS {
                    num_max_received += 1;
                }
                recv_count += len;
                call_count += 1;
                if len > 0 {
                    channel.send(msgs)?;
                }
                break;
            }
        }
        if recv_count > 1024 {
            datapoint_debug!(
                name,
                ("received", recv_count as i64, i64),
                ("call_count", i64::from(call_count), i64),
                ("elapsed", now.elapsed().as_millis() as i64, i64),
                ("max_received", i64::from(num_max_received), i64),
            );
            recv_count = 0;
            call_count = 0;
            num_max_received = 0;
        }
        // NOTE(review): the timer is reset on every outer iteration, so the
        // "elapsed" stat above covers only the most recent batch rather than
        // the interval since the last report — confirm this is intended.
        now = Instant::now();
    }
}
/// Spawns a thread that receives packets on `sock` and forwards them to
/// `packet_sender` until `exit` is set.
///
/// A one-second read timeout is installed on the socket so the receive loop
/// can periodically observe the exit flag even when no data arrives.
///
/// # Panics
///
/// Panics if the read timeout cannot be set on the socket or if the thread
/// cannot be spawned.
pub fn receiver(
    sock: Arc<UdpSocket>,
    exit: &Arc<AtomicBool>,
    packet_sender: PacketSender,
    recycler: PacketsRecycler,
    name: &'static str,
) -> JoinHandle<()> {
    // Replaces the previous `is_err()` + `panic!` pattern; `expect` keeps the
    // same failure behavior while also surfacing the underlying I/O error.
    sock.set_read_timeout(Some(Duration::new(1, 0)))
        .expect("streamer::receiver set_read_timeout error");
    let exit = exit.clone();
    Builder::new()
        .name("solana-receiver".to_string())
        .spawn(move || {
            thread_mem_usage::datapoint(name);
            // Pass the recycler by reference directly; the previous
            // `&recycler.clone()` created a redundant temporary clone.
            let _ = recv_loop(&sock, exit, &packet_sender, &recycler, name);
        })
        .unwrap()
}
/// Waits up to one second for a batch of packets on `r` and transmits it
/// through `sock`.
fn recv_send(sock: &UdpSocket, r: &PacketReceiver) -> Result<()> {
    let batch = r.recv_timeout(Duration::new(1, 0))?;
    send_to(&batch, sock)?;
    Ok(())
}
/// Blocks up to one second for a first batch of packets, then greedily drains
/// any further batches that are immediately available, stopping once more
/// than `max_batch` packets have been gathered.
///
/// Returns the collected batches, the total packet count, and the time spent
/// draining in milliseconds.
pub fn recv_batch(recvr: &PacketReceiver, max_batch: usize) -> Result<(Vec<Packets>, usize, u64)> {
    let first = recvr.recv_timeout(Duration::new(1, 0))?;
    let recv_start = Instant::now();
    trace!("got msgs");
    let mut total = first.packets.len();
    let mut batches = vec![first];
    while let Ok(more) = recvr.try_recv() {
        trace!("got more msgs");
        total += more.packets.len();
        batches.push(more);
        if total > max_batch {
            break;
        }
    }
    trace!("batch len {}", batches.len());
    Ok((batches, total, duration_as_ms(&recv_start.elapsed())))
}
pub fn responder(name: &'static str, sock: Arc<UdpSocket>, r: PacketReceiver) -> JoinHandle<()> {
Builder::new()
.name(format!("solana-responder-{}", name))
.spawn(move || {
let mut errors = 0;
let mut last_error = None;
let mut last_print = 0;
loop {
thread_mem_usage::datapoint(name);
if let Err(e) = recv_send(&sock, &r) {
match e {
StreamerError::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
StreamerError::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
_ => {
errors += 1;
last_error = Some(e);
}
}
}
let now = timestamp();
if now - last_print > 1000 && errors != 0 {
datapoint_info!(name, ("errors", errors, i64),);
info!("{} last-error: {:?} count: {}", name, last_error, errors);
last_print = now;
errors = 0;
}
}
})
.unwrap()
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::packet::{Packet, Packets, PACKET_DATA_SIZE};
    use crate::streamer::{receiver, responder};
    use solana_perf::recycler::Recycler;
    use std::io;
    use std::io::Write;
    use std::net::UdpSocket;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::time::Duration;
    // Drains up to 10 receive attempts from `r`, decrementing `*num` by the
    // number of packets seen, stopping early once all expected packets arrive.
    fn get_msgs(r: PacketReceiver, num: &mut usize) -> Result<()> {
        for _ in 0..10 {
            let m = r.recv_timeout(Duration::new(1, 0));
            if m.is_err() {
                continue;
            }
            *num -= m.unwrap().packets.len();
            if *num == 0 {
                break;
            }
        }
        Ok(())
    }
    // Sanity check: Packet/Packets implement Debug without panicking.
    #[test]
    fn streamer_debug() {
        write!(io::sink(), "{:?}", Packet::default()).unwrap();
        write!(io::sink(), "{:?}", Packets::default()).unwrap();
    }
    // End-to-end loopback: responder sends 5 packets to a local socket, the
    // receiver thread picks them up and forwards them over a channel.
    #[test]
    fn streamer_send_test() {
        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
        read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
        let addr = read.local_addr().unwrap();
        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));
        let (s_reader, r_reader) = channel();
        let t_receiver = receiver(Arc::new(read), &exit, s_reader, Recycler::default(), "test");
        let t_responder = {
            let (s_responder, r_responder) = channel();
            let t_responder = responder("streamer_send_test", Arc::new(send), r_responder);
            let mut msgs = Packets::default();
            for i in 0..5 {
                let mut b = Packet::default();
                {
                    // Tag each packet with its index and aim it at the reader.
                    b.data[0] = i as u8;
                    b.meta.size = PACKET_DATA_SIZE;
                    b.meta.set_addr(&addr);
                }
                msgs.packets.push(b);
            }
            s_responder.send(msgs).expect("send");
            t_responder
        };
        let mut num = 5;
        get_msgs(r_reader, &mut num).expect("get_msgs");
        assert_eq!(num, 0);
        exit.store(true, Ordering::Relaxed);
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
    }
}
| 32.640351 | 99 | 0.528621 |
fe96c9b3a9f6a07b090d20a97d20fd0851c9d60b | 28,553 | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! HTML formatting module
//!
//! This module contains a large number of `fmt::Show` implementations for
//! various types in `rustdoc::clean`. These implementations all currently
//! assume that HTML output is desired, although it may be possible to redesign
//! them in the future to instead emit any format desired.
use std::fmt;
use std::string::String;
use syntax::ast;
use syntax::ast_util;
use clean;
use stability_summary::ModuleSummary;
use html::item_type;
use html::item_type::ItemType;
use html::render;
use html::render::{cache_key, current_location_key};
/// Helper to render an optional visibility with a space after it (if the
/// visibility is present)
pub struct VisSpace(pub Option<ast::Visibility>);
/// Similar to VisSpace, this structure is used to render a function style with a
/// space after it.
pub struct FnStyleSpace(pub ast::FnStyle);
/// Wrapper struct for properly emitting a method declaration.
pub struct Method<'a>(pub &'a clean::SelfTy, pub &'a clean::FnDecl);
/// Similar to VisSpace, but used for mutability (renders "mut " or nothing).
pub struct MutableSpace(pub clean::Mutability);
/// Similar to VisSpace, but used for mutability on raw pointers
/// (renders "const " or "mut ").
pub struct RawMutableSpace(pub clean::Mutability);
/// Wrapper struct for properly emitting the stability level.
pub struct Stability<'a>(pub &'a Option<clean::Stability>);
/// Wrapper struct for emitting the stability level concisely.
pub struct ConciseStability<'a>(pub &'a Option<clean::Stability>);
/// Wrapper struct for emitting a where clause from Generics.
pub struct WhereClause<'a>(pub &'a clean::Generics);
/// Wrapper struct for emitting type parameter bounds.
struct TyParamBounds<'a>(pub &'a [clean::TyParamBound]);
impl VisSpace {
    // Destructure the newtype and return the wrapped visibility.
    pub fn get(&self) -> Option<ast::Visibility> {
        let VisSpace(v) = *self; v
    }
}
impl FnStyleSpace {
    // Destructure the newtype and return the wrapped function style.
    pub fn get(&self) -> ast::FnStyle {
        let FnStyleSpace(v) = *self; v
    }
}
// Renders a bound list as "A + B + C".
impl<'a> fmt::Show for TyParamBounds<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let &TyParamBounds(bounds) = self;
        for (i, bound) in bounds.iter().enumerate() {
            if i > 0 {
                try!(f.write(" + ".as_bytes()));
            }
            try!(write!(f, "{}", *bound));
        }
        Ok(())
    }
}
// Renders "<'a, 'b, T: Bound, U = Default>"; emits nothing at all when there
// are no lifetimes and no type parameters.
impl fmt::Show for clean::Generics {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if self.lifetimes.len() == 0 && self.type_params.len() == 0 { return Ok(()) }
        try!(f.write("<".as_bytes()));
        for (i, life) in self.lifetimes.iter().enumerate() {
            if i > 0 {
                try!(f.write(", ".as_bytes()));
            }
            try!(write!(f, "{}", *life));
        }
        if self.type_params.len() > 0 {
            // Lifetimes always precede type parameters; separate the groups.
            if self.lifetimes.len() > 0 {
                try!(f.write(", ".as_bytes()));
            }
            for (i, tp) in self.type_params.iter().enumerate() {
                if i > 0 {
                    try!(f.write(", ".as_bytes()))
                }
                try!(f.write(tp.name.as_bytes()));
                if tp.bounds.len() > 0 {
                    try!(write!(f, ": {}", TyParamBounds(tp.bounds.as_slice())));
                }
                match tp.default {
                    Some(ref ty) => { try!(write!(f, " = {}", ty)); },
                    None => {}
                };
            }
        }
        try!(f.write(">".as_bytes()));
        Ok(())
    }
}
// Renders " where A: B, C: D"; emits nothing when there are no predicates.
impl<'a> fmt::Show for WhereClause<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let &WhereClause(gens) = self;
        if gens.where_predicates.len() == 0 {
            return Ok(());
        }
        try!(f.write(" where ".as_bytes()));
        for (i, pred) in gens.where_predicates.iter().enumerate() {
            if i > 0 {
                try!(f.write(", ".as_bytes()));
            }
            let bounds = pred.bounds.as_slice();
            try!(write!(f, "{}: {}", pred.name, TyParamBounds(bounds)));
        }
        Ok(())
    }
}
// Renders the lifetime name verbatim (e.g. "'a").
impl fmt::Show for clean::Lifetime {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        try!(f.write(self.get_ref().as_bytes()));
        Ok(())
    }
}
// A bound is either a region/lifetime or a trait; delegate to its formatter.
impl fmt::Show for clean::TyParamBound {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            clean::RegionBound(ref lt) => {
                write!(f, "{}", *lt)
            }
            clean::TraitBound(ref ty) => {
                write!(f, "{}", *ty)
            }
        }
    }
}
// Renders a (possibly global) path with "::"-separated segments and the
// generic arguments (lifetimes before types) attached to each segment.
impl fmt::Show for clean::Path {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if self.global {
            try!(f.write("::".as_bytes()))
        }
        for (i, seg) in self.segments.iter().enumerate() {
            if i > 0 {
                try!(f.write("::".as_bytes()))
            }
            try!(f.write(seg.name.as_bytes()));
            if seg.lifetimes.len() > 0 || seg.types.len() > 0 {
                try!(f.write("<".as_bytes()));
                let mut comma = false;
                for lifetime in seg.lifetimes.iter() {
                    if comma {
                        try!(f.write(", ".as_bytes()));
                    }
                    comma = true;
                    try!(write!(f, "{}", *lifetime));
                }
                for ty in seg.types.iter() {
                    if comma {
                        try!(f.write(", ".as_bytes()));
                    }
                    comma = true;
                    try!(write!(f, "{}", *ty));
                }
                try!(f.write(">".as_bytes()));
            }
        }
        Ok(())
    }
}
/// Used when rendering a `ResolvedPath` structure. This invokes the `path`
/// rendering function with the necessary arguments for linking to a local path.
///
/// `print_all` controls whether the leading path segments are emitted in
/// addition to the final one.
fn resolved_path(w: &mut fmt::Formatter, did: ast::DefId, p: &clean::Path,
                 print_all: bool) -> fmt::Result {
    path(w, p, print_all,
        |cache, loc| {
            // Local and inlined items link relative to the current page
            // depth; external items link to wherever their crate's docs live.
            if ast_util::is_local(did) || cache.inlined.contains(&did) {
                Some(("../".repeat(loc.len())).to_string())
            } else {
                match cache.extern_locations[did.krate] {
                    render::Remote(ref s) => Some(s.to_string()),
                    render::Local => {
                        Some(("../".repeat(loc.len())).to_string())
                    }
                    render::Unknown => None,
                }
            }
        },
        |cache| {
            // Look up the fully qualified path and item type for linking.
            match cache.paths.get(&did) {
                None => None,
                Some(&(ref fqp, shortty)) => Some((fqp.clone(), shortty))
            }
        })
}
// Core path renderer. `root` maps (cache, current location) to the URL
// prefix for absolute links; `info` supplies the fully qualified path and
// item type of the target, when it is documented.
fn path(w: &mut fmt::Formatter, path: &clean::Path, print_all: bool,
        root: |&render::Cache, &[String]| -> Option<String>,
        info: |&render::Cache| -> Option<(Vec<String> , ItemType)>)
    -> fmt::Result
{
    // The generics will get written to both the title and link
    let mut generics = String::new();
    let last = path.segments.last().unwrap();
    if last.lifetimes.len() > 0 || last.types.len() > 0 {
        let mut counter = 0u;
        generics.push_str("<");
        for lifetime in last.lifetimes.iter() {
            if counter > 0 { generics.push_str(", "); }
            counter += 1;
            generics.push_str(format!("{}", *lifetime).as_slice());
        }
        for ty in last.types.iter() {
            if counter > 0 { generics.push_str(", "); }
            counter += 1;
            generics.push_str(format!("{}", *ty).as_slice());
        }
        generics.push_str(">");
    }
    let loc = current_location_key.get().unwrap();
    let cache = cache_key.get().unwrap();
    let abs_root = root(&**cache, loc.as_slice());
    // Paths starting with `self` link relative to the current module.
    let rel_root = match path.segments[0].name.as_slice() {
        "self" => Some("./".to_string()),
        _ => None,
    };
    if print_all {
        // Emit every leading segment; when a relative root is known, each
        // segment (other than `super`/`self`) becomes a module link.
        let amt = path.segments.len() - 1;
        match rel_root {
            Some(root) => {
                let mut root = String::from_str(root.as_slice());
                for seg in path.segments[..amt].iter() {
                    if "super" == seg.name.as_slice() ||
                            "self" == seg.name.as_slice() {
                        try!(write!(w, "{}::", seg.name));
                    } else {
                        root.push_str(seg.name.as_slice());
                        root.push_str("/");
                        try!(write!(w, "<a class='mod'
                                            href='{}index.html'>{}</a>::",
                                      root.as_slice(),
                                      seg.name));
                    }
                }
            }
            None => {
                for seg in path.segments[..amt].iter() {
                    try!(write!(w, "{}::", seg.name));
                }
            }
        }
    }
    match info(&**cache) {
        // This is a documented path, link to it!
        Some((ref fqp, shortty)) if abs_root.is_some() => {
            let mut url = String::from_str(abs_root.unwrap().as_slice());
            let to_link = fqp[..fqp.len() - 1];
            for component in to_link.iter() {
                url.push_str(component.as_slice());
                url.push_str("/");
            }
            // Modules link to their index page; everything else to
            // "<kind>.<name>.html".
            match shortty {
                item_type::Module => {
                    url.push_str(fqp.last().unwrap().as_slice());
                    url.push_str("/index.html");
                }
                _ => {
                    url.push_str(shortty.to_static_str());
                    url.push_str(".");
                    url.push_str(fqp.last().unwrap().as_slice());
                    url.push_str(".html");
                }
            }
            try!(write!(w, "<a class='{}' href='{}' title='{}'>{}</a>",
                        shortty, url, fqp.connect("::"), last.name));
        }
        _ => {
            try!(write!(w, "{}", last.name));
        }
    }
    try!(write!(w, "{}", generics.as_slice()));
    Ok(())
}
// Renders `name`, wrapping it in a link to the primitive's documentation
// page when the location of that page (local crate or external) is known.
fn primitive_link(f: &mut fmt::Formatter,
                  prim: clean::PrimitiveType,
                  name: &str) -> fmt::Result {
    let m = cache_key.get().unwrap();
    let mut needs_termination = false;
    match m.primitive_locations.get(&prim) {
        Some(&ast::LOCAL_CRATE) => {
            // Primitive is documented by the crate being rendered; link
            // relative to the current page depth.
            let loc = current_location_key.get().unwrap();
            let len = if loc.len() == 0 {0} else {loc.len() - 1};
            try!(write!(f, "<a href='{}primitive.{}.html'>",
                        "../".repeat(len),
                        prim.to_url_str()));
            needs_termination = true;
        }
        Some(&cnum) => {
            // Primitive is documented by an external crate; resolve where
            // that crate's docs live (remote URL, local path, or unknown).
            let path = &m.paths[ast::DefId {
                krate: cnum,
                node: ast::CRATE_NODE_ID,
            }];
            let loc = match m.extern_locations[cnum] {
                render::Remote(ref s) => Some(s.to_string()),
                render::Local => {
                    let loc = current_location_key.get().unwrap();
                    Some("../".repeat(loc.len()))
                }
                render::Unknown => None,
            };
            match loc {
                Some(root) => {
                    try!(write!(f, "<a href='{}{}/primitive.{}.html'>",
                                root,
                                path.ref0().as_slice().head().unwrap(),
                                prim.to_url_str()));
                    needs_termination = true;
                }
                None => {}
            }
        }
        None => {}
    }
    try!(write!(f, "{}", name));
    if needs_termination {
        try!(write!(f, "</a>"));
    }
    Ok(())
}
/// Helper to render type parameters
// Appends " + Bound" for every bound in the optional list.
fn tybounds(w: &mut fmt::Formatter,
            typarams: &Option<Vec<clean::TyParamBound> >) -> fmt::Result {
    match *typarams {
        Some(ref params) => {
            for param in params.iter() {
                try!(write!(w, " + "));
                try!(write!(w, "{}", *param));
            }
            Ok(())
        }
        None => Ok(())
    }
}
// The main type renderer: one match arm per `clean::Type` variant.
impl fmt::Show for clean::Type {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            clean::TyParamBinder(id) => {
                let m = cache_key.get().unwrap();
                f.write(m.typarams[ast_util::local_def(id)].as_bytes())
            }
            clean::Generic(did) => {
                let m = cache_key.get().unwrap();
                f.write(m.typarams[did].as_bytes())
            }
            clean::ResolvedPath{ did, ref typarams, ref path } => {
                try!(resolved_path(f, did, path, false));
                tybounds(f, typarams)
            }
            clean::Self(..) => f.write("Self".as_bytes()),
            clean::Primitive(prim) => primitive_link(f, prim, prim.to_string()),
            // Closure types render as "style <lts> |args| bounds -> ret".
            clean::Closure(ref decl) => {
                write!(f, "{style}{lifetimes}|{args}|{bounds}{arrow}",
                       style = FnStyleSpace(decl.fn_style),
                       lifetimes = if decl.lifetimes.len() == 0 {
                           "".to_string()
                       } else {
                           format!("<{:#}>", decl.lifetimes)
                       },
                       args = decl.decl.inputs,
                       arrow = match decl.decl.output {
                           clean::Primitive(clean::Unit) => "".to_string(),
                           _ => format!(" -> {}", decl.decl.output),
                       },
                       bounds = {
                           let mut ret = String::new();
                           for bound in decl.bounds.iter() {
                               match *bound {
                                   clean::RegionBound(..) => {}
                                   clean::TraitBound(ref t) => {
                                       if ret.len() == 0 {
                                           ret.push_str(": ");
                                       } else {
                                           ret.push_str(" + ");
                                       }
                                       ret.push_str(format!("{}",
                                                            *t).as_slice());
                                   }
                               }
                           }
                           ret
                       })
            }
            // `proc` types render similarly, with parenthesized arguments.
            clean::Proc(ref decl) => {
                write!(f, "{style}{lifetimes}proc({args}){bounds}{arrow}",
                       style = FnStyleSpace(decl.fn_style),
                       lifetimes = if decl.lifetimes.len() == 0 {
                           "".to_string()
                       } else {
                           format!("<{:#}>", decl.lifetimes)
                       },
                       args = decl.decl.inputs,
                       bounds = if decl.bounds.len() == 0 {
                           "".to_string()
                       } else {
                           let mut m = decl.bounds
                                           .iter()
                                           .map(|s| s.to_string());
                           format!(
                               ": {}",
                               m.collect::<Vec<String>>().connect(" + "))
                       },
                       arrow = match decl.decl.output {
                           clean::Primitive(clean::Unit) => "".to_string(),
                           _ => format!(" -> {}", decl.decl.output)
                       })
            }
            clean::BareFunction(ref decl) => {
                write!(f, "{}{}fn{}{}",
                       FnStyleSpace(decl.fn_style),
                       match decl.abi.as_slice() {
                           "" => " extern ".to_string(),
                           "\"Rust\"" => "".to_string(),
                           s => format!(" extern {} ", s)
                       },
                       decl.generics,
                       decl.decl)
            }
            clean::Tuple(ref typs) => {
                // One-element tuples need the trailing comma: "(T,)".
                primitive_link(f, clean::PrimitiveTuple,
                               match typs.as_slice() {
                                   [ref one] => format!("({},)", one),
                                   many => format!("({:#})", many)
                               }.as_slice())
            }
            clean::Vector(ref t) => {
                primitive_link(f, clean::Slice, format!("[{}]", **t).as_slice())
            }
            clean::FixedVector(ref t, ref s) => {
                primitive_link(f, clean::Slice,
                               format!("[{}, ..{}]", **t, *s).as_slice())
            }
            clean::Bottom => f.write("!".as_bytes()),
            clean::RawPointer(m, ref t) => {
                write!(f, "*{}{}", RawMutableSpace(m), **t)
            }
            clean::BorrowedRef{ lifetime: ref l, mutability, type_: ref ty} => {
                let lt = match *l {
                    Some(ref l) => format!("{} ", *l),
                    _ => "".to_string(),
                };
                let m = MutableSpace(mutability);
                match **ty {
                    clean::Vector(ref bt) => { // BorrowedRef{ ... Vector(T) } is &[T]
                        match **bt {
                            clean::Generic(_) =>
                                primitive_link(f, clean::Slice,
                                    format!("&{}{}[{}]", lt, m, **bt).as_slice()),
                            _ => {
                                try!(primitive_link(f, clean::Slice,
                                    format!("&{}{}[", lt, m).as_slice()));
                                try!(write!(f, "{}", **bt));
                                primitive_link(f, clean::Slice, "]")
                            }
                        }
                    }
                    _ => {
                        write!(f, "&{}{}{}", lt, m, **ty)
                    }
                }
            }
            clean::Unique(..) => {
                panic!("should have been cleaned")
            }
        }
    }
}
// Renders a comma-separated argument list, "name: Type" when named.
impl fmt::Show for clean::Arguments {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        for (i, input) in self.values.iter().enumerate() {
            if i > 0 { try!(write!(f, ", ")); }
            if input.name.len() > 0 {
                try!(write!(f, "{}: ", input.name));
            }
            try!(write!(f, "{}", input.type_));
        }
        Ok(())
    }
}
// Renders "(args) -> Ret", omitting the arrow for the unit return type.
impl fmt::Show for clean::FnDecl {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "({args}){arrow}",
               args = self.inputs,
               arrow = match self.output {
                   clean::Primitive(clean::Unit) => "".to_string(),
                   _ => format!(" -> {}", self.output),
               })
    }
}
// Renders a method signature: the explicit self form first (if any),
// then the remaining arguments and the return type.
impl<'a> fmt::Show for Method<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let Method(selfty, d) = *self;
        let mut args = String::new();
        match *selfty {
            clean::SelfStatic => {},
            clean::SelfValue => args.push_str("self"),
            clean::SelfBorrowed(Some(ref lt), mtbl) => {
                args.push_str(format!("&{} {}self", *lt,
                                      MutableSpace(mtbl)).as_slice());
            }
            clean::SelfBorrowed(None, mtbl) => {
                args.push_str(format!("&{}self",
                                      MutableSpace(mtbl)).as_slice());
            }
            clean::SelfExplicit(ref typ) => {
                args.push_str(format!("self: {}", *typ).as_slice());
            }
        }
        for (i, input) in d.inputs.values.iter().enumerate() {
            // A separator is needed after self and between arguments.
            if i > 0 || args.len() > 0 { args.push_str(", "); }
            if input.name.len() > 0 {
                args.push_str(format!("{}: ", input.name).as_slice());
            }
            args.push_str(format!("{}", input.type_).as_slice());
        }
        write!(f, "({args}){arrow}",
               args = args,
               arrow = match d.output {
                   clean::Primitive(clean::Unit) => "".to_string(),
                   _ => format!(" -> {}", d.output),
               })
    }
}
// "pub " for public items, nothing for inherited/absent visibility.
impl fmt::Show for VisSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.get() {
            Some(ast::Public) => write!(f, "pub "),
            Some(ast::Inherited) | None => Ok(())
        }
    }
}
// "unsafe " for unsafe functions, nothing for normal ones.
impl fmt::Show for FnStyleSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.get() {
            ast::UnsafeFn => write!(f, "unsafe "),
            ast::NormalFn => Ok(())
        }
    }
}
// Renders a `use` declaration in its three forms: simple (possibly
// renaming), glob, and brace-list imports.
impl fmt::Show for clean::ViewPath {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            clean::SimpleImport(ref name, ref src) => {
                // Only emit "as <name>" when the binding differs from the
                // final path segment.
                if *name == src.path.segments.last().unwrap().name {
                    write!(f, "use {};", *src)
                } else {
                    write!(f, "use {} as {};", *src, *name)
                }
            }
            clean::GlobImport(ref src) => {
                write!(f, "use {}::*;", *src)
            }
            clean::ImportList(ref src, ref names) => {
                try!(write!(f, "use {}::{{", *src));
                for (i, n) in names.iter().enumerate() {
                    if i > 0 {
                        try!(write!(f, ", "));
                    }
                    try!(write!(f, "{}", *n));
                }
                write!(f, "}};")
            }
        }
    }
}
// Renders the source path of an import; linked when the target is known,
// plain "a::b::c" otherwise.
impl fmt::Show for clean::ImportSource {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.did {
            Some(did) => resolved_path(f, did, &self.path, true),
            _ => {
                for (i, seg) in self.path.segments.iter().enumerate() {
                    if i > 0 {
                        try!(write!(f, "::"))
                    }
                    try!(write!(f, "{}", seg.name));
                }
                Ok(())
            }
        }
    }
}
// Renders one name inside a brace-list import, linked when resolvable.
impl fmt::Show for clean::ViewListIdent {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.source {
            Some(did) => {
                // Build a one-segment path so `resolved_path` can link it.
                let path = clean::Path {
                    global: false,
                    segments: vec!(clean::PathSegment {
                        name: self.name.clone(),
                        lifetimes: Vec::new(),
                        types: Vec::new(),
                    })
                };
                resolved_path(f, did, &path, false)
            }
            _ => write!(f, "{}", self.name),
        }
    }
}
// "mut " for mutable borrows/bindings, nothing for immutable ones.
impl fmt::Show for MutableSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            MutableSpace(clean::Immutable) => Ok(()),
            MutableSpace(clean::Mutable) => write!(f, "mut "),
        }
    }
}
// Raw pointers always spell out the mutability: "const " or "mut ".
impl fmt::Show for RawMutableSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            RawMutableSpace(clean::Immutable) => write!(f, "const "),
            RawMutableSpace(clean::Mutable) => write!(f, "mut "),
        }
    }
}
// Renders a stability badge whose label is the level and whose tooltip is
// the stability text; nothing when the item carries no stability attribute.
impl<'a> fmt::Show for Stability<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let Stability(stab) = *self;
        match *stab {
            Some(ref stability) => {
                write!(f, "<a class='stability {lvl}' title='{reason}'>{lvl}</a>",
                       lvl = stability.level.to_string(),
                       reason = stability.text)
            }
            None => Ok(())
        }
    }
}
// Compact variant: an empty badge whose tooltip carries level and reason;
// unmarked items get an explicit "Unmarked" badge.
impl<'a> fmt::Show for ConciseStability<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let ConciseStability(stab) = *self;
        match *stab {
            Some(ref stability) => {
                write!(f, "<a class='stability {lvl}' title='{lvl}{colon}{reason}'></a>",
                       lvl = stability.level.to_string(),
                       colon = if stability.text.len() > 0 { ": " } else { "" },
                       reason = stability.text)
            }
            None => {
                write!(f, "<a class='stability Unmarked' title='No stability level'></a>")
            }
        }
    }
}
// Renders the per-crate stability dashboard: a header with crate-wide
// percentages, then one table row per module with proportional bars.
impl fmt::Show for ModuleSummary {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Recursively emits a table row for `m` and its submodules;
        // `context` tracks the module path while descending.
        fn fmt_inner<'a>(f: &mut fmt::Formatter,
                         context: &mut Vec<&'a str>,
                         m: &'a ModuleSummary)
                     -> fmt::Result {
            let cnt = m.counts;
            let tot = cnt.total();
            // Modules with no items contribute no row.
            if tot == 0 { return Ok(()) }
            context.push(m.name.as_slice());
            let path = context.connect("::");
            try!(write!(f, "<tr>"));
            try!(write!(f, "<td><a href='{}'>{}</a></td>", {
                        // Drop the crate name (first element) when building
                        // the relative link to the module's index page.
                        let mut url = context.slice_from(1).to_vec();
                        url.push("index.html");
                        url.connect("/")
                    },
                    path));
            try!(write!(f, "<td class='summary-column'>"));
            try!(write!(f, "<span class='summary Stable' \
                            style='width: {:.4}%; display: inline-block'>&nbsp</span>",
                        (100 * cnt.stable) as f64/tot as f64));
            try!(write!(f, "<span class='summary Unstable' \
                            style='width: {:.4}%; display: inline-block'>&nbsp</span>",
                        (100 * cnt.unstable) as f64/tot as f64));
            try!(write!(f, "<span class='summary Experimental' \
                            style='width: {:.4}%; display: inline-block'>&nbsp</span>",
                        (100 * cnt.experimental) as f64/tot as f64));
            try!(write!(f, "<span class='summary Deprecated' \
                            style='width: {:.4}%; display: inline-block'>&nbsp</span>",
                        (100 * cnt.deprecated) as f64/tot as f64));
            try!(write!(f, "<span class='summary Unmarked' \
                            style='width: {:.4}%; display: inline-block'>&nbsp</span>",
                        (100 * cnt.unmarked) as f64/tot as f64));
            try!(write!(f, "</td></tr>"));
            for submodule in m.submodules.iter() {
                try!(fmt_inner(f, context, submodule));
            }
            context.pop();
            Ok(())
        }
        let mut context = Vec::new();
        // Crate-wide percentages for the header blockquote; guard against
        // division by zero for an empty crate.
        let tot = self.counts.total();
        let (stable, unstable, experimental, deprecated, unmarked) = if tot == 0 {
            (0, 0, 0, 0, 0)
        } else {
            ((100 * self.counts.stable)/tot,
             (100 * self.counts.unstable)/tot,
             (100 * self.counts.experimental)/tot,
             (100 * self.counts.deprecated)/tot,
             (100 * self.counts.unmarked)/tot)
        };
        try!(write!(f,
r"<h1 class='fqn'>Stability dashboard: crate <a class='mod' href='index.html'>{name}</a></h1>
This dashboard summarizes the stability levels for all of the public modules of
the crate, according to the total number of items at each level in the module and
its children (percentages total for {name}):
<blockquote>
<a class='stability Stable'></a> stable ({}%),<br/>
<a class='stability Unstable'></a> unstable ({}%),<br/>
<a class='stability Experimental'></a> experimental ({}%),<br/>
<a class='stability Deprecated'></a> deprecated ({}%),<br/>
<a class='stability Unmarked'></a> unmarked ({}%)
</blockquote>
The counts do not include methods or trait
implementations that are visible only through a re-exported type.",
stable, unstable, experimental, deprecated, unmarked,
name=self.name));
        // NOTE(review): the next line has no trailing semicolon after the
        // macro call -- verify this still parses on the toolchain in use.
        try!(write!(f, "<table>"))
        try!(fmt_inner(f, &mut context, self));
        write!(f, "</table>")
    }
}
| 36.985751 | 93 | 0.429692 |
646511c330a3a1cfa178b0b5a7d6d968e9da4013 | 3,337 | use core::fmt;
use lazy_static::lazy_static;
use spin::Mutex;
use volatile::Volatile;
const BUFFER_HEIGHT: usize = 25;
const BUFFER_WIDTH: usize = 80;
lazy_static! {
    /// Global, mutex-protected writer for the VGA text buffer, used by the
    /// `print!`/`println!` macros. Initialized lazily on first access.
    pub static ref WRITER: Mutex<Writer> = Mutex::new(Writer {
        column_position: 0,
        color_code: ColorCode::new(Color::Green, Color::Black),
        // SAFETY: 0xb8000 is the conventional address of the memory-mapped
        // VGA text buffer, and this is the only place in the visible code
        // that creates a reference to it.
        // NOTE(review): assumes this address is identity-mapped -- confirm
        // against the kernel's paging setup.
        buffer: unsafe { &mut *(0xb8000 as *mut Buffer) },
    });
}
/// Prints to the VGA text buffer, appending a newline.
#[macro_export]
macro_rules! println {
    // Use `$crate::` so both arms resolve to this crate's macros regardless
    // of where the exported macro is invoked from. The previous version
    // mixed a bare `print!` (resolved at the call site, which is wrong or
    // missing in downstream `no_std` crates) with `crate::print!` (which
    // points at the *caller's* crate when used externally).
    () => ($crate::print!("\n"));
    ($($arg:tt)*) => ($crate::print!("{}\n", format_args!($($arg)*)));
}
/// Prints to the VGA text buffer (no trailing newline).
#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => ($crate::vga_buffer::_print(format_args!($($arg)*)));
}
// Implementation detail of `print!`/`println!`; hidden from the docs.
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
    use core::fmt::Write;
    // NOTE(review): taking the WRITER lock here can deadlock if an interrupt
    // handler also prints while the lock is held -- confirm the interrupt
    // handling strategy before relying on this in interrupt context.
    WRITER.lock().write_fmt(args).unwrap();
}
/// The sixteen colors of the standard VGA text-mode palette.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Color {
    Black = 0,
    Blue = 1,
    Green = 2,
    Cyan = 3,
    Red = 4,
    Magenta = 5,
    Brown = 6,
    LightGray = 7,
    DarkGray = 8,
    LightBlue = 9,
    LightGreen = 10,
    LightCyan = 11,
    LightRed = 12,
    Pink = 13,
    Yellow = 14,
    White = 15,
}
/// A packed color attribute byte: background color in the high nibble,
/// foreground color in the low nibble.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(transparent)]
struct ColorCode(u8);
impl ColorCode {
    /// Combines a foreground and background color into one attribute byte.
    fn new(foreground: Color, background: Color) -> ColorCode {
        let high = (background as u8) << 4;
        let low = foreground as u8;
        ColorCode(high | low)
    }
}
/// A single character cell: the character byte plus its color attribute.
/// `#[repr(C)]` guarantees the byte/attribute field order the VGA hardware expects.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(C)]
struct ScreenChar {
    ascii_character: u8,
    color_code: ColorCode,
}
/// The memory-mapped VGA text buffer: BUFFER_HEIGHT rows of BUFFER_WIDTH cells.
/// `Volatile` prevents the compiler from eliding or reordering the MMIO accesses.
#[repr(transparent)]
struct Buffer {
    chars: [[Volatile<ScreenChar>; BUFFER_WIDTH]; BUFFER_HEIGHT],
}
/// Writes text into the VGA buffer, always on the bottom row, scrolling
/// everything up on newline or when a row overflows.
pub struct Writer {
    // Current column on the bottom row (0..BUFFER_WIDTH).
    column_position: usize,
    // Color attribute applied to every character written.
    color_code: ColorCode,
    // The memory-mapped buffer; `'static` because it lives at a fixed address.
    buffer: &'static mut Buffer,
}
impl Writer {
    /// Writes one byte at the cursor, treating `\n` as a line break and
    /// wrapping to a new line when the current row is full.
    pub fn write_byte(&mut self, byte: u8) {
        if byte == b'\n' {
            self.new_line();
            return;
        }
        if self.column_position >= BUFFER_WIDTH {
            self.new_line();
        }
        let row = BUFFER_HEIGHT - 1;
        let col = self.column_position;
        let cell = ScreenChar {
            ascii_character: byte,
            color_code: self.color_code,
        };
        self.buffer.chars[row][col].write(cell);
        self.column_position += 1;
    }
    /// Scrolls every row up by one, clears the bottom row, and resets the
    /// cursor to the first column.
    fn new_line(&mut self) {
        for row in 1..BUFFER_HEIGHT {
            for col in 0..BUFFER_WIDTH {
                let ch = self.buffer.chars[row][col].read();
                self.buffer.chars[row - 1][col].write(ch);
            }
        }
        self.clear_row(BUFFER_HEIGHT - 1);
        self.column_position = 0;
    }
    /// Overwrites `row` with spaces in the current color.
    fn clear_row(&mut self, row: usize) {
        let blank = ScreenChar {
            ascii_character: b' ',
            color_code: self.color_code,
        };
        for cell in self.buffer.chars[row].iter_mut() {
            cell.write(blank);
        }
    }
    /// Writes a string byte by byte; anything outside printable ASCII
    /// (0x20..=0x7e) or `\n` is replaced with the 0xfe placeholder glyph.
    pub fn write_string(&mut self, s: &str) {
        for byte in s.bytes() {
            let printable = (0x20..=0x7e).contains(&byte) || byte == b'\n';
            if printable {
                self.write_byte(byte);
            } else {
                // Not printable
                self.write_byte(0xfe);
            }
        }
    }
}
// Implementing `core::fmt::Write` enables `write!`/`writeln!` and
// `write_fmt` on `Writer` (used by the print macros).
impl fmt::Write for Writer {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.write_string(s);
        // Writing to the VGA buffer cannot fail.
        Ok(())
    }
}
| 23.666667 | 90 | 0.543302 |
e469ca80c590abbb24e096388199c854781f993a | 52,996 | use crate::diagnostics::{ImportSuggestion, LabelSuggestion, TypoSuggestion};
use crate::late::lifetimes::{ElisionFailureInfo, LifetimeContext};
use crate::late::{LateResolutionVisitor, RibKind};
use crate::path_names_to_string;
use crate::{CrateLint, Module, ModuleKind, ModuleOrUniformRoot};
use crate::{PathResult, PathSource, Segment};
use rustc_ast::ast::{self, Expr, ExprKind, Item, ItemKind, NodeId, Path, Ty, TyKind};
use rustc_ast::util::lev_distance::find_best_match_for_name;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder};
use rustc_hir as hir;
use rustc_hir::def::Namespace::{self, *};
use rustc_hir::def::{self, CtorKind, DefKind};
use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX};
use rustc_hir::PrimTy;
use rustc_session::config::nightly_options;
use rustc_span::hygiene::MacroKind;
use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::Span;
use log::debug;
type Res = def::Res<ast::NodeId>;
/// A field or associated item from self type suggested in case of resolution failure.
enum AssocSuggestion {
    /// A field of the self type.
    Field,
    /// A method on the self type that takes `self`.
    MethodWithSelf,
    /// Any other associated item of the self type.
    AssocItem,
}
/// A place where a missing or elided lifetime could be reported/suggested.
crate enum MissingLifetimeSpot<'tcx> {
    /// The generics of an item, where a lifetime parameter could be added.
    Generics(&'tcx hir::Generics<'tcx>),
    /// A higher-ranked (`for<...>`) position, described by `span_type`.
    HigherRanked { span: Span, span_type: ForLifetimeSpanType },
}
/// Where a `for<...>` suggestion must be inserted (see `suggestion` below):
/// "Empty" variants insert a fresh `for<'x> ` binder, "Tail" variants append
/// `, 'x` to an existing one; "Bound" vs "Type" picks the word used in text.
crate enum ForLifetimeSpanType {
    BoundEmpty,
    BoundTail,
    TypeEmpty,
    TypeTail,
}
impl ForLifetimeSpanType {
    /// The word used in diagnostics for this position: "bound" or "type".
    crate fn descr(&self) -> &'static str {
        match *self {
            Self::TypeEmpty | Self::TypeTail => "type",
            Self::BoundEmpty | Self::BoundTail => "bound",
        }
    }
    /// Formats `sugg` for insertion: a fresh `for<...> ` binder for the
    /// "empty" variants, or a `, ...` continuation of an existing binder.
    crate fn suggestion(&self, sugg: &str) -> String {
        match *self {
            Self::BoundEmpty | Self::TypeEmpty => format!("for<{}> ", sugg),
            Self::BoundTail | Self::TypeTail => format!(", {}", sugg),
        }
    }
}
// Lets a `&hir::Generics` be passed wherever a `MissingLifetimeSpot` is
// accepted through an `impl Into<...>` parameter.
// NOTE(review): implementing `From` instead would be the usual idiom (it
// yields this `Into` for free); kept as-is to avoid churn.
impl<'tcx> Into<MissingLifetimeSpot<'tcx>> for &'tcx hir::Generics<'tcx> {
    fn into(self) -> MissingLifetimeSpot<'tcx> {
        MissingLifetimeSpot::Generics(self)
    }
}
/// Returns `true` when `path` is a lone `Self` segment in the type namespace.
fn is_self_type(path: &[Segment], namespace: Namespace) -> bool {
    match path {
        [segment] => namespace == TypeNS && segment.ident.name == kw::SelfUpper,
        _ => false,
    }
}
/// Returns `true` when `path` is a lone `self` segment in the value namespace.
fn is_self_value(path: &[Segment], namespace: Namespace) -> bool {
    match path {
        [segment] => namespace == ValueNS && segment.ident.name == kw::SelfLower,
        _ => false,
    }
}
/// Gets the stringified path for an enum from an `ImportSuggestion` for an enum variant.
fn import_candidate_to_enum_paths(suggestion: &ImportSuggestion) -> (String, String) {
    let variant_path = &suggestion.path;
    let variant_path_string = path_names_to_string(variant_path);
    // Drop the final segment (the variant itself) to obtain the enum's path.
    let segments = &variant_path.segments;
    let enum_path = ast::Path {
        span: variant_path.span,
        segments: segments[..segments.len() - 1].to_vec(),
    };
    let enum_path_string = path_names_to_string(&enum_path);
    (variant_path_string, enum_path_string)
}
impl<'a> LateResolutionVisitor<'a, '_, '_> {
/// Handles error reporting for `smart_resolve_path_fragment` function.
/// Creates base error and amends it with one short label and possibly some longer helps/notes.
pub(crate) fn smart_resolve_report_errors(
&mut self,
path: &[Segment],
span: Span,
source: PathSource<'_>,
res: Option<Res>,
) -> (DiagnosticBuilder<'a>, Vec<ImportSuggestion>) {
let ident_span = path.last().map_or(span, |ident| ident.ident.span);
let ns = source.namespace();
let is_expected = &|res| source.is_expected(res);
let is_enum_variant = &|res| {
if let Res::Def(DefKind::Variant, _) = res { true } else { false }
};
// Make the base error.
let expected = source.descr_expected();
let path_str = Segment::names_to_string(path);
let item_str = path.last().unwrap().ident;
let (base_msg, fallback_label, base_span, could_be_expr) = if let Some(res) = res {
(
format!("expected {}, found {} `{}`", expected, res.descr(), path_str),
format!("not a {}", expected),
span,
match res {
Res::Def(DefKind::Fn, _) => {
// Verify whether this is a fn call or an Fn used as a type.
self.r
.session
.source_map()
.span_to_snippet(span)
.map(|snippet| snippet.ends_with(')'))
.unwrap_or(false)
}
Res::Def(
DefKind::Ctor(..) | DefKind::AssocFn | DefKind::Const | DefKind::AssocConst,
_,
)
| Res::SelfCtor(_)
| Res::PrimTy(_)
| Res::Local(_) => true,
_ => false,
},
)
} else {
let item_span = path.last().unwrap().ident.span;
let (mod_prefix, mod_str) = if path.len() == 1 {
(String::new(), "this scope".to_string())
} else if path.len() == 2 && path[0].ident.name == kw::PathRoot {
(String::new(), "the crate root".to_string())
} else {
let mod_path = &path[..path.len() - 1];
let mod_prefix =
match self.resolve_path(mod_path, Some(TypeNS), false, span, CrateLint::No) {
PathResult::Module(ModuleOrUniformRoot::Module(module)) => module.res(),
_ => None,
}
.map_or(String::new(), |res| format!("{} ", res.descr()));
(mod_prefix, format!("`{}`", Segment::names_to_string(mod_path)))
};
(
format!("cannot find {} `{}` in {}{}", expected, item_str, mod_prefix, mod_str),
if path_str == "async" && expected.starts_with("struct") {
"`async` blocks are only allowed in the 2018 edition".to_string()
} else {
format!("not found in {}", mod_str)
},
item_span,
false,
)
};
let code = source.error_code(res.is_some());
let mut err = self.r.session.struct_span_err_with_code(base_span, &base_msg, code);
// Emit help message for fake-self from other languages (e.g., `this` in Javascript).
if ["this", "my"].contains(&&*item_str.as_str())
&& self.self_value_is_available(path[0].ident.span, span)
{
err.span_suggestion(
span,
"did you mean",
"self".to_string(),
Applicability::MaybeIncorrect,
);
}
// Emit special messages for unresolved `Self` and `self`.
if is_self_type(path, ns) {
err.code(rustc_errors::error_code!(E0411));
err.span_label(
span,
"`Self` is only available in impls, traits, and type definitions".to_string(),
);
return (err, Vec::new());
}
if is_self_value(path, ns) {
debug!("smart_resolve_path_fragment: E0424, source={:?}", source);
err.code(rustc_errors::error_code!(E0424));
err.span_label(span, match source {
PathSource::Pat => "`self` value is a keyword and may not be bound to variables or shadowed"
.to_string(),
_ => "`self` value is a keyword only available in methods with a `self` parameter"
.to_string(),
});
if let Some((fn_kind, span)) = &self.diagnostic_metadata.current_function {
// The current function has a `self' parameter, but we were unable to resolve
// a reference to `self`. This can only happen if the `self` identifier we
// are resolving came from a different hygiene context.
if fn_kind.decl().inputs.get(0).map(|p| p.is_self()).unwrap_or(false) {
err.span_label(*span, "this function has a `self` parameter, but a macro invocation can only access identifiers it receives from parameters");
} else {
err.span_label(*span, "this function doesn't have a `self` parameter");
}
}
return (err, Vec::new());
}
// Try to lookup name in more relaxed fashion for better error reporting.
let ident = path.last().unwrap().ident;
let candidates = self
.r
.lookup_import_candidates(ident, ns, &self.parent_scope, is_expected)
.drain(..)
.filter(|ImportSuggestion { did, .. }| {
match (did, res.and_then(|res| res.opt_def_id())) {
(Some(suggestion_did), Some(actual_did)) => *suggestion_did != actual_did,
_ => true,
}
})
.collect::<Vec<_>>();
let crate_def_id = DefId::local(CRATE_DEF_INDEX);
if candidates.is_empty() && is_expected(Res::Def(DefKind::Enum, crate_def_id)) {
let enum_candidates =
self.r.lookup_import_candidates(ident, ns, &self.parent_scope, is_enum_variant);
let mut enum_candidates = enum_candidates
.iter()
.map(|suggestion| import_candidate_to_enum_paths(&suggestion))
.collect::<Vec<_>>();
enum_candidates.sort();
if !enum_candidates.is_empty() {
// Contextualize for E0412 "cannot find type", but don't belabor the point
// (that it's a variant) for E0573 "expected type, found variant".
let preamble = if res.is_none() {
let others = match enum_candidates.len() {
1 => String::new(),
2 => " and 1 other".to_owned(),
n => format!(" and {} others", n),
};
format!("there is an enum variant `{}`{}; ", enum_candidates[0].0, others)
} else {
String::new()
};
let msg = format!("{}try using the variant's enum", preamble);
err.span_suggestions(
span,
&msg,
enum_candidates
.into_iter()
.map(|(_variant_path, enum_ty_path)| enum_ty_path)
// Variants re-exported in prelude doesn't mean `prelude::v1` is the
// type name!
// FIXME: is there a more principled way to do this that
// would work for other re-exports?
.filter(|enum_ty_path| enum_ty_path != "std::prelude::v1")
// Also write `Option` rather than `std::prelude::v1::Option`.
.map(|enum_ty_path| {
// FIXME #56861: DRY-er prelude filtering.
enum_ty_path.trim_start_matches("std::prelude::v1::").to_owned()
}),
Applicability::MachineApplicable,
);
}
}
if path.len() == 1 && self.self_type_is_available(span) {
if let Some(candidate) = self.lookup_assoc_candidate(ident, ns, is_expected) {
let self_is_available = self.self_value_is_available(path[0].ident.span, span);
match candidate {
AssocSuggestion::Field => {
if self_is_available {
err.span_suggestion(
span,
"you might have meant to use the available field",
format!("self.{}", path_str),
Applicability::MachineApplicable,
);
} else {
err.span_label(span, "a field by this name exists in `Self`");
}
}
AssocSuggestion::MethodWithSelf if self_is_available => {
err.span_suggestion(
span,
"try",
format!("self.{}", path_str),
Applicability::MachineApplicable,
);
}
AssocSuggestion::MethodWithSelf | AssocSuggestion::AssocItem => {
err.span_suggestion(
span,
"try",
format!("Self::{}", path_str),
Applicability::MachineApplicable,
);
}
}
return (err, candidates);
}
// If the first argument in call is `self` suggest calling a method.
if let Some((call_span, args_span)) = self.call_has_self_arg(source) {
let mut args_snippet = String::new();
if let Some(args_span) = args_span {
if let Ok(snippet) = self.r.session.source_map().span_to_snippet(args_span) {
args_snippet = snippet;
}
}
err.span_suggestion(
call_span,
&format!("try calling `{}` as a method", ident),
format!("self.{}({})", path_str, args_snippet),
Applicability::MachineApplicable,
);
return (err, candidates);
}
}
// Try Levenshtein algorithm.
let typo_sugg = self.lookup_typo_candidate(path, ns, is_expected, span);
let levenshtein_worked = self.r.add_typo_suggestion(&mut err, typo_sugg, ident_span);
// Try context-dependent help if relaxed lookup didn't work.
if let Some(res) = res {
if self.smart_resolve_context_dependent_help(
&mut err,
span,
source,
res,
&path_str,
&fallback_label,
) {
return (err, candidates);
}
}
// Fallback label.
if !levenshtein_worked {
err.span_label(base_span, fallback_label);
self.type_ascription_suggestion(&mut err, base_span);
match self.diagnostic_metadata.current_let_binding {
Some((pat_sp, Some(ty_sp), None)) if ty_sp.contains(base_span) && could_be_expr => {
err.span_suggestion_short(
pat_sp.between(ty_sp),
"use `=` if you meant to assign",
" = ".to_string(),
Applicability::MaybeIncorrect,
);
}
_ => {}
}
}
(err, candidates)
}
/// Checks whether the source is a call expression whose first argument is `self`.
/// If so, returns the span of the whole call together with the span covering every
/// argument except the first one (`self`), when such trailing arguments exist.
fn call_has_self_arg(&self, source: PathSource<'_>) -> Option<(Span, Option<Span>)> {
    // Only expression paths can be calls; every other path source yields nothing.
    let parent = if let PathSource::Expr(parent) = source { parent? } else { return None };
    let args = match &parent.kind {
        ExprKind::Call(_, args) if !args.is_empty() => args,
        _ => return None,
    };
    // Peel any number of `&`/`&mut` borrows off the first argument.
    let mut receiver = &args[0].kind;
    while let ExprKind::AddrOf(_, _, inner) = receiver {
        receiver = &inner.kind;
    }
    // The (possibly dereferenced) first argument must be the bare path `self`.
    match receiver {
        ExprKind::Path(_, arg_name) if arg_name.segments.len() == 1 => {
            if arg_name.segments[0].ident.name != kw::SelfLower {
                return None;
            }
        }
        _ => return None,
    }
    let call_span = parent.span;
    // Span of everything after the leading `self`, if anything follows it.
    let tail_args_span = args.get(1).map(|second_arg| {
        Span::new(
            second_arg.span.lo(),
            args.last().unwrap().span.hi(),
            call_span.ctxt(),
        )
    });
    Some((call_span, tail_args_span))
}
/// Peeks at the source text following `span` and reports whether the next
/// non-whitespace token is a `{`; when it is, additionally tries to locate the
/// matching `}` and returns the span from `span` up to and including it.
fn followed_by_brace(&self, span: Span) -> (bool, Option<Span>) {
    // HACK(estebank): there is no principled way to detect the parser ambiguity
    // where a struct literal appears in a position where an opening brace starts
    // a block, so scan the raw source text after `span` instead.
    let sm = self.r.session.source_map();
    let mut sp = span;
    loop {
        sp = sm.next_point(sp);
        match sm.span_to_snippet(sp) {
            // Keep advancing while we only see whitespace.
            Ok(ref snippet) if snippet.chars().all(char::is_whitespace) => {}
            _ => break,
        }
    }
    let followed_by_brace = matches!(sm.span_to_snippet(sp), Ok(ref snippet) if snippet == "{");
    // In case this could be a struct literal that needs to be surrounded
    // by parentheses, find the appropriate span up to the closing brace.
    let mut closing_brace = None;
    // The bigger the span, the more likely we're incorrect -- bound the
    // search to 100 chars (101 probes, matching the original counter).
    for _ in 0..=100 {
        sp = sm.next_point(sp);
        match sm.span_to_snippet(sp) {
            Ok(ref snippet) => {
                if snippet == "}" {
                    closing_brace = Some(span.to(sp));
                    break;
                }
            }
            _ => break,
        }
    }
    (followed_by_brace, closing_brace)
}
/// Provides context-dependent help for errors reported by the `smart_resolve_path_fragment`
/// function.
/// Returns `true` if able to provide context-dependent help.
fn smart_resolve_context_dependent_help(
    &mut self,
    err: &mut DiagnosticBuilder<'a>,
    span: Span,
    source: PathSource<'_>,
    res: Res,
    path_str: &str,
    fallback_label: &str,
) -> bool {
    let ns = source.namespace();
    let is_expected = &|res| source.is_expected(res);
    // Suggests replacing `.` with `::` when a field access or method call was
    // likely meant to be a path to an item (`module.item` -> `module::item`).
    // Returns whether a suggestion was emitted.
    let path_sep = |err: &mut DiagnosticBuilder<'_>, expr: &Expr| match expr.kind {
        ExprKind::Field(_, ident) => {
            err.span_suggestion(
                expr.span,
                "use the path separator to refer to an item",
                format!("{}::{}", path_str, ident),
                Applicability::MaybeIncorrect,
            );
            true
        }
        ExprKind::MethodCall(ref segment, ..) => {
            // Replace only up to the method name, leaving the argument list intact.
            let span = expr.span.with_hi(segment.ident.span.hi());
            err.span_suggestion(
                span,
                "use the path separator to refer to an item",
                format!("{}::{}", path_str, segment.ident),
                Applicability::MaybeIncorrect,
            );
            true
        }
        _ => false,
    };
    // Emits help for a name that requires struct-literal syntax but was used
    // like a unit or tuple constructor.
    let mut bad_struct_syntax_suggestion = |def_id: DefId| {
        let (followed_by_brace, closing_brace) = self.followed_by_brace(span);
        let mut suggested = false;
        match source {
            PathSource::Expr(Some(parent)) => {
                suggested = path_sep(err, &parent);
            }
            PathSource::Expr(None) if followed_by_brace => {
                // A struct literal in expression position is ambiguous with a
                // block; suggest parenthesizing it.
                if let Some(sp) = closing_brace {
                    err.multipart_suggestion(
                        "surround the struct literal with parentheses",
                        vec![
                            (sp.shrink_to_lo(), "(".to_string()),
                            (sp.shrink_to_hi(), ")".to_string()),
                        ],
                        Applicability::MaybeIncorrect,
                    );
                } else {
                    err.span_label(
                        span, // Note the parentheses surrounding the suggestion below
                        format!(
                            "you might want to surround a struct literal with parentheses: \
                             `({} {{ /* fields */ }})`?",
                            path_str
                        ),
                    );
                }
                suggested = true;
            }
            _ => {}
        }
        if !suggested {
            if let Some(span) = self.r.opt_span(def_id) {
                err.span_label(span, &format!("`{}` defined here", path_str));
            }
            err.span_label(span, format!("did you mean `{} {{ /* fields */ }}`?", path_str));
        }
    };
    match (res, source) {
        // A bang macro named without its `!`.
        (Res::Def(DefKind::Macro(MacroKind::Bang), _), _) => {
            err.span_suggestion_verbose(
                span.shrink_to_hi(),
                "use `!` to invoke the macro",
                "!".to_string(),
                Applicability::MaybeIncorrect,
            );
            if path_str == "try" && span.rust_2015() {
                err.note("if you want the `try` keyword, you need to be in the 2018 edition");
            }
        }
        // A type alias used in trait position.
        (Res::Def(DefKind::TyAlias, def_id), PathSource::Trait(_)) => {
            err.span_label(span, "type aliases cannot be used as traits");
            if nightly_options::is_nightly_build() {
                let msg = "you might have meant to use `#![feature(trait_alias)]` instead of a \
                           `type` alias";
                if let Some(span) = self.r.opt_span(def_id) {
                    err.span_help(span, msg);
                } else {
                    err.help(msg);
                }
            }
        }
        // A module used as an expression: only helpful when `.` -> `::` applies.
        (Res::Def(DefKind::Mod, _), PathSource::Expr(Some(parent))) => {
            if !path_sep(err, &parent) {
                return false;
            }
        }
        // An enum used as a value or constructor: point at its variants instead.
        (Res::Def(DefKind::Enum, def_id), PathSource::TupleStruct | PathSource::Expr(..)) => {
            if let Some(variants) = self.collect_enum_variants(def_id) {
                if !variants.is_empty() {
                    let msg = if variants.len() == 1 {
                        "try using the enum's variant"
                    } else {
                        "try using one of the enum's variants"
                    };
                    err.span_suggestions(
                        span,
                        msg,
                        variants.iter().map(path_names_to_string),
                        Applicability::MaybeIncorrect,
                    );
                }
            } else {
                err.note("did you mean to use one of the enum's variants?");
            }
        }
        // A struct used in value position.
        (Res::Def(DefKind::Struct, def_id), _) if ns == ValueNS => {
            if let Some((ctor_def, ctor_vis)) = self.r.struct_constructors.get(&def_id).cloned()
            {
                // The struct has a constructor, but it may not be visible here.
                let accessible_ctor =
                    self.r.is_accessible_from(ctor_vis, self.parent_scope.module);
                if is_expected(ctor_def) && !accessible_ctor {
                    err.span_label(
                        span,
                        "constructor is not visible here due to private fields".to_string(),
                    );
                }
            } else {
                // No tuple/unit constructor: struct-literal syntax is required.
                bad_struct_syntax_suggestion(def_id);
            }
        }
        // Unions, struct variants, and braced-struct constructors in value position.
        (
            Res::Def(
                DefKind::Union | DefKind::Variant | DefKind::Ctor(_, CtorKind::Fictive),
                def_id,
            ),
            _,
        ) if ns == ValueNS => {
            bad_struct_syntax_suggestion(def_id);
        }
        // A tuple struct/variant constructor referenced without its arguments.
        (Res::Def(DefKind::Ctor(_, CtorKind::Fn), def_id), _) if ns == ValueNS => {
            if let Some(span) = self.r.opt_span(def_id) {
                err.span_label(span, &format!("`{}` defined here", path_str));
            }
            err.span_label(span, format!("did you mean `{}( /* fields */ )`?", path_str));
        }
        (Res::SelfTy(..), _) if ns == ValueNS => {
            err.span_label(span, fallback_label);
            err.note("can't use `Self` as a constructor, you must use the implemented struct");
        }
        (Res::Def(DefKind::TyAlias | DefKind::AssocTy, _), _) if ns == ValueNS => {
            err.note("can't use a type alias as a constructor");
        }
        _ => return false,
    }
    true
}
/// Looks for an associated item (a field of the current `Self` type, an associated
/// type of the current trait, or an associated item of the current trait) named
/// `ident` that could plausibly have been meant, restricted to resolutions accepted
/// by `filter_fn`.
fn lookup_assoc_candidate<FilterFn>(
    &mut self,
    ident: Ident,
    ns: Namespace,
    filter_fn: FilterFn,
) -> Option<AssocSuggestion>
where
    FilterFn: Fn(Res) -> bool,
{
    // Peels reference types off `t` to find the `NodeId` of the underlying path type.
    fn extract_node_id(t: &Ty) -> Option<NodeId> {
        match t.kind {
            TyKind::Path(None, _) => Some(t.id),
            TyKind::Rptr(_, ref mut_ty) => extract_node_id(&mut_ty.ty),
            // This doesn't handle the remaining `Ty` variants as they are not
            // that commonly the self_type, it might be interesting to provide
            // support for those in future.
            _ => None,
        }
    }
    // Fields are generally expected in the same contexts as locals.
    if filter_fn(Res::Local(ast::DUMMY_NODE_ID)) {
        if let Some(node_id) =
            self.diagnostic_metadata.current_self_type.as_ref().and_then(extract_node_id)
        {
            // Look for a field with the same name in the current self_type.
            if let Some(resolution) = self.r.partial_res_map.get(&node_id) {
                match resolution.base_res() {
                    Res::Def(DefKind::Struct | DefKind::Union, did)
                        if resolution.unresolved_segments() == 0 =>
                    {
                        if let Some(field_names) = self.r.field_names.get(&did) {
                            if field_names
                                .iter()
                                .any(|&field_name| ident.name == field_name.node)
                            {
                                return Some(AssocSuggestion::Field);
                            }
                        }
                    }
                    _ => {}
                }
            }
        }
    }
    // Associated types declared directly on the trait currently being resolved.
    for assoc_type_ident in &self.diagnostic_metadata.current_trait_assoc_types {
        if *assoc_type_ident == ident {
            return Some(AssocSuggestion::AssocItem);
        }
    }
    // Look for associated items in the current trait.
    if let Some((module, _)) = self.current_trait_ref {
        if let Ok(binding) = self.r.resolve_ident_in_module(
            ModuleOrUniformRoot::Module(module),
            ident,
            ns,
            &self.parent_scope,
            false,
            module.span,
        ) {
            let res = binding.res();
            if filter_fn(res) {
                // Distinguish methods that take `self`, so the caller can suggest
                // `self.x` rather than `Self::x` when appropriate.
                return Some(if self.r.has_self.contains(&res.def_id()) {
                    AssocSuggestion::MethodWithSelf
                } else {
                    AssocSuggestion::AssocItem
                });
            }
        }
    }
    None
}
/// Collects every in-scope name accepted by `filter_fn` (locals and generics from the
/// ribs, module items, extern/std prelude items, and primitive types for single-segment
/// paths; module members otherwise) and returns the closest textual match for the final
/// segment of `path`, if one is found and differs from the original name.
fn lookup_typo_candidate(
    &mut self,
    path: &[Segment],
    ns: Namespace,
    filter_fn: &impl Fn(Res) -> bool,
    span: Span,
) -> Option<TypoSuggestion> {
    let mut names = Vec::new();
    if path.len() == 1 {
        // Search in lexical scope.
        // Walk backwards up the ribs in scope and collect candidates.
        for rib in self.ribs[ns].iter().rev() {
            // Locals and type parameters
            for (ident, &res) in &rib.bindings {
                if filter_fn(res) {
                    names.push(TypoSuggestion::from_res(ident.name, res));
                }
            }
            // Items in scope
            if let RibKind::ModuleRibKind(module) = rib.kind {
                // Items from this module
                self.r.add_module_candidates(module, &mut names, &filter_fn);
                if let ModuleKind::Block(..) = module.kind {
                    // We can see through blocks
                } else {
                    // Items from the prelude
                    if !module.no_implicit_prelude {
                        let extern_prelude = self.r.extern_prelude.clone();
                        names.extend(extern_prelude.iter().flat_map(|(ident, _)| {
                            // Each extern prelude entry becomes a candidate crate-root module.
                            self.r
                                .crate_loader
                                .maybe_process_path_extern(ident.name, ident.span)
                                .and_then(|crate_id| {
                                    let crate_mod = Res::Def(
                                        DefKind::Mod,
                                        DefId { krate: crate_id, index: CRATE_DEF_INDEX },
                                    );
                                    if filter_fn(crate_mod) {
                                        Some(TypoSuggestion::from_res(ident.name, crate_mod))
                                    } else {
                                        None
                                    }
                                })
                        }));
                        if let Some(prelude) = self.r.prelude {
                            self.r.add_module_candidates(prelude, &mut names, &filter_fn);
                        }
                    }
                    // A non-block module rib ends the lexical search.
                    break;
                }
            }
        }
        // Add primitive types to the mix
        if filter_fn(Res::PrimTy(PrimTy::Bool)) {
            names.extend(
                self.r.primitive_type_table.primitive_types.iter().map(|(name, prim_ty)| {
                    TypoSuggestion::from_res(*name, Res::PrimTy(*prim_ty))
                }),
            )
        }
    } else {
        // Search in module.
        let mod_path = &path[..path.len() - 1];
        if let PathResult::Module(module) =
            self.resolve_path(mod_path, Some(TypeNS), false, span, CrateLint::No)
        {
            if let ModuleOrUniformRoot::Module(module) = module {
                self.r.add_module_candidates(module, &mut names, &filter_fn);
            }
        }
    }
    let name = path[path.len() - 1].ident.name;
    // Make sure error reporting is deterministic.
    names.sort_by_cached_key(|suggestion| suggestion.candidate.as_str());
    match find_best_match_for_name(
        names.iter().map(|suggestion| &suggestion.candidate),
        &name.as_str(),
        None,
    ) {
        // Only report a suggestion that actually differs from the looked-up name.
        Some(found) if found != name => {
            names.into_iter().find(|suggestion| suggestion.candidate == found)
        }
        _ => None,
    }
}
/// Only used in a specific case of type ascription suggestions
fn get_colon_suggestion_span(&self, start: Span) -> Span {
    // Widen `start` by a single source position so the span covers the `:` token.
    let next = self.r.session.source_map().next_point(start);
    start.to(next)
}
/// Reports likely mistakes around a type ascription (`expr: Type`): a `:` that was
/// probably meant to be `;` (on a different line), a `:` that was probably meant to
/// be `::` (no space after it), or an ascription that was probably meant to be a
/// `let` assignment (an `=` follows shortly after).
fn type_ascription_suggestion(&self, err: &mut DiagnosticBuilder<'_>, base_span: Span) {
    let sm = self.r.session.source_map();
    let base_snippet = sm.span_to_snippet(base_span);
    if let Some(sp) = self.diagnostic_metadata.current_type_ascription.last() {
        let mut sp = *sp;
        loop {
            // Try to find the `:`; bail on first non-':' / non-whitespace.
            sp = sm.next_point(sp);
            if let Ok(snippet) = sm.span_to_snippet(sp.to(sm.next_point(sp))) {
                let line_sp = sm.lookup_char_pos(sp.hi()).line;
                let line_base_sp = sm.lookup_char_pos(base_span.lo()).line;
                if snippet == ":" {
                    let mut show_label = true;
                    if line_sp != line_base_sp {
                        // The `:` sits on a different line from the expression:
                        // the user most likely meant to end the statement with `;`.
                        err.span_suggestion_short(
                            sp,
                            "did you mean to use `;` here instead?",
                            ";".to_string(),
                            Applicability::MaybeIncorrect,
                        );
                    } else {
                        let colon_sp = self.get_colon_suggestion_span(sp);
                        let after_colon_sp =
                            self.get_colon_suggestion_span(colon_sp.shrink_to_hi());
                        // `x:y` with no space after the colon suggests a path `x::y`.
                        if !sm
                            .span_to_snippet(after_colon_sp)
                            .map(|s| s == " ")
                            .unwrap_or(false)
                        {
                            err.span_suggestion(
                                colon_sp,
                                "maybe you meant to write a path separator here",
                                "::".to_string(),
                                Applicability::MaybeIncorrect,
                            );
                            show_label = false;
                        }
                        if let Ok(base_snippet) = base_snippet {
                            // Scan ahead a bounded distance for an `=` on this line.
                            let mut sp = after_colon_sp;
                            for _ in 0..100 {
                                // Try to find an assignment
                                sp = sm.next_point(sp);
                                let snippet = sm.span_to_snippet(sp.to(sm.next_point(sp)));
                                match snippet {
                                    Ok(ref x) if x.as_str() == "=" => {
                                        // `x: Ty = ...` was likely meant to be
                                        // `let x: Ty = ...`.
                                        err.span_suggestion(
                                            base_span,
                                            "maybe you meant to write an assignment here",
                                            format!("let {}", base_snippet),
                                            Applicability::MaybeIncorrect,
                                        );
                                        show_label = false;
                                        break;
                                    }
                                    Ok(ref x) if x.as_str() == "\n" => break,
                                    Err(_) => break,
                                    Ok(_) => {}
                                }
                            }
                        }
                    }
                    if show_label {
                        err.span_label(
                            base_span,
                            "expecting a type here because of type ascription",
                        );
                    }
                    break;
                } else if !snippet.trim().is_empty() {
                    debug!("tried to find type ascription `:` token, couldn't find it");
                    break;
                }
            } else {
                break;
            }
        }
    }
}
/// Performs a breadth-first walk of the module graph, starting at the crate root,
/// looking for the module with the given `def_id`; returns it together with an
/// `ImportSuggestion` describing a locally visible path to it, if found.
fn find_module(&mut self, def_id: DefId) -> Option<(Module<'a>, ImportSuggestion)> {
    let mut result = None;
    let mut seen_modules = FxHashSet::default();
    // Worklist entries pair a module with the path segments leading to it.
    let mut worklist = vec![(self.r.graph_root, Vec::new())];
    while let Some((in_module, path_segments)) = worklist.pop() {
        // abort if the module is already found
        if result.is_some() {
            break;
        }
        in_module.for_each_child(self.r, |_, ident, _, name_binding| {
            // abort if the module is already found or if name_binding is private external
            if result.is_some() || !name_binding.vis.is_visible_locally() {
                return;
            }
            if let Some(module) = name_binding.module() {
                // form the path
                let mut path_segments = path_segments.clone();
                path_segments.push(ast::PathSegment::from_ident(ident));
                let module_def_id = module.def_id().unwrap();
                if module_def_id == def_id {
                    let path = Path { span: name_binding.span, segments: path_segments };
                    result = Some((
                        module,
                        ImportSuggestion {
                            did: Some(def_id),
                            descr: "module",
                            path,
                            accessible: true,
                        },
                    ));
                } else {
                    // add the module to the lookup, unless it was already visited
                    if seen_modules.insert(module_def_id) {
                        worklist.push((module, path_segments));
                    }
                }
            }
        });
    }
    result
}
/// Walks the children of the enum's defining module and builds one path per
/// variant; returns `None` when that module cannot be located.
fn collect_enum_variants(&mut self, def_id: DefId) -> Option<Vec<Path>> {
    let (enum_module, enum_import_suggestion) = self.find_module(def_id)?;
    let mut variants = Vec::new();
    enum_module.for_each_child(self.r, |_, ident, _, name_binding| {
        // Only actual variants contribute; any other child of the module is skipped.
        if matches!(name_binding.res(), Res::Def(DefKind::Variant, _)) {
            // Extend the module path with the variant's own name.
            let mut segments = enum_import_suggestion.path.segments.clone();
            segments.push(ast::PathSegment::from_ident(ident));
            variants.push(Path { span: name_binding.span, segments });
        }
    });
    Some(variants)
}
/// Suggests introducing a missing type parameter for an unresolved single-segment
/// type path (e.g. suggest `fn foo<T>(...)` when `T` cannot be found). Returns the
/// suggestion span, message, replacement text, and applicability, or `None` when
/// the situation does not look like a missing type parameter.
crate fn report_missing_type_error(
    &self,
    path: &[Segment],
) -> Option<(Span, &'static str, String, Applicability)> {
    // Only a single segment without generic arguments can be a type parameter.
    let (ident, span) = match path {
        [segment] if !segment.has_generic_args => {
            (segment.ident.to_string(), segment.ident.span)
        }
        _ => return None,
    };
    // A lone uppercase character (`T`, `U`, ...) strongly hints at a type parameter.
    let mut iter = ident.chars().map(|c| c.is_uppercase());
    let single_uppercase_char =
        matches!(iter.next(), Some(true)) && matches!(iter.next(), None);
    if !self.diagnostic_metadata.currently_processing_generics && !single_uppercase_char {
        return None;
    }
    match (self.diagnostic_metadata.current_item, single_uppercase_char) {
        (Some(Item { kind: ItemKind::Fn(..), ident, .. }), _) if ident.name == sym::main => {
            // Ignore `fn main()` as we don't want to suggest `fn main<T>()`
        }
        (
            Some(Item {
                kind:
                    kind @ ItemKind::Fn(..)
                    | kind @ ItemKind::Enum(..)
                    | kind @ ItemKind::Struct(..)
                    | kind @ ItemKind::Union(..),
                ..
            }),
            true,
        )
        | (Some(Item { kind, .. }), false) => {
            // Likely missing type parameter.
            if let Some(generics) = kind.generics() {
                if span.overlaps(generics.span) {
                    // Avoid the following:
                    // error[E0405]: cannot find trait `A` in this scope
                    //  --> $DIR/typo-suggestion-named-underscore.rs:CC:LL
                    //   |
                    // L | fn foo<T: A>(x: T) {} // Shouldn't suggest underscore
                    //   |           ^- help: you might be missing a type parameter: `, A`
                    //   |           |
                    //   |           not found in this scope
                    return None;
                }
                let msg = "you might be missing a type parameter";
                let (span, sugg) = if let [.., param] = &generics.params[..] {
                    // Append after the last existing parameter (or its last bound).
                    let span = if let [.., bound] = &param.bounds[..] {
                        bound.span()
                    } else {
                        param.ident.span
                    };
                    (span, format!(", {}", ident))
                } else {
                    // No generics yet: introduce a fresh `<...>` list.
                    (generics.span, format!("<{}>", ident))
                };
                // Do not suggest if this is coming from macro expansion.
                if !span.from_expansion() {
                    return Some((
                        span.shrink_to_hi(),
                        msg,
                        sugg,
                        Applicability::MaybeIncorrect,
                    ));
                }
            }
        }
        _ => {}
    }
    None
}
/// Given the target `label`, search the `rib_index`th label rib for similarly named labels,
/// optionally returning the closest match and whether it is reachable.
crate fn suggestion_for_label_in_rib(
    &self,
    rib_index: usize,
    label: Ident,
) -> Option<LabelSuggestion> {
    // Labels from this rib are only usable when the rib is still within scope.
    let within_scope = self.is_label_valid_from_rib(rib_index);
    let rib = &self.label_ribs[rib_index];
    // Restrict the candidate set to labels sharing `label`'s hygiene context.
    let candidate_names = rib
        .bindings
        .keys()
        .filter(|id| id.span.ctxt() == label.span.ctxt())
        .map(|id| &id.name);
    let symbol = find_best_match_for_name(candidate_names, &label.as_str(), None)?;
    // Upon finding a similar name, recover the ident it came from -- its span makes
    // the diagnostic useful -- and report whether that candidate is in scope.
    let (ident, _) = rib.bindings.iter().find(|(ident, _)| ident.name == symbol).unwrap();
    Some((*ident, within_scope))
}
}
impl<'tcx> LifetimeContext<'_, 'tcx> {
    /// Creates (without emitting) an E0106 diagnostic for `count` missing lifetime
    /// specifiers at `span`.
    crate fn report_missing_lifetime_specifiers(
        &self,
        span: Span,
        count: usize,
    ) -> DiagnosticBuilder<'tcx> {
        struct_span_err!(
            self.tcx.sess,
            span,
            E0106,
            "missing lifetime specifier{}",
            pluralize!(count)
        )
    }
    /// Emits an E0261 error for an undeclared lifetime name, suggesting each recorded
    /// spot where the missing lifetime parameter could be introduced.
    crate fn emit_undeclared_lifetime_error(&self, lifetime_ref: &hir::Lifetime) {
        let mut err = struct_span_err!(
            self.tcx.sess,
            lifetime_ref.span,
            E0261,
            "use of undeclared lifetime name `{}`",
            lifetime_ref
        );
        err.span_label(lifetime_ref.span, "undeclared lifetime");
        for missing in &self.missing_named_lifetime_spots {
            match missing {
                MissingLifetimeSpot::Generics(generics) => {
                    // Insert before the first non-synthetic (non-`impl Trait`) parameter,
                    // or introduce a fresh `<...>` list when there is none.
                    let (span, sugg) = if let Some(param) =
                        generics.params.iter().find(|p| match p.kind {
                            hir::GenericParamKind::Type {
                                synthetic: Some(hir::SyntheticTyParamKind::ImplTrait),
                                ..
                            } => false,
                            _ => true,
                        }) {
                        (param.span.shrink_to_lo(), format!("{}, ", lifetime_ref))
                    } else {
                        (generics.span, format!("<{}>", lifetime_ref))
                    };
                    err.span_suggestion(
                        span,
                        &format!("consider introducing lifetime `{}` here", lifetime_ref),
                        sugg,
                        Applicability::MaybeIncorrect,
                    );
                }
                MissingLifetimeSpot::HigherRanked { span, span_type } => {
                    err.span_suggestion(
                        *span,
                        &format!(
                            "consider making the {} lifetime-generic with a new `{}` lifetime",
                            span_type.descr(),
                            lifetime_ref
                        ),
                        span_type.suggestion(&lifetime_ref.to_string()),
                        Applicability::MaybeIncorrect,
                    );
                    err.note(
                        "for more information on higher-ranked polymorphism, visit \
                         https://doc.rust-lang.org/nomicon/hrtb.html",
                    );
                }
            }
        }
        err.emit();
    }
    /// If `trait_ref` resolves to one of the `Fn`/`FnMut`/`FnOnce` lang-item traits,
    /// records it as a spot where a higher-ranked lifetime (`for<'a>`) could be
    /// introduced and returns `true`.
    crate fn is_trait_ref_fn_scope(&mut self, trait_ref: &'tcx hir::PolyTraitRef<'tcx>) -> bool {
        if let def::Res::Def(_, did) = trait_ref.trait_ref.path.res {
            if [
                self.tcx.lang_items().fn_once_trait(),
                self.tcx.lang_items().fn_trait(),
                self.tcx.lang_items().fn_mut_trait(),
            ]
            .contains(&Some(did))
            {
                // Either start a new `for<...>` binder or extend an existing one.
                let (span, span_type) = match &trait_ref.bound_generic_params {
                    [] => (trait_ref.span.shrink_to_lo(), ForLifetimeSpanType::BoundEmpty),
                    [.., bound] => (bound.span.shrink_to_hi(), ForLifetimeSpanType::BoundTail),
                };
                self.missing_named_lifetime_spots
                    .push(MissingLifetimeSpot::HigherRanked { span, span_type });
                return true;
            }
        };
        false
    }
    /// Labels `err` with the number of expected lifetime parameters at `span` and,
    /// when possible, suggests either reusing the single named lifetime that is in
    /// scope or introducing a fresh `'a`.
    crate fn add_missing_lifetime_specifiers_label(
        &self,
        err: &mut DiagnosticBuilder<'_>,
        span: Span,
        count: usize,
        lifetime_names: &FxHashSet<Ident>,
        params: &[ElisionFailureInfo],
    ) {
        let snippet = self.tcx.sess.source_map().span_to_snippet(span).ok();
        err.span_label(
            span,
            &format!(
                "expected {} lifetime parameter{}",
                if count == 1 { "named".to_string() } else { count.to_string() },
                pluralize!(count)
            ),
        );
        // Used when exactly one named lifetime is in scope: suggest reusing it.
        let suggest_existing = |err: &mut DiagnosticBuilder<'_>, sugg| {
            err.span_suggestion_verbose(
                span,
                &format!("consider using the `{}` lifetime", lifetime_names.iter().next().unwrap()),
                sugg,
                Applicability::MaybeIncorrect,
            );
        };
        // Used when no named lifetime is in scope: suggest declaring a new `'a` at
        // one of the recorded spots and threading it through the elided parameters.
        let suggest_new = |err: &mut DiagnosticBuilder<'_>, sugg: &str| {
            for missing in self.missing_named_lifetime_spots.iter().rev() {
                let mut introduce_suggestion = vec![];
                let msg;
                let should_break;
                introduce_suggestion.push(match missing {
                    MissingLifetimeSpot::Generics(generics) => {
                        msg = "consider introducing a named lifetime parameter".to_string();
                        should_break = true;
                        // Same insertion-point logic as in `emit_undeclared_lifetime_error`.
                        if let Some(param) = generics.params.iter().find(|p| match p.kind {
                            hir::GenericParamKind::Type {
                                synthetic: Some(hir::SyntheticTyParamKind::ImplTrait),
                                ..
                            } => false,
                            _ => true,
                        }) {
                            (param.span.shrink_to_lo(), "'a, ".to_string())
                        } else {
                            (generics.span, "<'a>".to_string())
                        }
                    }
                    MissingLifetimeSpot::HigherRanked { span, span_type } => {
                        msg = format!(
                            "consider making the {} lifetime-generic with a new `'a` lifetime",
                            span_type.descr(),
                        );
                        should_break = false;
                        err.note(
                            "for more information on higher-ranked polymorphism, visit \
                             https://doc.rust-lang.org/nomicon/hrtb.html",
                        );
                        (*span, span_type.suggestion("'a"))
                    }
                });
                // Rewrite each elided `&`/`&'_` parameter so it uses the new `'a`.
                for param in params {
                    if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(param.span) {
                        if snippet.starts_with('&') && !snippet.starts_with("&'") {
                            introduce_suggestion
                                .push((param.span, format!("&'a {}", &snippet[1..])));
                        } else if snippet.starts_with("&'_ ") {
                            introduce_suggestion
                                .push((param.span, format!("&'a {}", &snippet[4..])));
                        }
                    }
                }
                introduce_suggestion.push((span, sugg.to_string()));
                err.multipart_suggestion(&msg, introduce_suggestion, Applicability::MaybeIncorrect);
                if should_break {
                    break;
                }
            }
        };
        // Dispatch on how many named lifetimes are available and on the exact source
        // text at the error span (`&`, `'_`, empty, or a type missing its `<...>`).
        match (lifetime_names.len(), lifetime_names.iter().next(), snippet.as_deref()) {
            (1, Some(name), Some("&")) => {
                suggest_existing(err, format!("&{} ", name));
            }
            (1, Some(name), Some("'_")) => {
                suggest_existing(err, name.to_string());
            }
            (1, Some(name), Some("")) => {
                suggest_existing(err, format!("{}, ", name).repeat(count));
            }
            (1, Some(name), Some(snippet)) if !snippet.ends_with('>') => {
                suggest_existing(
                    err,
                    format!(
                        "{}<{}>",
                        snippet,
                        std::iter::repeat(name.to_string())
                            .take(count)
                            .collect::<Vec<_>>()
                            .join(", ")
                    ),
                );
            }
            (0, _, Some("&")) if count == 1 => {
                suggest_new(err, "&'a ");
            }
            (0, _, Some("'_")) if count == 1 => {
                suggest_new(err, "'a");
            }
            (0, _, Some(snippet)) if !snippet.ends_with('>') && count == 1 => {
                suggest_new(err, &format!("{}<'a>", snippet));
            }
            (n, ..) if n > 1 => {
                let spans: Vec<Span> = lifetime_names.iter().map(|lt| lt.span).collect();
                err.span_note(spans, "these named lifetimes are available to use");
                if Some("") == snippet.as_deref() {
                    // This happens when we have `Foo<T>` where we point at the space before `T`,
                    // but this can be confusing so we give a suggestion with placeholders.
                    err.span_suggestion_verbose(
                        span,
                        "consider using one of the available lifetimes here",
                        "'lifetime, ".repeat(count),
                        Applicability::HasPlaceholders,
                    );
                }
            }
            _ => {}
        }
    }
}
| 42.704271 | 162 | 0.445015 |
79e554727b929cd24ddab6d3b416a91568c51e16 | 182,773 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#![allow(clippy::declare_interior_mutable_const)]
use crate::bool_domain::BoolDomain;
use crate::constant_domain::ConstantDomain;
use crate::environment::Environment;
use crate::expression::Expression::{ConditionalExpression, Join};
use crate::expression::{Expression, ExpressionType};
use crate::interval_domain::{self, IntervalDomain};
use crate::k_limits;
use crate::path::PathRefinement;
use crate::path::{Path, PathEnum, PathSelector};
use crate::tag_domain::{Tag, TagDomain};
use crate::known_names::KnownNames;
use log_derive::{logfn, logfn_inputs};
use mirai_annotations::*;
use serde::{Deserialize, Serialize};
use std::cell::RefCell;
use std::collections::HashSet;
use std::fmt::{Debug, Formatter, Result};
use std::hash::Hash;
use std::hash::Hasher;
use std::rc::Rc;
// See https://github.com/facebookexperimental/MIRAI/blob/master/documentation/AbstractValues.md.
/// Mirai is an abstract interpreter and thus produces abstract values.
/// In general, an abstract value is a value that is not fully known.
/// For example, we may know that it is a number between 1 and 10, but not
/// which particular number.
///
/// When we do know everything about a value, it is concrete rather than
/// abstract, but it is convenient to just use this structure for concrete values
/// as well, since all operations can be uniform.
#[derive(Serialize, Deserialize, Clone, Eq, Ord, PartialOrd)]
pub struct AbstractValue {
    // This is not a domain element, but a representation of how this value has been constructed.
    // It is used to refine the value with respect to path conditions and actual arguments.
    // It is also used to construct corresponding domain elements, when needed.
    pub expression: Expression,
    // Keeps track of how large the expression is.
    // When an expression gets too large it needs to get widened otherwise execution time diverges.
    pub expression_size: u64,
    /// Cached interval domain element computed on demand by get_as_interval.
    /// Not serialized (`serde(skip)`); recomputed on demand.
    #[serde(skip)]
    interval: RefCell<Option<Rc<IntervalDomain>>>,
    /// Cached tag domain element computed on demand by get_tags.
    /// Not serialized (`serde(skip)`); recomputed on demand.
    #[serde(skip)]
    tags: RefCell<Option<Rc<TagDomain>>>,
}
impl Debug for AbstractValue {
    /// Formats the value by delegating to the `Debug` form of its expression.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        Debug::fmt(&self.expression, f)
    }
}
impl Hash for AbstractValue {
    /// Hashes only the expression, so two values hash identically whenever
    /// their expressions do.
    fn hash<H: Hasher>(&self, state: &mut H) {
        Hash::hash(&self.expression, state);
    }
}
impl PartialEq for AbstractValue {
    /// Equality is defined purely by the expressions; the cached `interval`
    /// and `tags` fields do not participate.
    #[logfn_inputs(TRACE)]
    fn eq(&self, other: &Self) -> bool {
        self.expression == other.expression
    }
}
/// An abstract domain element that represents the impossible concrete value.
/// I.e. the corresponding set of possible concrete values is empty.
pub const BOTTOM: AbstractValue = AbstractValue {
    expression: Expression::Bottom,
    expression_size: 1,
    interval: RefCell::new(None),
    tags: RefCell::new(None),
};
/// An abstract domain element that represents the single concrete value, false.
pub const FALSE: AbstractValue = AbstractValue {
    expression: Expression::CompileTimeConstant(ConstantDomain::False),
    expression_size: 1,
    interval: RefCell::new(None),
    tags: RefCell::new(None),
};
/// An abstract domain element that represents all possible concrete values.
pub const TOP: AbstractValue = AbstractValue {
    expression: Expression::Top,
    expression_size: 1,
    interval: RefCell::new(None),
    tags: RefCell::new(None),
};
/// An abstract domain element that represents the single concrete value, true.
pub const TRUE: AbstractValue = AbstractValue {
    expression: Expression::CompileTimeConstant(ConstantDomain::True),
    expression_size: 1,
    interval: RefCell::new(None),
    tags: RefCell::new(None),
};
/// An abstract domain element that represents a dummy untagged value.
/// It is only used as the default value for the tag field of non-scalar values.
pub const DUMMY_UNTAGGED_VALUE: AbstractValue = AbstractValue {
    expression: Expression::CompileTimeConstant(ConstantDomain::I128(0)),
    expression_size: 1,
    interval: RefCell::new(None),
    tags: RefCell::new(None),
};
impl From<bool> for AbstractValue {
    /// Wraps a Boolean as the corresponding compile-time constant value.
    #[logfn_inputs(TRACE)]
    fn from(b: bool) -> AbstractValue {
        let constant = if b { ConstantDomain::True } else { ConstantDomain::False };
        AbstractValue {
            expression: Expression::CompileTimeConstant(constant),
            expression_size: 1,
            interval: RefCell::new(None),
            tags: RefCell::new(None),
        }
    }
}
impl From<ConstantDomain> for AbstractValue {
    /// Wraps a constant domain element as an abstract value; the bottom
    /// constant maps to the BOTTOM abstract value.
    #[logfn_inputs(TRACE)]
    fn from(cv: ConstantDomain) -> AbstractValue {
        match cv {
            ConstantDomain::Bottom => BOTTOM,
            constant => AbstractValue {
                expression: Expression::CompileTimeConstant(constant),
                expression_size: 1,
                interval: RefCell::new(None),
                tags: RefCell::new(None),
            },
        }
    }
}
impl From<BoolDomain> for AbstractValue {
    /// Maps each Boolean domain element onto the matching abstract value constant.
    #[logfn_inputs(TRACE)]
    fn from(b: BoolDomain) -> AbstractValue {
        match b {
            BoolDomain::True => TRUE,
            BoolDomain::False => FALSE,
            BoolDomain::Top => TOP,
            BoolDomain::Bottom => BOTTOM,
        }
    }
}
impl From<u128> for AbstractValue {
    /// Wraps an unsigned 128-bit integer as a compile-time-constant abstract value.
    #[logfn_inputs(TRACE)]
    fn from(cv: u128) -> AbstractValue {
        let expression = Expression::CompileTimeConstant(ConstantDomain::U128(cv));
        AbstractValue {
            expression,
            expression_size: 1,
            interval: RefCell::new(None),
            tags: RefCell::new(None),
        }
    }
}
impl AbstractValue {
    /// Creates an abstract value from a binary expression and keeps track of the size.
    /// If either operand is TOP or BOTTOM the result is that operand, unchanged.
    #[logfn_inputs(TRACE)]
    fn make_binary(
        mut left: Rc<AbstractValue>,
        mut right: Rc<AbstractValue>,
        operation: fn(Rc<AbstractValue>, Rc<AbstractValue>) -> Expression,
    ) -> Rc<AbstractValue> {
        if left.is_top() || left.is_bottom() {
            return left;
        }
        if right.is_top() || right.is_bottom() {
            return right;
        }
        let mut expression_size = left.expression_size.saturating_add(right.expression_size);
        if expression_size > k_limits::MAX_EXPRESSION_SIZE {
            // The overall expression is going to overflow, so pre-compute the simpler domains from
            // the larger expression and then replace its expression with TOP.
            // Passing u64::MAX as the size forces make_from to widen without logging
            // (make_from only logs when the size is below u64::MAX).
            if left.expression_size < right.expression_size {
                right = AbstractValue::make_from(right.expression.clone(), u64::MAX);
                expression_size = left.expression_size + 1;
            } else {
                left = AbstractValue::make_from(left.expression.clone(), u64::MAX);
                expression_size = right.expression_size + 1;
            }
        }
        Self::make_from(operation(left, right), expression_size)
    }

    /// Creates an abstract value from a typed binary expression and keeps track of the size.
    /// TOP/BOTTOM operands propagate unchanged, as in make_binary, but no size-limit
    /// widening is done here.
    #[logfn_inputs(TRACE)]
    fn make_typed_binary(
        left: Rc<AbstractValue>,
        right: Rc<AbstractValue>,
        result_type: ExpressionType,
        operation: fn(Rc<AbstractValue>, Rc<AbstractValue>, ExpressionType) -> Expression,
    ) -> Rc<AbstractValue> {
        if left.is_top() || left.is_bottom() {
            return left;
        }
        if right.is_top() || right.is_bottom() {
            return right;
        }
        let expression_size = left.expression_size.saturating_add(right.expression_size);
        Self::make_from(operation(left, right, result_type), expression_size)
    }

    /// Creates an abstract value from a typed unary expression and keeps track of the size.
    #[logfn_inputs(TRACE)]
    fn make_typed_unary(
        operand: Rc<AbstractValue>,
        result_type: ExpressionType,
        operation: fn(Rc<AbstractValue>, ExpressionType) -> Expression,
    ) -> Rc<AbstractValue> {
        if operand.is_top() || operand.is_bottom() {
            return operand;
        }
        let expression_size = operand.expression_size.saturating_add(1);
        Self::make_from(operation(operand, result_type), expression_size)
    }

    /// Creates an abstract value from a unary expression and keeps track of the size.
    #[logfn_inputs(TRACE)]
    fn make_unary(
        operand: Rc<AbstractValue>,
        operation: fn(Rc<AbstractValue>) -> Expression,
    ) -> Rc<AbstractValue> {
        if operand.is_top() || operand.is_bottom() {
            return operand;
        }
        let expression_size = operand.expression_size.saturating_add(1);
        Self::make_from(operation(operand), expression_size)
    }

    /// Creates an abstract value that represents a call to a function whose summary is not
    /// known at the time of the call. This is usually because the function has no MIR body,
    /// but can also be because the function is self recursive and thus gets called before it
    /// has been summarized.
    #[logfn_inputs(TRACE)]
    fn make_uninterpreted_call(
        callee: Rc<AbstractValue>,
        arguments: Vec<Rc<AbstractValue>>,
        result_type: ExpressionType,
        path: Rc<Path>,
    ) -> Rc<AbstractValue> {
        //todo: compute the expression size
        AbstractValue::make_from(
            Expression::UninterpretedCall {
                callee,
                arguments,
                result_type,
                path,
            },
            1,
        )
    }

    /// Returns a tag check on `operand`. If we can decide the presence/absence of tag, return
    /// TRUE/FALSE. Otherwise, returns an unknown tag check.
    #[logfn_inputs(TRACE)]
    pub fn make_tag_check(
        operand: Rc<AbstractValue>,
        tag: Tag,
        checking_presence: bool,
    ) -> Rc<AbstractValue> {
        // Ask the tag domain first; the answer is TOP when it cannot decide.
        let check_value = if checking_presence {
            operand.has_tag(&tag)
        } else {
            operand.does_not_have_tag(&tag)
        };
        if check_value.is_top() {
            // Cannot refine this tag check. Return again an unknown tag check.
            let expression_size = operand.expression_size.saturating_add(1);
            AbstractValue::make_from(
                Expression::UnknownTagCheck {
                    operand,
                    tag,
                    checking_presence,
                },
                expression_size,
            )
        } else {
            check_value
        }
    }

    /// Creates an abstract value from the given expression and size.
    /// Initializes the optional domains to None.
    /// When the size exceeds k_limits::MAX_EXPRESSION_SIZE the expression is widened
    /// to an unknown variable, after eagerly caching its interval and tag domains.
    #[logfn_inputs(TRACE)]
    pub fn make_from(expression: Expression, expression_size: u64) -> Rc<AbstractValue> {
        if expression_size > k_limits::MAX_EXPRESSION_SIZE {
            // u64::MAX is used by callers (e.g. make_binary) as a sentinel meaning
            // "widen silently"; only genuine overflows are logged.
            if expression_size < u64::MAX {
                trace!("expression {:?}", expression);
                info!("Maximum expression size exceeded");
            }
            // If the expression gets too large, refining it gets expensive and composing it
            // into other expressions leads to exponential growth. We therefore need to abstract
            // (go up in the lattice). We do that by making the expression a typed variable and
            // by eagerly computing and caching any other domains, such as the interval domain.
            let var_type = expression.infer_type();
            let val = Rc::new(AbstractValue {
                expression,
                expression_size,
                interval: RefCell::new(None),
                tags: RefCell::new(None),
            });
            let interval = val.get_as_interval();
            let tags = val.get_tags();
            Rc::new(AbstractValue {
                expression: Expression::Variable {
                    path: Path::new_computed(TOP.into()),
                    var_type,
                },
                expression_size: 1,
                interval: RefCell::new(Some(Rc::new(interval))),
                tags: RefCell::new(Some(Rc::new(tags))),
            })
        } else {
            Rc::new(AbstractValue {
                expression,
                expression_size,
                interval: RefCell::new(None),
                tags: RefCell::new(None),
            })
        }
    }

    /// Creates an abstract value that is a reference to the memory named by the given path.
    #[logfn_inputs(TRACE)]
    pub fn make_reference(path: Rc<Path>) -> Rc<AbstractValue> {
        // An offset path already wraps the reference value; just return it.
        if let PathEnum::Offset { value } = &path.value {
            return value.clone();
        }
        // The path length serves as the expression size of the reference.
        let path_length = path.path_length() as u64;
        AbstractValue::make_from(Expression::Reference(path), path_length)
    }

    /// Creates an abstract value about which nothing is known other than its type and address.
    #[logfn_inputs(TRACE)]
    pub fn make_typed_unknown(var_type: ExpressionType, path: Rc<Path>) -> Rc<AbstractValue> {
        let path = path.remove_initial_value_wrapper();
        AbstractValue::make_from(Expression::Variable { path, var_type }, 1)
    }

    /// Creates an abstract value about which nothing is known other than its type, address and that
    /// it is rooted in a parameter. This is used to refer to the value of a parameter as it was
    /// before any assignments to it. When transferred into a calling context, this value must be
    /// refined with the environment as it was at the start of the call.
    #[logfn_inputs(TRACE)]
    pub fn make_initial_parameter_value(
        var_type: ExpressionType,
        path: Rc<Path>,
    ) -> Rc<AbstractValue> {
        AbstractValue::make_from(Expression::InitialParameterValue { path, var_type }, 1)
    }

    /// Creates an abstract value which represents the result of comparing the left operand with
    /// the right operand, according to the rules of memcmp in unix.
    #[logfn_inputs(TRACE)]
    pub fn make_memcmp(
        left: Rc<AbstractValue>,
        right: Rc<AbstractValue>,
        length: Rc<AbstractValue>,
    ) -> Rc<AbstractValue> {
        let expression_size = length
            .expression_size
            .saturating_add(left.expression_size)
            .saturating_add(right.expression_size);
        AbstractValue::make_from(
            Expression::Memcmp {
                left,
                right,
                length,
            },
            expression_size,
        )
    }
}
/// Operations over abstract values: arithmetic, logical and bitwise operators with
/// on-the-fly simplification, overflow checks, tag tracking, and refinement of values
/// when transferred between calling contexts. Implemented below for `Rc<AbstractValue>`.
pub trait AbstractValueTrait: Sized {
    // Arithmetic and overflow predicates.
    fn addition(&self, other: Self) -> Self;
    fn add_overflows(&self, other: Self, target_type: ExpressionType) -> Self;
    fn add_tag(&self, tag: Tag) -> Self;
    // Logical and bitwise operators.
    fn and(&self, other: Self) -> Self;
    fn as_bool_if_known(&self) -> Option<bool>;
    fn as_int_if_known(&self) -> Option<Rc<AbstractValue>>;
    fn bit_and(&self, other: Self) -> Self;
    fn bit_not(&self, target_type: ExpressionType) -> Self;
    fn bit_or(&self, other: Self) -> Self;
    fn bit_xor(&self, other: Self) -> Self;
    fn cast(&self, target_type: ExpressionType) -> Self;
    fn conditional_expression(&self, consequent: Self, alternate: Self) -> Self;
    fn dereference(&self, target_type: ExpressionType) -> Self;
    fn divide(&self, other: Self) -> Self;
    // Tag queries.
    fn does_not_have_tag(&self, tag: &Tag) -> Rc<AbstractValue>;
    // Comparisons.
    fn equals(&self, other: Self) -> Self;
    fn greater_or_equal(&self, other: Self) -> Self;
    fn greater_than(&self, other: Self) -> Self;
    fn has_tag(&self, tag: &Tag) -> Rc<AbstractValue>;
    // Implication queries used when simplifying conditions.
    fn implies(&self, other: &Self) -> bool;
    fn implies_not(&self, other: &Self) -> bool;
    fn intrinsic_binary(&self, other: Self, name: KnownNames) -> Self;
    fn intrinsic_bit_vector_unary(&self, bit_length: u8, name: KnownNames) -> Self;
    fn intrinsic_floating_point_unary(&self, name: KnownNames) -> Self;
    fn inverse_implies(&self, other: &Rc<AbstractValue>) -> bool;
    fn inverse_implies_not(&self, other: &Rc<AbstractValue>) -> bool;
    // Lattice and structural predicates.
    fn is_bottom(&self) -> bool;
    fn is_compile_time_constant(&self) -> bool;
    fn is_contained_in_zeroed_heap_block(&self) -> bool;
    fn is_top(&self) -> bool;
    fn is_unit(&self) -> bool;
    fn join(&self, other: Self, path: &Rc<Path>) -> Self;
    fn less_or_equal(&self, other: Self) -> Self;
    fn less_than(&self, other: Self) -> Self;
    fn might_benefit_from_refinement(&self) -> bool;
    fn multiply(&self, other: Self) -> Self;
    fn mul_overflows(&self, other: Self, target_type: ExpressionType) -> Self;
    // NOTE: negate consumes self, unlike the other operators.
    fn negate(self) -> Self;
    fn not_equals(&self, other: Self) -> Self;
    fn logical_not(&self) -> Self;
    fn offset(&self, other: Self) -> Self;
    fn or(&self, other: Self) -> Self;
    fn record_heap_blocks_and_strings(&self, result: &mut HashSet<Rc<AbstractValue>>);
    fn remainder(&self, other: Self) -> Self;
    fn remove_conjuncts_that_depend_on(&self, variables: &HashSet<Rc<Path>>) -> Self;
    fn shift_left(&self, other: Self) -> Self;
    fn shl_overflows(&self, other: Self, target_type: ExpressionType) -> Self;
    fn shr(&self, other: Self, expression_type: ExpressionType) -> Self;
    fn shr_overflows(&self, other: Self, target_type: ExpressionType) -> Self;
    fn subtract(&self, other: Self) -> Self;
    fn sub_overflows(&self, other: Self, target_type: ExpressionType) -> Self;
    fn subset(&self, other: &Self) -> bool;
    fn switch(
        &self,
        cases: Vec<(Rc<AbstractValue>, Rc<AbstractValue>)>,
        default: Rc<AbstractValue>,
    ) -> Rc<AbstractValue>;
    fn try_to_retype_as(&self, target_type: &ExpressionType) -> Self;
    fn try_to_simplify_binary_op(
        &self,
        other: Self,
        const_op: fn(&ConstantDomain, &ConstantDomain) -> ConstantDomain,
        recursive_op: fn(&Self, Self) -> Self,
        operation: fn(Self, Self) -> Self,
    ) -> Self;
    fn try_to_distribute_binary_op(
        &self,
        other: Self,
        recursive_op: fn(&Self, Self) -> Self,
        operation: fn(Self, Self) -> Self,
    ) -> Self;
    // Auxiliary domain access.
    fn get_cached_interval(&self) -> Rc<IntervalDomain>;
    fn get_as_interval(&self) -> IntervalDomain;
    fn get_cached_tags(&self) -> Rc<TagDomain>;
    fn get_tags(&self) -> TagDomain;
    fn get_widened_subexpression(&self, path: &Rc<Path>) -> Option<Rc<AbstractValue>>;
    // Context transfer and refinement.
    fn refine_parameters_and_paths(
        &self,
        args: &[(Rc<Path>, Rc<AbstractValue>)],
        result: &Option<Rc<Path>>,
        pre_env: &Environment,
        post_env: &Environment,
        fresh: usize,
    ) -> Self;
    fn refine_with(&self, path_condition: &Self, depth: usize) -> Self;
    fn transmute(&self, target_type: ExpressionType) -> Self;
    fn try_resolve_as_byte_array(&self, _environment: &Environment) -> Option<Vec<u8>>;
    fn try_resolve_as_ref_to_str(&self, environment: &Environment) -> Option<Rc<str>>;
    fn uninterpreted_call(
        &self,
        arguments: Vec<Rc<AbstractValue>>,
        result_type: ExpressionType,
        path: Rc<Path>,
    ) -> Self;
    fn unsigned_modulo(&self, num_bits: u8) -> Self;
    fn unsigned_shift_left(&self, num_bits: u8) -> Self;
    fn unsigned_shift_right(&self, num_bits: u8) -> Self;
    fn uses(&self, variables: &HashSet<Rc<Path>>) -> bool;
    fn widen(&self, path: &Rc<Path>) -> Self;
}
impl AbstractValueTrait for Rc<AbstractValue> {
    /// Returns an element that is "self + other".
    #[logfn_inputs(TRACE)]
    fn addition(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
        // [x + 0] -> x
        if let Expression::CompileTimeConstant(ConstantDomain::U128(0))
        | Expression::CompileTimeConstant(ConstantDomain::I128(0)) = &other.expression
        {
            return self.clone();
        }
        // [0 + x] -> x
        if let Expression::CompileTimeConstant(ConstantDomain::U128(0))
        | Expression::CompileTimeConstant(ConstantDomain::I128(0)) = &self.expression
        {
            return other;
        }
        // [(x + c1) + c2] -> x + c3 where c3 = c1 + c2
        if let Expression::Add { left, right } = &self.expression {
            if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
                (&right.expression, &other.expression)
            {
                let folded = v1.add(v2);
                // Bottom means the constant fold did not succeed; fall through.
                if folded != ConstantDomain::Bottom {
                    return left.addition(Rc::new(folded.into()));
                }
            }
        }
        // [x + (-y)] -> x - y
        if let Expression::Neg { operand } = &other.expression {
            return self.subtract(operand.clone());
        }
        // No rewrite applied; fold constants or build an Add expression.
        self.try_to_simplify_binary_op(other, ConstantDomain::add, Self::addition, |l, r| {
            AbstractValue::make_binary(l, r, |left, right| Expression::Add { left, right })
        })
    }
    /// Returns an element that is true if "self + other" is not in range of target_type.
    #[logfn_inputs(TRACE)]
    fn add_overflows(
        &self,
        other: Rc<AbstractValue>,
        target_type: ExpressionType,
    ) -> Rc<AbstractValue> {
        // [x + 0] -> x, so no overflow is reported
        if let Expression::CompileTimeConstant(ConstantDomain::U128(0))
        | Expression::CompileTimeConstant(ConstantDomain::I128(0)) = &other.expression
        {
            return Rc::new(FALSE);
        }
        // [0 + x] -> x, so no overflow is reported
        if let Expression::CompileTimeConstant(ConstantDomain::U128(0))
        | Expression::CompileTimeConstant(ConstantDomain::I128(0)) = &self.expression
        {
            return Rc::new(FALSE);
        }
        // [(x + c1) + c2] -> x + c3 where c3 = c1 + c2
        if let Expression::Add { left, right } = &self.expression {
            if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
                (&right.expression, &other.expression)
            {
                let folded = v1.add(v2);
                if folded != ConstantDomain::Bottom {
                    return left.add_overflows(Rc::new(folded.into()), target_type);
                }
            }
        }
        // Two compile time constants: decide precisely in the constant domain.
        if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
            (&self.expression, &other.expression)
        {
            let result = v1.add_overflows(v2, &target_type);
            if result != ConstantDomain::Bottom {
                return Rc::new(result.into());
            }
        };
        // If the interval of the sum fits in the target type, overflow is ruled out.
        let interval = self.get_cached_interval().add(&other.get_cached_interval());
        if interval.is_contained_in(&target_type) {
            return Rc::new(FALSE);
        }
        // Otherwise keep the check symbolic.
        AbstractValue::make_typed_binary(
            self.clone(),
            other,
            target_type,
            |left, right, result_type| Expression::AddOverflows {
                left,
                right,
                result_type,
            },
        )
    }
/// Returns an element that is `self` attached with `tag`.
#[logfn_inputs(TRACE)]
fn add_tag(&self, tag: Tag) -> Rc<AbstractValue> {
if self.is_bottom() || self.is_top() {
self.clone()
} else {
AbstractValue::make_from(
Expression::TaggedExpression {
operand: self.clone(),
tag,
},
self.expression_size.saturating_add(1),
)
}
}
    /// Returns an element that is "self && other".
    #[logfn_inputs(TRACE)]
    fn and(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
        // True if x occurs syntactically as a conjunct (at any depth) of y.
        fn is_contained_in(x: &Rc<AbstractValue>, y: &Rc<AbstractValue>) -> bool {
            if *x == *y {
                return true;
            }
            if let Expression::And { left, right } = &y.expression {
                is_contained_in(x, left) || is_contained_in(x, right)
            } else {
                false
            }
        }
        // Do these tests here lest BOTTOM get simplified away.
        if self.is_bottom() {
            return self.clone();
        }
        if other.is_bottom() {
            return other;
        }
        let self_bool = self.as_bool_if_known();
        if let Some(false) = self_bool {
            // [false && other] -> false
            return Rc::new(FALSE);
        };
        let other_bool = other.as_bool_if_known();
        if let Some(false) = other_bool {
            // [self && false] -> false
            return Rc::new(FALSE);
        };
        if self_bool.unwrap_or(false) {
            if other_bool.unwrap_or(false) {
                // [true && true] -> true
                Rc::new(TRUE)
            } else {
                // [true && other] -> other
                other
            }
        } else if other_bool.unwrap_or(false) {
            // [self && true] -> self
            self.clone()
        } else {
            // Neither operand has a known Boolean value; try syntactic rewrites.
            // [x && (x && y)] -> x && y
            // [y && (x && y)] -> x && y
            // [(x && y) && x] -> x && y
            // [(x && y) && y] -> x && y
            if is_contained_in(self, &other) {
                return other;
            } else if is_contained_in(&other, self) {
                return self.clone();
            }
            match &self.expression {
                Expression::LogicalNot { operand } if *operand == other => {
                    // [!x && x] -> false
                    return Rc::new(FALSE);
                }
                Expression::Or { left: x, right: y } => {
                    // [(x || y) && x] -> x
                    // [(x || y) && y] -> y
                    if other.implies(x) || other.implies(y) {
                        return other;
                    }
                    if let Expression::LogicalNot { operand } = &other.expression {
                        // [(x || y) && (!x)] -> y
                        if *x == *operand {
                            return y.clone();
                        }
                        // [(x || y) && (!y)] -> x
                        if *y == *operand {
                            return x.clone();
                        }
                    }
                }
                _ => (),
            }
            match &other.expression {
                Expression::And { left: x, right: y } => {
                    // [x && (x && y)] -> x && y
                    // [y && (x && y)] -> x && y
                    if *x == *self || *y == *self {
                        return other.clone();
                    }
                }
                Expression::LogicalNot { operand } if *operand == *self => {
                    // [x && !x] -> false
                    return Rc::new(FALSE);
                }
                Expression::Or { left: x, right: y } => {
                    // [x && (x || y)] -> x
                    // [y && (x || y)] -> y
                    if *x == *self || *y == *self {
                        return self.clone();
                    }
                    if let Expression::LogicalNot { operand } = &self.expression {
                        // [(!x) && (x || y)] -> y
                        if *x == *operand {
                            return y.clone();
                        }
                        // [(!y) && (x || y) ] -> x
                        if *y == *operand {
                            return x.clone();
                        }
                    }
                    // [x && (x && y || x && z)] -> x && (y || z)
                    if let (
                        Expression::And { left: x1, right: y },
                        Expression::And { left: x2, right: z },
                    ) = (&x.expression, &y.expression)
                    {
                        if *self == *x1 && *self == *x2 {
                            return self.and(y.or(z.clone()));
                        }
                    }
                }
                _ => (),
            }
            match (&self.expression, &other.expression) {
                // [!x && !y] -> !(x || y)
                (Expression::LogicalNot { operand: x }, Expression::LogicalNot { operand: y }) => {
                    return x.or(y.clone()).logical_not();
                }
                // [!(x && y) && x] -> x
                // [!(x && y) && y] -> y
                (Expression::LogicalNot { operand }, _) => {
                    if let Expression::And { left: x, right: y } = &operand.expression {
                        if *x == other || *y == other {
                            return other;
                        }
                    }
                }
                // [(x || (y && z)) && y] -> [(x && y) || (y && z && y)] -> (x && y) || (y && z)
                (Expression::Or { left: x, right: yz }, y) => {
                    if let Expression::And { left: y1, right: z } = &yz.expression {
                        if y1.expression == *y {
                            return x.and(y1.clone()).or(y1.and(z.clone()));
                        }
                    }
                }
                _ => (),
            }
            // No rewrite applied; refine the right operand with respect to the left
            // before constructing an And expression.
            let other = if self_bool.is_none() {
                other.refine_with(self, 7)
            } else {
                other
            };
            AbstractValue::make_binary(self.clone(), other, |left, right| Expression::And {
                left,
                right,
            })
        }
    }
/// The Boolean value of this expression, if known, otherwise None.
#[logfn_inputs(TRACE)]
fn as_bool_if_known(&self) -> Option<bool> {
match self.expression {
Expression::CompileTimeConstant(ConstantDomain::True) => Some(true),
Expression::CompileTimeConstant(ConstantDomain::False) => Some(false),
_ => {
// todo: ask other domains about this (construct some if need be).
None
}
}
}
/// If the concrete Boolean value of this abstract value is known, return it as a UI28 constant,
/// otherwise return None.
#[logfn_inputs(TRACE)]
fn as_int_if_known(&self) -> Option<Rc<AbstractValue>> {
self.as_bool_if_known()
.map(|b| Rc::new(ConstantDomain::U128(b as u128).into()))
}
/// Returns an element that is "self & other".
#[logfn_inputs(TRACE)]
fn bit_and(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
let self_bool = self.as_bool_if_known();
if let Some(false) = self_bool {
// [false & y] -> false
return Rc::new(FALSE);
};
let other_bool = other.as_bool_if_known();
if let Some(false) = other_bool {
// [x & false] -> false
return Rc::new(FALSE);
};
if let Expression::CompileTimeConstant(ConstantDomain::I128(0))
| Expression::CompileTimeConstant(ConstantDomain::U128(0)) = self.expression
{
// [0 & y] -> 0
return self.clone();
}
if let Expression::CompileTimeConstant(ConstantDomain::I128(0))
| Expression::CompileTimeConstant(ConstantDomain::U128(0)) = other.expression
{
// [x & 0] -> 0
return other.clone();
}
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
let result = v1.bit_and(v2);
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
//todo: if self is a pointer then special case ptr & 1.
AbstractValue::make_binary(self.clone(), other, |left, right| Expression::BitAnd {
left,
right,
})
}
/// Returns an element that is "!self" where self is an integer.
#[logfn_inputs(TRACE)]
fn bit_not(&self, result_type: ExpressionType) -> Rc<AbstractValue> {
if let Expression::CompileTimeConstant(v1) = &self.expression {
let result = v1.bit_not(result_type.clone());
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
AbstractValue::make_typed_unary(self.clone(), result_type, |operand, result_type| {
Expression::BitNot {
operand,
result_type,
}
})
}
/// Returns an element that is "self | other".
#[logfn_inputs(TRACE)]
fn bit_or(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
// [x | 0] -> x
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
let result = v1.bit_or(v2);
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
AbstractValue::make_binary(self.clone(), other, |left, right| Expression::BitOr {
left,
right,
})
}
/// Returns an element that is "self ^ other".
#[logfn_inputs(TRACE)]
fn bit_xor(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
let result = v1.bit_xor(v2);
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
AbstractValue::make_binary(self.clone(), other, |left, right| Expression::BitXor {
left,
right,
})
}
    /// Returns an element that is "self as target_type".
    #[logfn_inputs(TRACE)]
    fn cast(&self, target_type: ExpressionType) -> Rc<AbstractValue> {
        // Fold in the constant domain when possible.
        if let Expression::CompileTimeConstant(v1) = &self.expression {
            let result = v1.cast(&target_type);
            if result != ConstantDomain::Bottom {
                return Rc::new(result.into());
            }
        };
        match &self.expression {
            Expression::Bottom => self.clone(),
            // Distribute the cast over conditional, join and switch expressions.
            Expression::ConditionalExpression {
                condition,
                consequent,
                alternate,
            } => condition.conditional_expression(
                consequent.cast(target_type.clone()),
                alternate.cast(target_type),
            ),
            Expression::Join { left, right, path } => left
                .cast(target_type.clone())
                .join(right.cast(target_type), &path),
            Expression::Switch {
                discriminator,
                cases,
                default,
            } => discriminator.switch(
                cases
                    .iter()
                    .map(|(case_val, result_val)| {
                        (case_val.clone(), result_val.cast(target_type.clone()))
                    })
                    .collect(),
                default.cast(target_type),
            ),
            _ => {
                match &self.expression {
                    // [(x as t1) as target_type] -> x as target_type if t1.max_value() >= target_type.max_value()
                    Expression::Cast {
                        operand,
                        target_type: t1,
                    } => {
                        if t1.is_integer()
                            && target_type.is_unsigned_integer()
                            && t1
                                .max_value()
                                .greater_or_equal(&target_type.max_value())
                                .as_bool_if_known()
                                .unwrap_or(false)
                        {
                            return operand.cast(target_type);
                        }
                    }
                    // [(x % c1) as t] -> (x as t) if c1 == t.modulo_value()
                    Expression::Rem { left, right } => {
                        if right
                            .equals(target_type.modulo_value())
                            .as_bool_if_known()
                            .unwrap_or(false)
                        {
                            return left.cast(target_type);
                        }
                    }
                    _ => (),
                }
                let source_type = self.expression.infer_type();
                if source_type != target_type {
                    if source_type == ExpressionType::NonPrimitive
                        && target_type == ExpressionType::ThinPointer
                    {
                        // Model the thin pointer as an unknown value rooted at field 0
                        // of this value's path.
                        let field0 = Path::new_field(Path::get_as_path(self.clone()), 0);
                        AbstractValue::make_typed_unknown(target_type, field0)
                    } else {
                        AbstractValue::make_typed_unary(
                            self.clone(),
                            target_type,
                            |operand, target_type| Expression::Cast {
                                operand,
                                target_type,
                            },
                        )
                    }
                } else {
                    // Casting to the value's own type is a no-op.
                    self.clone()
                }
            }
        }
    }
    /// Returns an element that is "if self { consequent } else { alternate }".
    #[logfn_inputs(TRACE)]
    fn conditional_expression(
        &self,
        mut consequent: Rc<AbstractValue>,
        mut alternate: Rc<AbstractValue>,
    ) -> Rc<AbstractValue> {
        if self.is_bottom() {
            // If the condition is impossible so is the expression.
            return self.clone();
        }
        // If either of the branches is impossible, it must be the other one.
        if consequent.is_bottom() {
            return alternate;
        }
        if alternate.is_bottom() {
            return consequent;
        }
        // If the branches are the same, the condition does not matter.
        if consequent.expression == alternate.expression {
            // [c ? x : x] -> x
            return consequent;
        }
        // If the condition is unknown, the rules below won't fire; the result is TOP.
        if self.is_top() {
            return self.clone();
        }
        if self.expression == consequent.expression {
            // [x ? x : y] -> x || y
            return self.or(alternate);
        }
        if self.expression == alternate.expression {
            // [y ? x : y] -> y && x
            return self.and(consequent);
        }
        let self_as_bool = self.as_bool_if_known();
        if self_as_bool == Some(true) {
            // [true ? x : y] -> x
            return consequent;
        } else if self_as_bool == Some(false) {
            // [false ? x : y] -> y
            return alternate;
        }
        // simplification rules are heuristic and can be non symmetric.
        // Negating the condition may simplify to a known value even when the
        // condition itself did not.
        let not_self = self.logical_not();
        let not_self_as_bool = not_self.as_bool_if_known();
        if not_self_as_bool == Some(false) {
            // [true ? x : y] -> x
            return consequent;
        } else if not_self_as_bool == Some(true) {
            // [false ? x : y] -> y
            return alternate;
        }
        if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
            (&consequent.expression, &alternate.expression)
        {
            match (v1, v2) {
                (ConstantDomain::True, ConstantDomain::False) => {
                    // [c ? true : false] -> c
                    return self.clone();
                }
                (ConstantDomain::False, ConstantDomain::True) => {
                    // [c ? false : true] -> !c
                    return self.logical_not();
                }
                _ => (),
            }
        }
        if let Expression::LogicalNot { operand } = &self.expression {
            // [if !(x) { a } else { b }] -> if x { b } else { a }
            return operand.conditional_expression(alternate, consequent);
        }
        // [if x == 0 { y } else { true }] -> x || y
        if let Expression::Equals { left: x, right: z } = &self.expression {
            if let Expression::CompileTimeConstant(ConstantDomain::U128(0)) = z.expression {
                if alternate.as_bool_if_known().unwrap_or(false) {
                    return x.or(consequent);
                }
            }
        }
        // [if x { true } else { y }] -> x || y
        if consequent.as_bool_if_known().unwrap_or(false) {
            return self.or(alternate);
        }
        // [if x { y } else { false }] -> x && y
        if !alternate.as_bool_if_known().unwrap_or(true) {
            return self.and(consequent);
        }
        if let Expression::Or { left: x, right: y } = &self.expression {
            match &consequent.expression {
                Expression::LogicalNot { operand } if *x == *operand => {
                    // [if x || y { !x } else { z }] -> [!x && y || !x && z] -> !x && (y || z)
                    return consequent.and(y.or(alternate));
                }
                Expression::ConditionalExpression {
                    condition,
                    consequent: a,
                    alternate: b,
                } => {
                    // [if x || y { if x {a} else {b} } else {b}] -> if x {a} else {b}
                    if *x == *condition && *b == alternate {
                        return x.conditional_expression(a.clone(), alternate);
                    }
                    // [if x || y { if y {a} else {b} } else {b}] -> if y {a} else {b}
                    if *y == *condition && *b == alternate {
                        return y.conditional_expression(a.clone(), alternate);
                    }
                    // [if x || y { if x {a} else {b} } else {a}] -> if y {b} else {a}
                    if *x == *condition && *a == alternate {
                        return y.conditional_expression(b.clone(), alternate);
                    }
                    // [if x || y { if y {a} else {b} } else {a}] -> if x {b} else {a}
                    if *y == *condition && *a == alternate {
                        return x.conditional_expression(b.clone(), alternate);
                    }
                }
                _ => (),
            }
        }
        // if self { consequent } else { alternate } implies self in the consequent and !self in the alternate
        if !matches!(self.expression, Expression::Or { .. }) {
            // NOTE(review): `<=` here versus `<` below — confirm the asymmetry is intentional.
            if consequent.expression_size <= k_limits::MAX_EXPRESSION_SIZE / 10 {
                consequent = consequent.refine_with(self, 0);
            }
            if alternate.expression_size < k_limits::MAX_EXPRESSION_SIZE / 10 {
                alternate = alternate.refine_with(&not_self, 0);
            }
        }
        if let Expression::ConditionalExpression {
            condition: c2,
            consequent: a,
            alternate: b,
        } = &consequent.expression
        {
            // [if self { if self { a } else { b } } else { c }] -> if self { a } else { c }
            if self.eq(c2) {
                return self.conditional_expression(a.clone(), alternate);
            }
            // [if self { if c2 { a } else { b } } else { b }] -> if condition && c2 { a } else { b }
            if b.eq(&alternate) {
                return self
                    .and(c2.clone())
                    .conditional_expression(a.clone(), alternate);
            }
            // [if self { if c2 { a } else { b } } else { a }] -> if self && !c2 { b } else { a }
            if a.eq(&alternate) {
                return self
                    .and(c2.logical_not())
                    .conditional_expression(b.clone(), alternate);
            }
        }
        if let Expression::ConditionalExpression {
            condition: c2,
            consequent: a,
            alternate: b,
        } = &alternate.expression
        {
            // [if self { consequent } else { if self { a } else { b } }] -> if self { consequent } else { b }
            if self.eq(c2) {
                return self.conditional_expression(consequent, b.clone());
            }
            // [if self { a } else { if c2 { a } else { b } }] -> if self || c2 { a } else { b }
            if a.eq(&consequent) {
                return self
                    .or(c2.clone())
                    .conditional_expression(consequent, b.clone());
            }
            // [if x == y { consequent } else { if x == z { a } else { b } } ] -> switch x { y => consequent, z => a, _ => b }
            if let (
                Expression::Equals { left: x, right: y },
                Expression::Equals { left: x1, right: z },
            ) = (&self.expression, &c2.expression)
            {
                if x.eq(x1) {
                    return x.switch(
                        vec![(y.clone(), consequent), (z.clone(), a.clone())],
                        b.clone(),
                    );
                }
            }
        }
        // [if x == y { consequent } else { switch x { z => a, _ => b } ] -> switch x { y => consequent, z => a, _ => b }
        if let (
            Expression::Equals { left: x, right: y },
            Expression::Switch {
                discriminator,
                cases,
                default,
            },
        ) = (&self.expression, &alternate.expression)
        {
            if x.eq(discriminator) {
                let mut cases = cases.clone();
                cases.push((y.clone(), consequent));
                return discriminator.switch(cases, default.clone());
            }
        }
        let mut expression_size = self
            .expression_size
            .saturating_add(consequent.expression_size)
            .saturating_add(alternate.expression_size);
        let mut consequent_type = consequent.expression.infer_type();
        let mut alternate_type = alternate.expression.infer_type();
        // In this context not primitive is expected to indicate that the value is a default value obtained
        // via an unspecialized summary from a generic function.
        if !consequent_type.is_primitive() && alternate_type.is_primitive() {
            consequent = consequent.try_to_retype_as(&alternate_type);
            consequent_type = consequent.expression.infer_type();
        } else if consequent_type.is_primitive() && !alternate_type.is_primitive() {
            alternate = alternate.try_to_retype_as(&consequent_type);
            alternate_type = alternate.expression.infer_type();
        };
        if consequent_type != alternate_type
            && !(consequent_type.is_integer() && alternate_type.is_integer())
            && !(consequent.is_top() || alternate.is_top())
        {
            debug!(
                "conditional with mismatched types {:?}: {:?} {:?}: {:?}",
                consequent_type, consequent, alternate_type, alternate
            );
        }
        let mut condition = self.clone();
        if expression_size > k_limits::MAX_EXPRESSION_SIZE {
            // Over the size limit: widen one of the three operands (via make_from with
            // a u64::MAX sentinel size) so the combined size fits, preferring to keep
            // the condition and consequent intact.
            let condition_plus_consequent = self
                .expression_size
                .saturating_add(consequent.expression_size);
            if condition_plus_consequent < k_limits::MAX_EXPRESSION_SIZE - 1 {
                alternate = AbstractValue::make_from(alternate.expression.clone(), u64::MAX);
                expression_size = condition_plus_consequent + 1;
            } else {
                let condition_plus_alternate = self
                    .expression_size
                    .saturating_add(alternate.expression_size);
                if condition_plus_alternate < k_limits::MAX_EXPRESSION_SIZE - 1 {
                    consequent = AbstractValue::make_from(consequent.expression.clone(), u64::MAX);
                    expression_size = condition_plus_alternate + 1;
                } else {
                    let consequent_plus_alternate = consequent
                        .expression_size
                        .saturating_add(alternate.expression_size);
                    if consequent_plus_alternate < k_limits::MAX_EXPRESSION_SIZE - 1 {
                        condition =
                            AbstractValue::make_from(condition.expression.clone(), u64::MAX);
                        expression_size = consequent_plus_alternate + 1;
                    }
                }
            }
        }
        AbstractValue::make_from(
            ConditionalExpression {
                condition,
                consequent,
                alternate,
            },
            expression_size,
        )
    }
    /// Attempts to construct an equivalent expression to self, but with the difference that
    /// the type inferred for the resulting expression will be the target type.
    /// If this is not possible, the original expression is returned.
    /// The need for this function arises from the difficulty of correctly typing variables that have
    /// generic types when constructed, but then leak out to caller contexts via summaries.
    #[logfn_inputs(TRACE)]
    fn try_to_retype_as(&self, target_type: &ExpressionType) -> Rc<AbstractValue> {
        match &self.expression {
            // For binary operators, retype both operands and rebuild the operation;
            // rebuilding re-runs the usual constructor simplifications.
            Expression::Add { left, right } => left
                .try_to_retype_as(target_type)
                .addition(right.try_to_retype_as(target_type)),
            Expression::BitAnd { left, right } => left
                .try_to_retype_as(target_type)
                .bit_and(right.try_to_retype_as(target_type)),
            Expression::BitOr { left, right } => left
                .try_to_retype_as(target_type)
                .bit_or(right.try_to_retype_as(target_type)),
            Expression::BitXor { left, right } => left
                .try_to_retype_as(target_type)
                .bit_xor(right.try_to_retype_as(target_type)),
            // A cast to NonPrimitive or ThinPointer is dropped in favor of retyping
            // its operand directly.
            Expression::Cast {
                operand,
                target_type: tt,
            } if *tt == ExpressionType::NonPrimitive || *tt == ExpressionType::ThinPointer => {
                operand.try_to_retype_as(target_type)
            }
            // Retype both branches and rebuild the conditional.
            Expression::ConditionalExpression {
                condition,
                consequent,
                alternate,
            } => {
                let consequent = consequent.try_to_retype_as(target_type);
                let alternate = alternate.try_to_retype_as(target_type);
                condition.conditional_expression(consequent, alternate)
            }
            Expression::Div { left, right } => left
                .try_to_retype_as(target_type)
                .divide(right.try_to_retype_as(target_type)),
            Expression::Join { path, left, right } => left
                .try_to_retype_as(target_type)
                .join(right.try_to_retype_as(target_type), &path),
            Expression::Mul { left, right } => left
                .try_to_retype_as(target_type)
                .multiply(right.try_to_retype_as(target_type)),
            Expression::Rem { left, right } => left
                .try_to_retype_as(target_type)
                .remainder(right.try_to_retype_as(target_type)),
            Expression::Shl { left, right } => left
                .try_to_retype_as(target_type)
                .shift_left(right.try_to_retype_as(target_type)),
            Expression::Sub { left, right } => left
                .try_to_retype_as(target_type)
                .subtract(right.try_to_retype_as(target_type)),
            Expression::Neg { operand } => operand.try_to_retype_as(target_type).negate(),
            // Unknown values tied to a path are simply re-made with the target type.
            Expression::InitialParameterValue { path, .. } => {
                AbstractValue::make_initial_parameter_value(target_type.clone(), path.clone())
            }
            // Retype every case result as well as the default.
            Expression::Switch {
                discriminator,
                cases,
                default,
            } => discriminator.switch(
                cases
                    .iter()
                    .map(|(case_val, result_val)| {
                        (case_val.clone(), result_val.try_to_retype_as(target_type))
                    })
                    .collect(),
                default.try_to_retype_as(target_type),
            ),
            // Retype the operand and re-attach the tag.
            Expression::TaggedExpression { operand, tag } => {
                operand.try_to_retype_as(target_type).add_tag(*tag)
            }
            Expression::Variable { path, .. } => {
                AbstractValue::make_typed_unknown(target_type.clone(), path.clone())
            }
            // Widened joins (and anything not handled above) are returned unchanged.
            Expression::WidenedJoin { .. } => self.clone(),
            _ => self.clone(),
        }
    }
    /// Returns an element that is "*self".
    #[logfn_inputs(TRACE)]
    fn dereference(&self, target_type: ExpressionType) -> Rc<AbstractValue> {
        match &self.expression {
            // BOTTOM and TOP dereference to themselves.
            Expression::Bottom | Expression::Top => self.clone(),
            Expression::Cast {
                operand,
                target_type: cast_type,
            } => {
                // Only pointer-shaped casts are expected here; anything else trips
                // the assumption check below.
                checked_assume!(
                    *cast_type == ExpressionType::NonPrimitive
                        || *cast_type == ExpressionType::ThinPointer
                );
                operand.dereference(target_type)
            }
            Expression::CompileTimeConstant(..) => self.clone(),
            // [*(c ? x : y)] -> c ? *x : *y
            Expression::ConditionalExpression {
                condition,
                consequent,
                alternate,
            } => condition.conditional_expression(
                consequent.dereference(target_type.clone()),
                alternate.dereference(target_type),
            ),
            // [*(x join y)] -> *x join *y
            Expression::Join { path, left, right } => left
                .dereference(target_type.clone())
                .join(right.dereference(target_type), path),
            Expression::Offset { .. } => {
                // Turn the offset value into a path and dereference that path;
                // if the deref path itself embeds a value, use it directly.
                let path = Path::get_as_path(self.clone());
                let deref_path = Path::new_deref(path, target_type.clone());
                if let PathEnum::Computed { value }
                | PathEnum::HeapBlock { value }
                | PathEnum::Offset { value } = &deref_path.value
                {
                    value.clone()
                } else {
                    AbstractValue::make_typed_unknown(target_type, deref_path)
                }
            }
            Expression::InitialParameterValue { path, .. } => {
                AbstractValue::make_initial_parameter_value(
                    target_type,
                    Path::new_qualified(path.clone(), Rc::new(PathSelector::Deref)),
                )
            }
            Expression::Reference(path) => {
                if target_type != ExpressionType::NonPrimitive {
                    // We don't have to shallow copy the value at the source path, so *&p is just p.
                    if let PathEnum::Computed { value }
                    | PathEnum::HeapBlock { value }
                    | PathEnum::Offset { value } = &path.value
                    {
                        return value.clone();
                    }
                }
                AbstractValue::make_typed_unknown(target_type, path.clone())
            }
            // Push the dereference into every case result and the default.
            Expression::Switch {
                discriminator,
                cases,
                default,
            } => discriminator.switch(
                cases
                    .iter()
                    .map(|(case_val, result_val)| {
                        (
                            case_val.clone(),
                            result_val.dereference(target_type.clone()),
                        )
                    })
                    .collect(),
                default.dereference(target_type),
            ),
            Expression::UninterpretedCall { path, .. } | Expression::Variable { path, .. } => {
                AbstractValue::make_typed_unknown(
                    target_type,
                    Path::new_qualified(path.clone(), Rc::new(PathSelector::Deref)),
                )
            }
            // Dereference the widened operand and re-widen the result.
            Expression::WidenedJoin { path, operand } => {
                operand.dereference(target_type).widen(path)
            }
            _ => {
                // Fall back to an unknown value, but log so missing cases can be added later.
                info!(
                    "found unhandled expression that is of type reference: {:?}",
                    self.expression
                );
                AbstractValue::make_typed_unknown(target_type, Path::new_computed(Rc::new(TOP)))
            }
        }
    }
    /// Returns an element that is "self / other".
    #[logfn_inputs(TRACE)]
    fn divide(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
        match (&self.expression, &other.expression) {
            // [(x * y) / x] -> y
            // [(x * y) / y] -> x
            (Expression::Mul { left: x, right: y }, _) => {
                if x.expression == other.expression {
                    return y.clone();
                } else if y.expression == other.expression {
                    return x.clone();
                }
            }
            (
                Expression::Cast {
                    operand,
                    target_type,
                },
                Expression::CompileTimeConstant(ConstantDomain::U128(c2)),
            ) => {
                // Look through a cast of a multiplication and try the same
                // cancellations, re-applying the cast to the simplified operand.
                if let Expression::Mul { left: x, right: y } = &operand.expression {
                    if x.expression == other.expression {
                        // [((x * y) as target_type) / x] -> y as target_type
                        return y.cast(target_type.clone());
                    } else if y.expression == other.expression {
                        // [((x * y) as target_type) / y] -> x as target_type
                        return x.cast(target_type.clone());
                    } else {
                        // [((c1 * y) as t) / c2] -> ((c1 / c2) * y) as t if c1 > c2 and c1 % c2 == 0
                        if let Expression::CompileTimeConstant(ConstantDomain::U128(c1)) =
                            &x.expression
                        {
                            if *c1 > *c2 && *c1 % *c2 == 0 {
                                return x
                                    .divide(other)
                                    .multiply(y.clone())
                                    .cast(target_type.clone());
                            }
                        }
                    }
                }
            }
            _ => (),
        }
        // No structural simplification applies; fold constants when possible,
        // otherwise construct a Div expression.
        self.try_to_simplify_binary_op(other, ConstantDomain::div, Self::divide, |l, r| {
            AbstractValue::make_binary(l, r, |left, right| Expression::Div { left, right })
        })
    }
/// Returns an abstract value that describes if `tag` is *not* attached to `self`.
#[logfn_inputs(TRACE)]
fn does_not_have_tag(&self, tag: &Tag) -> Rc<AbstractValue> {
self.has_tag(tag).logical_not()
}
    /// Returns an element that is "self == other".
    #[logfn_inputs(TRACE)]
    fn equals(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
        // Fold two compile time constants directly in the constant domain.
        if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
            (&self.expression, &other.expression)
        {
            return Rc::new(v1.equals(v2).into());
        };
        match (&self.expression, &other.expression) {
            // x == true -> x
            (_, Expression::CompileTimeConstant(ConstantDomain::True)) => {
                return self.clone();
            }
            // true == x -> x
            (Expression::CompileTimeConstant(ConstantDomain::True), _) => {
                return other.clone();
            }
            // x == false -> !x
            (_, Expression::CompileTimeConstant(ConstantDomain::False)) => {
                return self.logical_not();
            }
            // false == x -> !x
            (Expression::CompileTimeConstant(ConstantDomain::False), _) => {
                return other.logical_not();
            }
            // [(c ? v1: true) == 0] -> c && !v1
            // [(c ? v1: v2) == c3] -> !c if v1 != c3 && v2 == c3
            // [(c ? v1: v2) == c3] -> c if v1 == c3 && v2 != c3
            // [(c ? v1: v2) == c3] -> true if v1 == c3 && v2 == c3
            (
                Expression::ConditionalExpression {
                    condition: c,
                    consequent: v1,
                    alternate: v2,
                    ..
                },
                Expression::CompileTimeConstant(con),
            ) => {
                if let ConstantDomain::U128(0) = con {
                    if let Expression::CompileTimeConstant(ConstantDomain::True) = v2.expression {
                        return c.and(v1.logical_not());
                    }
                }
                let v2_eq_other = v2.equals(other.clone()).as_bool_if_known().unwrap_or(false);
                if v1
                    .not_equals(other.clone())
                    .as_bool_if_known()
                    .unwrap_or(false)
                    && v2_eq_other
                {
                    return c.logical_not();
                }
                if v1.equals(other.clone()).as_bool_if_known().unwrap_or(false) {
                    if v2
                        .not_equals(other.clone())
                        .as_bool_if_known()
                        .unwrap_or(false)
                    {
                        return c.clone();
                    } else if v2_eq_other {
                        return Rc::new(TRUE);
                    }
                }
                // NOTE(review): in this arm self is a ConditionalExpression and other is a
                // CompileTimeConstant, so x == y can never hold here — this check looks
                // unreachable; confirm before removing.
                let x = &self.expression;
                let y = &other.expression;
                if x == y && !x.infer_type().is_floating_point_number() {
                    return Rc::new(TRUE);
                }
            }
            // [c3 == (c ? v1: v2)] -> !c if v1 != c3 && v2 == c3
            // [c3 == (c ? v1: v2)] -> c if v1 == c3 && v2 != c3
            // [c3 == (c ? v1: v2)] -> true if v1 == c3 && v2 == c3
            (
                Expression::CompileTimeConstant(..),
                Expression::ConditionalExpression {
                    condition: c,
                    consequent: v1,
                    alternate: v2,
                    ..
                },
            ) => {
                let v2_eq_self = v2.equals(self.clone()).as_bool_if_known().unwrap_or(false);
                if v1
                    .not_equals(self.clone())
                    .as_bool_if_known()
                    .unwrap_or(false)
                    && v2_eq_self
                {
                    return c.logical_not();
                }
                if v1.equals(self.clone()).as_bool_if_known().unwrap_or(false) {
                    if v2
                        .not_equals(self.clone())
                        .as_bool_if_known()
                        .unwrap_or(false)
                    {
                        return c.clone();
                    } else if v2_eq_self {
                        return Rc::new(TRUE);
                    }
                }
                // NOTE(review): as above, x == y cannot hold in this arm; looks unreachable.
                let x = &self.expression;
                let y = &other.expression;
                if x == y && !x.infer_type().is_floating_point_number() {
                    return Rc::new(TRUE);
                }
            }
            // [!x == 0] -> x when x is Boolean. Canonicalize it to the latter.
            (
                Expression::LogicalNot { operand },
                Expression::CompileTimeConstant(ConstantDomain::U128(val)),
            ) => {
                if *val == 0 && operand.expression.infer_type() == ExpressionType::Bool {
                    return operand.clone();
                }
            }
            // [x == 0] -> !x when x is a Boolean. Canonicalize it to the latter.
            // [x == 1] -> x when x is a Boolean. Canonicalize it to the latter.
            (x, Expression::CompileTimeConstant(ConstantDomain::U128(val))) => {
                if x.infer_type() == ExpressionType::Bool {
                    if *val == 0 {
                        return self.logical_not();
                    } else if *val == 1 {
                        return self.clone();
                    }
                }
            }
            // [(if x { y } else { z }) == z] -> [if x { y == z } else { true }] -> !x || y == z
            (
                Expression::ConditionalExpression {
                    condition: x,
                    consequent: y,
                    alternate: z,
                },
                _,
            ) if *z == other => {
                return x.logical_not().or(y.equals(z.clone()));
            }
            // A Reference compared to a Cast simplifies to false.
            (Expression::Reference { .. }, Expression::Cast { .. })
            | (Expression::Cast { .. }, Expression::Reference { .. }) => {
                return Rc::new(FALSE);
            }
            (x, y) => {
                // If self and other are the same expression and the expression could not result in
                // NaN we can simplify this to true.
                if x == y && !x.infer_type().is_floating_point_number() {
                    return Rc::new(TRUE);
                }
            }
        }
        // No simplification applies; construct an Equals expression.
        AbstractValue::make_binary(self.clone(), other, |left, right| Expression::Equals {
            left,
            right,
        })
    }
/// Returns an element that is "self >= other".
#[logfn_inputs(TRACE)]
fn greater_or_equal(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
return Rc::new(v1.greater_or_equal(v2).into());
};
if let Some(result) = self
.get_cached_interval()
.greater_or_equal(&other.get_cached_interval())
{
return Rc::new(result.into());
}
AbstractValue::make_binary(self.clone(), other, |left, right| {
Expression::GreaterOrEqual { left, right }
})
}
/// Returns an element that is "self > other".
#[logfn_inputs(TRACE)]
fn greater_than(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
return Rc::new(v1.greater_than(v2).into());
};
if let Some(result) = self
.get_cached_interval()
.greater_than(other.get_cached_interval().as_ref())
{
return Rc::new(result.into());
}
AbstractValue::make_binary(self.clone(), other, |left, right| Expression::GreaterThan {
left,
right,
})
}
/// Returns an abstract value that describes whether `tag` is attached to `self` or not.
#[logfn_inputs(TRACE)]
fn has_tag(&self, tag: &Tag) -> Rc<AbstractValue> {
if self.is_bottom() || self.is_top() {
self.clone()
} else {
Rc::new(self.get_cached_tags().has_tag(tag).into())
}
}
/// Returns true if "self => other" is known at compile time to be true.
/// Returning false does not imply the implication is false, just that we do not know.
///
/// Important: keep the performance of this function proportional to the size of self.
#[logfn_inputs(TRACE)]
fn implies(&self, other: &Rc<AbstractValue>) -> bool {
if self.is_bottom() || self.is_top() || other.is_bottom() || other.is_top() {
return false;
}
// x => true, is always true
// false => x, is always true
// x => x, is always true
if other.as_bool_if_known().unwrap_or(false)
|| !self.as_bool_if_known().unwrap_or(true)
|| self.eq(other)
{
return true;
}
// x && y => x
// y && x => x
if let Expression::And { left, right } = &self.expression {
return left.implies(other) || right.implies(other);
}
false
}
/// Returns true if "self => !other" is known at compile time to be true.
/// Returning false does not imply the implication is false, just that we do not know.
#[logfn_inputs(TRACE)]
fn implies_not(&self, other: &Rc<AbstractValue>) -> bool {
if self.is_bottom() || self.is_top() || other.is_bottom() || other.is_top() {
return false;
}
// x => !false, is always true
// false => !x, is always true
if !other.as_bool_if_known().unwrap_or(true) || !self.as_bool_if_known().unwrap_or(true) {
return true;
};
// !x => !x
if let Expression::LogicalNot { ref operand } = self.expression {
return (**operand).eq(other);
}
false
}
/// Returns self.f(other) where f is an intrinsic binary function.
#[logfn_inputs(TRACE)]
fn intrinsic_binary(&self, other: Self, name: KnownNames) -> Self {
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
return Rc::new(v1.intrinsic_binary(v2, name).into());
};
AbstractValue::make_from(
Expression::IntrinsicBinary {
left: self.clone(),
right: other,
name,
},
self.expression_size.saturating_add(1),
)
}
/// Returns (self as u(8|16|32|64|128)).f() where f is an intrinsic bit vector unary function.
#[logfn_inputs(TRACE)]
fn intrinsic_bit_vector_unary(&self, bit_length: u8, name: KnownNames) -> Self {
if let Expression::CompileTimeConstant(v1) = &self.expression {
let result = v1.intrinsic_bit_vector_unary(bit_length, name);
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
AbstractValue::make_from(
Expression::IntrinsicBitVectorUnary {
operand: self.clone(),
bit_length,
name,
},
self.expression_size.saturating_add(1),
)
}
/// Returns self.f() where f is an intrinsic floating point unary function.
#[logfn_inputs(TRACE)]
fn intrinsic_floating_point_unary(&self, name: KnownNames) -> Self {
if let Expression::CompileTimeConstant(v1) = &self.expression {
let result = v1.intrinsic_floating_point_unary(name);
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
AbstractValue::make_from(
Expression::IntrinsicFloatingPointUnary {
operand: self.clone(),
name,
},
self.expression_size.saturating_add(1),
)
}
    /// Returns true if "!self => other" is known at compile time to be true.
    /// Returning false does not imply the implication is false, just that we do not know.
    #[logfn_inputs(TRACE)]
    fn inverse_implies(&self, other: &Rc<AbstractValue>) -> bool {
        // [!(!x) => other] -> [x => other]
        if let Expression::LogicalNot { operand } = &self.expression {
            return operand.implies(other);
        }
        if let Expression::Or { left, right } = &self.expression {
            // !(x || y) == (!x && !y), which implies z if !x => z or !y => z
            return left.inverse_implies(other) || right.inverse_implies(other);
        }
        // [!self => !operand] is handled by inverse_implies_not.
        if let Expression::LogicalNot { operand } = &other.expression {
            return self.inverse_implies_not(operand);
        }
        // !self => true is always true.
        // If self is known to be true, !self is false and the implication holds vacuously.
        if other.as_bool_if_known().unwrap_or(false) || self.as_bool_if_known().unwrap_or(false) {
            return true;
        }
        false
    }
/// Returns true if "!self => !other" is known at compile time to be true.
/// Returning false does not imply the implication is false, just that we do not know.
#[logfn_inputs(TRACE)]
fn inverse_implies_not(&self, other: &Rc<AbstractValue>) -> bool {
if self == other {
return true;
}
if let Expression::Or { left, right } = &self.expression {
// (!x && !y) => !z if !x => !z or !y => !z
return left.inverse_implies_not(other) || right.inverse_implies_not(other);
}
if let Expression::And { left, right } = &other.expression {
return self.inverse_implies_not(left) || self.implies_not(right);
}
false
}
/// True if the set of concrete values that correspond to this domain is empty.
#[logfn_inputs(TRACE)]
fn is_bottom(&self) -> bool {
match &self.expression {
Expression::Bottom => true,
Expression::Variable { path, .. } => {
if let PathEnum::Computed { value } = &path.value {
value.is_bottom()
} else {
false
}
}
_ => false,
}
}
/// True if this value is a compile time constant.
#[logfn_inputs(TRACE)]
fn is_compile_time_constant(&self) -> bool {
matches!(&self.expression, Expression::CompileTimeConstant(..))
}
/// True if the storage referenced by this expression is, or is contained in, a zeroed heap allocation.
#[logfn_inputs(TRACE)]
fn is_contained_in_zeroed_heap_block(&self) -> bool {
match &self.expression {
Expression::HeapBlock { is_zeroed, .. } => *is_zeroed,
Expression::Offset { left, .. } => left.is_contained_in_zeroed_heap_block(),
Expression::Reference(path)
| Expression::InitialParameterValue { path, .. }
| Expression::Variable { path, .. } => path.is_rooted_by_zeroed_heap_block(),
_ => false,
}
}
/// True if all possible concrete values are elements of the set corresponding to this domain.
#[logfn_inputs(TRACE)]
fn is_top(&self) -> bool {
match &self.expression {
Expression::Top => true,
Expression::Variable { path, .. } => {
if let PathEnum::Computed { value } = &path.value {
value.is_top()
} else {
false
}
}
_ => false,
}
}
/// True if this value is an empty tuple, which is the sole value of the unit type.
#[logfn_inputs(TRACE)]
fn is_unit(&self) -> bool {
matches!(
&self.expression,
Expression::CompileTimeConstant(ConstantDomain::Unit)
)
}
    /// Returns an abstract value whose corresponding set of concrete values includes all of the values
    /// corresponding to self and other.
    #[logfn_inputs(TRACE)]
    fn join(&self, other: Rc<AbstractValue>, path: &Rc<Path>) -> Rc<AbstractValue> {
        // [{} join y] -> y
        if self.is_bottom() {
            return other;
        }
        // [TOP join y] -> TOP
        if self.is_top() {
            return self.clone();
        }
        // [x join {}] -> x
        if other.is_bottom() {
            return self.clone();
        }
        // [x join x] -> x
        if (*self) == other {
            return other;
        }
        // [x join TOP] -> TOP
        if other.is_top() {
            return other;
        }
        // [(x has a subexpression that widens at path) join y] -> widened subexpression
        if let Some(widened_subexpression) = self.get_widened_subexpression(path) {
            return widened_subexpression;
        }
        // [x join (y has a subexpression that widens at path)] -> widened subexpression
        if let Some(widened_subexpression) = other.get_widened_subexpression(path) {
            return widened_subexpression;
        }
        // Re-associate nested joins to the right so chains do not pile up on the left.
        match (&self.expression, &other.expression) {
            // [(x join y) join (y join z)] -> x join (y join z)
            (
                Expression::Join {
                    left: x, right: y1, ..
                },
                Expression::Join { left: y2, .. },
            ) if y1.eq(y2) => {
                return x.join(other, path);
            }
            // [(x join y) join (z join a)] -> x join (y join (z join a))
            (
                Expression::Join {
                    left: x, right: y, ..
                },
                Expression::Join { .. },
            ) => {
                return x.join(y.join(other, path), path);
            }
            _ => {}
        }
        // No simplification applies; construct a Join expression.
        let expression_size = self.expression_size.saturating_add(other.expression_size);
        AbstractValue::make_from(
            Expression::Join {
                path: path.clone(),
                left: self.clone(),
                right: other,
            },
            expression_size,
        )
    }
/// Returns an element that is "self <= other".
#[logfn_inputs(TRACE)]
fn less_or_equal(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
return Rc::new(v1.less_or_equal(v2).into());
};
if let Some(result) = self
.get_cached_interval()
.less_equal(&other.get_cached_interval())
{
return Rc::new(result.into());
}
AbstractValue::make_binary(self.clone(), other, |left, right| Expression::LessOrEqual {
left,
right,
})
}
/// Returns an element that is self < other
#[logfn_inputs(TRACE)]
fn less_than(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
return Rc::new(v1.less_than(v2).into());
};
if let Some(result) = self
.get_cached_interval()
.less_than(other.get_cached_interval().as_ref())
{
return Rc::new(result.into());
}
AbstractValue::make_binary(self.clone(), other, |left, right| Expression::LessThan {
left,
right,
})
}
    /// Returns an element that is "!self" where self is a bool.
    #[logfn_inputs(TRACE)]
    fn logical_not(&self) -> Rc<AbstractValue> {
        // Fold a compile time constant in the constant domain, if it can handle it.
        if let Expression::CompileTimeConstant(v1) = &self.expression {
            let result = v1.logical_not();
            if result != ConstantDomain::Bottom {
                return Rc::new(result.into());
            }
        };
        match &self.expression {
            Expression::Bottom => self.clone(),
            // The comparison rewrites below are guarded by is_integer() because they do
            // not hold for floating point operands (NaN compares false on both sides).
            Expression::Equals { left: x, right: y } if x.expression.infer_type().is_integer() => {
                // [!(x == y)] -> x != y
                x.not_equals(y.clone())
            }
            Expression::GreaterThan { left: x, right: y }
                if x.expression.infer_type().is_integer() =>
            {
                // [!(x > y)] -> x <= y
                x.less_or_equal(y.clone())
            }
            Expression::GreaterOrEqual { left: x, right: y }
                if x.expression.infer_type().is_integer() =>
            {
                // [!(x >= y)] -> x < y
                x.less_than(y.clone())
            }
            Expression::LessThan { left: x, right: y }
                if x.expression.infer_type().is_integer() =>
            {
                // [!(x < y)] -> x >= y
                x.greater_or_equal(y.clone())
            }
            Expression::LessOrEqual { left: x, right: y }
                if x.expression.infer_type().is_integer() =>
            {
                // [!(x <= y)] -> x > y
                x.greater_than(y.clone())
            }
            Expression::LogicalNot { operand } => {
                // [!!x] -> x
                operand.clone()
            }
            Expression::Ne { left: x, right: y } if x.expression.infer_type().is_integer() => {
                // [!(x != y)] -> x == y
                x.equals(y.clone())
            }
            Expression::Or { left: x, right }
                if matches!(right.expression, Expression::LogicalNot { .. }) =>
            {
                // [!(x || !y)] -> !x && y
                if let Expression::LogicalNot { operand: y } = &right.expression {
                    x.logical_not().and(y.clone())
                } else {
                    // The guard above guarantees right is a LogicalNot.
                    unreachable!()
                }
            }
            Expression::Or { left, right: y }
                if matches!(left.expression, Expression::LogicalNot { .. }) =>
            {
                // [!(!x || y)] -> x && !y
                if let Expression::LogicalNot { operand: x } = &left.expression {
                    x.and(y.logical_not())
                } else {
                    // The guard above guarantees left is a LogicalNot.
                    unreachable!()
                }
            }
            // No rewrite applies; construct a LogicalNot expression.
            _ => AbstractValue::make_unary(self.clone(), |operand| Expression::LogicalNot {
                operand,
            }),
        }
    }
/// A heuristic for whether refining this value in the current environment could lead to simplification.
/// Used to reduce the cost of optional refinement.
#[logfn_inputs(TRACE)]
fn might_benefit_from_refinement(&self) -> bool {
match &self.expression {
Expression::Cast { operand, .. }
| Expression::TaggedExpression { operand, .. }
| Expression::UnknownTagCheck { operand, .. } => {
operand.might_benefit_from_refinement()
}
Expression::ConditionalExpression {
consequent,
alternate,
..
} => {
consequent.might_benefit_from_refinement()
|| alternate.might_benefit_from_refinement()
}
Expression::Join { left, right, .. } => {
left.might_benefit_from_refinement() || right.might_benefit_from_refinement()
}
Expression::Offset { .. } | Expression::Reference(..) => {
// These won't benefit directly, but are cheap and make the heuristic useful when
// they are directly embedded inside conditional or join expressions.
true
}
Expression::Switch {
discriminator,
cases,
default,
} => {
discriminator.might_benefit_from_refinement()
|| default.might_benefit_from_refinement()
|| cases.iter().any(|(_, v)| v.might_benefit_from_refinement())
}
Expression::UninterpretedCall { path, .. }
| Expression::UnknownModelField { path, .. }
| Expression::UnknownTagField { path, .. }
| Expression::Variable { path, .. }
| Expression::WidenedJoin { path, .. } => {
if let PathEnum::Computed { value } = &path.value {
return value.might_benefit_from_refinement();
}
false
}
_ => false,
}
}
    /// Returns an element that is "self * other".
    #[logfn_inputs(TRACE)]
    fn multiply(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
        // Identity and annihilator constants on the left.
        if let Expression::CompileTimeConstant(v1) = &self.expression {
            match v1 {
                // [0 * y] -> 0
                ConstantDomain::I128(0) | ConstantDomain::U128(0) => {
                    return self.clone();
                }
                // [1 * y] -> y
                ConstantDomain::I128(1) | ConstantDomain::U128(1) => {
                    return other;
                }
                _ => (),
            }
        }
        // Identity and annihilator constants on the right.
        if let Expression::CompileTimeConstant(c2) = &other.expression {
            match c2 {
                // [x * 0] -> 0
                ConstantDomain::I128(0) | ConstantDomain::U128(0) => {
                    return other;
                }
                // [x * 1] -> x
                ConstantDomain::I128(1) | ConstantDomain::U128(1) => {
                    return self.clone();
                }
                _ => {
                    // [(x / c1) * c2] -> x / (c1 / c2) if c1 > c2 && c1 % c2 == 0
                    if let Expression::Div { left: x, right } = &self.expression {
                        if let Expression::CompileTimeConstant(c1) = &right.expression {
                            if let (ConstantDomain::U128(c1), ConstantDomain::U128(c2)) = (c1, c2) {
                                if c1 > c2 && c1 % c2 == 0 {
                                    let c1_div_c2: Rc<AbstractValue> = Rc::new((c1 / c2).into());
                                    return x.divide(c1_div_c2);
                                }
                            }
                        }
                    }
                }
            }
        }
        // No structural simplification applies; fold constants when possible,
        // otherwise construct a Mul expression.
        self.try_to_simplify_binary_op(other, ConstantDomain::mul, Self::multiply, |l, r| {
            AbstractValue::make_binary(l, r, |left, right| Expression::Mul { left, right })
        })
    }
    /// Returns an element that is true if "self * other" is not in range of target_type.
    #[logfn_inputs(TRACE)]
    fn mul_overflows(
        &self,
        other: Rc<AbstractValue>,
        target_type: ExpressionType,
    ) -> Rc<AbstractValue> {
        // Multiplying by 0 or 1 on the left is treated as never overflowing.
        if let Expression::CompileTimeConstant(v1) = &self.expression {
            match v1 {
                // [0 * y] -> 0, so no overflow
                ConstantDomain::I128(0) | ConstantDomain::U128(0) => {
                    return Rc::new(FALSE);
                }
                // [1 * y] -> y, so no overflow
                ConstantDomain::I128(1) | ConstantDomain::U128(1) => {
                    return Rc::new(FALSE);
                }
                _ => (),
            }
        }
        // Likewise for 0 or 1 on the right.
        if let Expression::CompileTimeConstant(c2) = &other.expression {
            match c2 {
                // [x * 0] -> 0, so no overflow
                ConstantDomain::I128(0) | ConstantDomain::U128(0) => {
                    return Rc::new(FALSE);
                }
                // [x * 1] -> x, so no overflow
                ConstantDomain::I128(1) | ConstantDomain::U128(1) => {
                    return Rc::new(FALSE);
                }
                _ => (),
            }
        }
        // Fold two compile time constants in the constant domain, if it can decide.
        if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
            (&self.expression, &other.expression)
        {
            let result = v1.mul_overflows(v2, &target_type);
            if result != ConstantDomain::Bottom {
                return Rc::new(result.into());
            }
        };
        // If the interval of the product is known to fit in target_type, there is no overflow.
        let interval = self.get_cached_interval().mul(&other.get_cached_interval());
        if interval.is_contained_in(&target_type) {
            return Rc::new(FALSE);
        }
        // Otherwise construct a symbolic MulOverflows expression.
        AbstractValue::make_typed_binary(
            self.clone(),
            other,
            target_type,
            |left, right, result_type| Expression::MulOverflows {
                left,
                right,
                result_type,
            },
        )
    }
/// Returns an element that is "-self".
#[logfn_inputs(TRACE)]
fn negate(self) -> Rc<AbstractValue> {
if let Expression::CompileTimeConstant(v1) = &self.expression {
let result = v1.neg();
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
AbstractValue::make_unary(self, |operand| Expression::Neg { operand })
}
    /// Returns an element that is "self != other".
    #[logfn_inputs(TRACE)]
    fn not_equals(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
        // Fold two compile time constants directly in the constant domain.
        if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
            (&self.expression, &other.expression)
        {
            return Rc::new(v1.not_equals(v2).into());
        };
        match (&self.expression, &other.expression) {
            // x != true -> !x
            (_, Expression::CompileTimeConstant(ConstantDomain::True)) => {
                return self.logical_not();
            }
            // true != x -> !x
            (Expression::CompileTimeConstant(ConstantDomain::True), _) => {
                return other.logical_not();
            }
            // x != false -> x
            (_, Expression::CompileTimeConstant(ConstantDomain::False)) => {
                return self.clone();
            }
            // false != x -> x
            (Expression::CompileTimeConstant(ConstantDomain::False), _) => {
                return other.clone();
            }
            // [!x != 0] -> !x when x is Boolean. Canonicalize it to the latter.
            (
                Expression::LogicalNot { operand },
                Expression::CompileTimeConstant(ConstantDomain::U128(val)),
            ) => {
                if *val == 0 && operand.expression.infer_type() == ExpressionType::Bool {
                    return self.clone();
                }
            }
            // [x != 0] -> x when x is a Boolean. Canonicalize it to the latter.
            (x, Expression::CompileTimeConstant(ConstantDomain::U128(val))) => {
                if x.infer_type() == ExpressionType::Bool && *val == 0 {
                    return self.clone();
                }
            }
            // [(c ? v1: v2) != c3] -> !c if v1 == c3 && v2 != c3
            // [(c ? v1: v2) != c3] -> c if v1 != c3 && v2 == c3
            // [(c ? v1: v2) != c3] -> true if v1 != c3 && v2 != c3
            (
                Expression::ConditionalExpression {
                    condition: c,
                    consequent: v1,
                    alternate: v2,
                    ..
                },
                Expression::CompileTimeConstant(..),
            ) => {
                let v2_ne_other = v2
                    .not_equals(other.clone())
                    .as_bool_if_known()
                    .unwrap_or(false);
                if v1.equals(other.clone()).as_bool_if_known().unwrap_or(false) && v2_ne_other {
                    return c.logical_not();
                }
                if v1
                    .not_equals(other.clone())
                    .as_bool_if_known()
                    .unwrap_or(false)
                {
                    if v2.equals(other.clone()).as_bool_if_known().unwrap_or(false) {
                        return c.clone();
                    } else if v2_ne_other {
                        return Rc::new(TRUE);
                    }
                }
                // NOTE(review): in this arm self is a ConditionalExpression and other is a
                // CompileTimeConstant, so x == y can never hold here — this check looks
                // unreachable; confirm before removing.
                let x = &self.expression;
                let y = &other.expression;
                if x == y && !x.infer_type().is_floating_point_number() {
                    return Rc::new(FALSE);
                }
            }
            // [c3 != (c ? v1: v2)] -> !c if v1 == c3 && v2 != c3
            // [c3 != (c ? v1: v2)] -> c if v1 != c3 && v2 == c3
            // [c3 != (c ? v1: v2)] -> true if v1 != c3 && v2 != c3
            (
                Expression::CompileTimeConstant(..),
                Expression::ConditionalExpression {
                    condition: c,
                    consequent: v1,
                    alternate: v2,
                    ..
                },
            ) => {
                let v2_ne_self = v2
                    .not_equals(self.clone())
                    .as_bool_if_known()
                    .unwrap_or(false);
                if v1.equals(self.clone()).as_bool_if_known().unwrap_or(false) && v2_ne_self {
                    return c.logical_not();
                }
                if v1
                    .not_equals(self.clone())
                    .as_bool_if_known()
                    .unwrap_or(false)
                {
                    if v2.equals(self.clone()).as_bool_if_known().unwrap_or(false) {
                        return c.clone();
                    } else if v2_ne_self {
                        return Rc::new(TRUE);
                    }
                }
                // NOTE(review): as above, x == y cannot hold in this arm; looks unreachable.
                let x = &self.expression;
                let y = &other.expression;
                if x == y && !x.infer_type().is_floating_point_number() {
                    return Rc::new(FALSE);
                }
            }
            (x, y) => {
                // If self and other are the same expression and the expression could not result in
                // NaN we can simplify this to false.
                if x == y && !x.infer_type().is_floating_point_number() {
                    return Rc::new(FALSE);
                }
            }
        }
        // No simplification applies; construct a Ne expression.
        AbstractValue::make_binary(self.clone(), other, |left, right| Expression::Ne {
            left,
            right,
        })
    }
/// Returns an element that is "self.other".
#[logfn_inputs(TRACE)]
fn offset(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
if matches!(
other.expression,
Expression::CompileTimeConstant(ConstantDomain::I128(0))
) {
return self.clone();
}
if let Expression::Offset { left, right } = &self.expression {
AbstractValue::make_binary(left.clone(), right.addition(other), |left, right| {
Expression::Offset { left, right }
})
} else {
AbstractValue::make_binary(self.clone(), other, |left, right| Expression::Offset {
left,
right,
})
}
}
    /// Returns an element that is "self || other".
    ///
    /// Applies a battery of algebraic rewrites (identity, domination, absorption,
    /// complement and distribution rules over And/Or/LogicalNot) before falling
    /// back to constructing an unsimplified Or expression.
    #[logfn_inputs(TRACE)]
    #[allow(clippy::cognitive_complexity)]
    fn or(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
        // Fallback used when no rewrite rule applies: build the Or node as is.
        fn unsimplified(x: &Rc<AbstractValue>, y: Rc<AbstractValue>) -> Rc<AbstractValue> {
            AbstractValue::make_binary(x.clone(), y, |left, right| Expression::Or { left, right })
        }
        // True if x equals y or x occurs as a disjunct somewhere in y's Or tree.
        fn is_contained_in(x: &Rc<AbstractValue>, y: &Rc<AbstractValue>) -> bool {
            if *x == *y {
                return true;
            }
            if let Expression::Or { left, right } = &y.expression {
                is_contained_in(x, left) || is_contained_in(x, right)
            } else {
                false
            }
        }
        let self_as_bool = self.as_bool_if_known();
        if !self_as_bool.unwrap_or(true) {
            // [false || y] -> y
            other
        } else if self_as_bool.unwrap_or(false) || other.as_bool_if_known().unwrap_or(false) {
            // [x || true] -> true
            // [true || y] -> true
            Rc::new(TRUE)
        } else if other.is_top() || other.is_bottom() || !self.as_bool_if_known().unwrap_or(true) {
            // [self || TOP] -> TOP
            // [self || BOTTOM] -> BOTTOM
            // [false || other] -> other
            // NOTE(review): the third disjunct repeats the test of the first branch above,
            // which has already returned by this point, so it can never be true here — confirm.
            other
        } else if self.is_top() || self.is_bottom() || !other.as_bool_if_known().unwrap_or(true) {
            // [TOP || other] -> TOP
            // [BOTTOM || other] -> BOTTOM
            // [self || false] -> self
            self.clone()
        } else {
            // [x || x] -> x
            // NOTE(review): is_contained_in also fires when self is merely one disjunct of
            // other (e.g. [x || (x || y)]), in which case this returns x rather than x || y,
            // and it makes the identical test further down unreachable — confirm intent.
            if is_contained_in(self, &other) {
                return self.clone();
            }
            // [!x || x] -> true
            if let Expression::LogicalNot { operand } = &self.expression {
                if is_contained_in(operand, &other) {
                    return Rc::new(TRUE);
                }
            }
            // [x || !x] -> true
            if let Expression::LogicalNot { operand } = &other.expression {
                if is_contained_in(operand, &self) {
                    return Rc::new(TRUE);
                }
            } else if is_contained_in(&self.logical_not(), &other) {
                return Rc::new(TRUE);
            }
            // [x || (x || y)] -> x || y
            // [x || (y || x)] -> x || y
            // [(x || y) || y] -> x || y
            // [(x || y) || x] -> x || y
            // NOTE(review): this first test is unreachable — the same test already returned above.
            if is_contained_in(self, &other) {
                return other;
            } else if is_contained_in(&other, self) {
                return self.clone();
            }
            // [self || (x && y)] -> self || y if !self => x
            if let Expression::And { left, right: y } = &other.expression {
                if self.inverse_implies(left) {
                    return self.or(y.clone());
                }
            }
            // [x || (x && y)] -> x, etc.
            if self.inverse_implies_not(&other) {
                return self.clone();
            }
            match (&self.expression, &other.expression) {
                // [!x || x] -> true
                (Expression::LogicalNot { ref operand }, _) if (**operand).eq(&other) => {
                    Rc::new(TRUE)
                }
                // [x || !x] -> true
                (_, Expression::LogicalNot { ref operand }) if (**operand).eq(&self) => {
                    Rc::new(TRUE)
                }
                // [(x == y) || (x != y)] -> true if x is not a floating point
                (
                    Expression::Equals {
                        left: x1,
                        right: y1,
                    },
                    Expression::Ne {
                        left: x2,
                        right: y2,
                    },
                ) if x1.eq(x2)
                    && y1.eq(y2)
                    && !x1.expression.infer_type().is_floating_point_number() =>
                {
                    Rc::new(TRUE)
                }
                // [x >= y || x < y] -> true if x is not a floating point
                (
                    Expression::GreaterOrEqual {
                        left: x1,
                        right: y1,
                    },
                    Expression::LessThan {
                        left: x2,
                        right: y2,
                    },
                ) if x1.eq(x2)
                    && y1.eq(y2)
                    && !x1.expression.infer_type().is_floating_point_number() =>
                {
                    Rc::new(TRUE)
                }
                // [(x && y) || (x && !y)] -> x
                // [(x && y1) || (x && y2)] -> (x && (y1 || y2))
                // [(x && y1) || ((x && x3) && y2)] -> x && (y1 || (x3 && y2))
                (
                    Expression::And {
                        left: x1,
                        right: y1,
                    },
                    Expression::And {
                        left: x2,
                        right: y2,
                    },
                ) => {
                    if x1 == x2 {
                        if y1.logical_not().eq(y2) {
                            x1.clone()
                        } else {
                            x1.and(y1.or(y2.clone()))
                        }
                    } else if y1 == y2 {
                        // [(x1 && y) || (x2 && y)] -> (x1 || x2) && y
                        x1.or(x2.clone()).and(y1.clone())
                    } else {
                        if let Expression::And {
                            left: x2,
                            right: x3,
                        } = &x2.expression
                        {
                            if x1 == x2 {
                                return x1.and(y1.or(x3.and(y2.clone())));
                            }
                        }
                        unsimplified(self, other)
                    }
                }
                // [((c ? e : 1) == 1) || ((c ? e : 1) == 0)] -> !c || e == 0 || e == 1
                (
                    Expression::Equals {
                        left: l1,
                        right: r1,
                    },
                    Expression::Equals {
                        left: l2,
                        right: r2,
                    },
                ) if l1 == l2 && r1.expression.is_one() && r2.expression.is_zero() => {
                    if let Expression::ConditionalExpression {
                        condition: c,
                        consequent: e,
                        alternate: one,
                    } = &l1.expression
                    {
                        if one.expression.is_one() {
                            let not_c = c.logical_not();
                            let e_eq_0 = e.equals(Rc::new(ConstantDomain::U128(0).into()));
                            let e_eq_1 = e.equals(Rc::new(ConstantDomain::U128(1).into()));
                            return not_c.or(e_eq_0).or(e_eq_1);
                        }
                    }
                    unsimplified(self, other)
                }
                // [(x && y) || x] -> x
                // [(x && y) || y] -> y
                (Expression::And { left: x, right: y }, _) if *x == other || *y == other => other,
                // [x || (x && y)] -> x
                // [y || (x && y)] -> y
                (_, Expression::And { left: x, right: y }) if *x == *self || *y == *self => {
                    self.clone()
                }
                // [x || (!x && z)] -> x || z
                (_, Expression::And { left: y, right: z }) if self.inverse_implies(y) => {
                    self.or(z.clone())
                }
                // [(x && y) || (!x || !y)] -> true
                (Expression::And { left: x, right: y }, Expression::Or { left, right })
                    if x.inverse_implies(left) && y.inverse_implies(right) =>
                {
                    Rc::new(TRUE)
                }
                // [(x && !y) || !(x || y)] -> !y
                (Expression::And { left: x1, right }, Expression::LogicalNot { operand })
                    if matches!(right.expression, Expression::LogicalNot { .. })
                        && matches!(operand.expression, Expression::Or { .. }) =>
                {
                    if let (
                        Expression::LogicalNot { operand: y1 },
                        Expression::Or {
                            left: x2,
                            right: y2,
                        },
                    ) = (&right.expression, &operand.expression)
                    {
                        if x1.eq(x2) && y1.eq(y2) {
                            return right.clone();
                        }
                    }
                    unsimplified(self, other)
                }
                // [(x && !y) || y] -> (y || x)
                (Expression::And { left: x, right }, _) => match &right.expression {
                    Expression::LogicalNot { operand: y } if *y == other => y.or(x.clone()),
                    _ => unsimplified(self, other),
                },
                // [x || !(x || y)] -> x || !y
                (_, Expression::LogicalNot { operand }) => match &operand.expression {
                    Expression::Or { left: x2, right: y } if *self == *x2 => {
                        self.or(y.logical_not())
                    }
                    _ => unsimplified(self, other),
                },
                _ => unsimplified(self, other),
            }
        }
    }
    /// Adds any abstract heap addresses and strings found in the associated expression to the given set.
    ///
    /// `result` is an accumulator: existing entries are kept and new ones are added.
    #[logfn_inputs(TRACE)]
    fn record_heap_blocks_and_strings(&self, result: &mut HashSet<Rc<AbstractValue>>) {
        // Delegates entirely to the recursive walk over the expression tree.
        self.expression.record_heap_blocks_and_strings(result);
    }
/// Returns an element that is "self % other".
#[logfn_inputs(TRACE)]
fn remainder(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
// [(x as t) % c] -> x % c if c.is_power_of_two() && c <= t.modulo_value()
if let Expression::Cast {
operand: x,
target_type: t,
..
} = &self.expression
{
if let Expression::CompileTimeConstant(ConstantDomain::U128(c)) = &other.expression {
if c.is_power_of_two()
&& other
.less_or_equal(t.modulo_value())
.as_bool_if_known()
.unwrap_or(false)
{
return x.remainder(other);
}
}
}
self.try_to_simplify_binary_op(other, ConstantDomain::rem, Self::remainder, |l, r| {
AbstractValue::make_binary(l, r, |left, right| Expression::Rem { left, right })
})
}
/// If self refers to any variable in the given set, return TRUE otherwise return self.
/// In the case where self is a conjunction apply the function to the conjuncts and return
/// a new conjunction. The nett effect is that if self is a conjunction, such as a entry condition,
/// it is purged of any conjuncts that depend on variables (expected to be the set of variables
/// modified by a loop body).
#[logfn_inputs(TRACE)]
fn remove_conjuncts_that_depend_on(&self, variables: &HashSet<Rc<Path>>) -> Rc<AbstractValue> {
if let Expression::And { left, right } = &self.expression {
let purged_left = left.remove_conjuncts_that_depend_on(variables);
let purged_right = right.remove_conjuncts_that_depend_on(variables);
purged_left.and(purged_right)
} else if self.uses(variables) {
Rc::new(self::TRUE)
} else {
self.clone()
}
}
/// Returns an element that is "self << other".
#[logfn_inputs(TRACE)]
fn shift_left(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
let result = v1.shl(v2);
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
AbstractValue::make_binary(self.clone(), other, |left, right| Expression::Shl {
left,
right,
})
}
/// Returns an element that is true if "self << other" shifts away all bits.
#[logfn_inputs(TRACE)]
fn shl_overflows(
&self,
other: Rc<AbstractValue>,
target_type: ExpressionType,
) -> Rc<AbstractValue> {
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
let result = v1.shl_overflows(v2, &target_type);
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
let interval = other.get_cached_interval();
if interval.is_contained_in_width_of(&target_type) {
return Rc::new(FALSE);
}
AbstractValue::make_typed_binary(
self.clone(),
other,
target_type,
|left, right, result_type| Expression::ShlOverflows {
left,
right,
result_type,
},
)
}
/// Returns an element that is "self >> other".
#[logfn_inputs(TRACE)]
fn shr(&self, other: Rc<AbstractValue>, expression_type: ExpressionType) -> Rc<AbstractValue> {
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
let result = v1.shr(v2);
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
AbstractValue::make_typed_binary(
self.clone(),
other,
expression_type,
|left, right, result_type| Expression::Shr {
left,
right,
result_type,
},
)
}
/// Returns an element that is true if "self >> other" shifts away all bits.
#[logfn_inputs(TRACE)]
fn shr_overflows(
&self,
other: Rc<AbstractValue>,
target_type: ExpressionType,
) -> Rc<AbstractValue> {
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
let result = v1.shr_overflows(v2, &target_type);
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
let interval = &other.get_cached_interval();
if interval.is_contained_in_width_of(&target_type) {
return Rc::new(FALSE);
}
AbstractValue::make_typed_binary(
self.clone(),
other,
target_type,
|left, right, result_type| Expression::ShrOverflows {
left,
right,
result_type,
},
)
}
    /// Returns an element that is "self - other".
    ///
    /// Besides delegating to constant folding / distribution, this recognizes two
    /// pointer-difference shapes and recovers the byte offset directly.
    #[logfn_inputs(TRACE)]
    fn subtract(&self, other: Rc<AbstractValue>) -> Rc<AbstractValue> {
        // [0 - other] -> -other
        if let Expression::CompileTimeConstant(ConstantDomain::I128(0))
        | Expression::CompileTimeConstant(ConstantDomain::U128(0)) = &self.expression
        {
            return other.negate();
        };
        // [self - (- operand)] -> self + operand
        if let Expression::Neg { operand } = &other.expression {
            return self.addition(operand.clone());
        }
        // Pointer subtraction: both operands are pointers cast to usize.
        if let (
            Expression::Cast {
                operand: left,
                target_type: ExpressionType::Usize,
            },
            Expression::Cast {
                operand: right,
                target_type: ExpressionType::Usize,
            },
        ) = (&self.expression, &other.expression)
        {
            // [(offset(base, o) as usize) - (base as usize)] -> o
            if let (
                Expression::Offset {
                    left: base,
                    right: offset,
                },
                Expression::Reference(..),
            ) = (&left.expression, &right.expression)
            {
                if base.eq(right) {
                    return offset.clone();
                }
            }
            // Same recovery when both sides are thin-pointer variables: the left path is
            // an offset(base, o) and the right path's underlying value equals base.
            if let (
                Expression::Variable {
                    path: left_path,
                    var_type: ExpressionType::ThinPointer,
                },
                Expression::Variable {
                    path: right_path,
                    var_type: ExpressionType::ThinPointer,
                },
            ) = (&left.expression, &right.expression)
            {
                if let PathEnum::Offset { value } = &left_path.value {
                    if let Expression::Offset {
                        left: base,
                        right: offset,
                    } = &value.expression
                    {
                        if let PathEnum::Computed { value: rv }
                        | PathEnum::HeapBlock { value: rv }
                        | PathEnum::Offset { value: rv } = &right_path.value
                        {
                            if rv.eq(base) {
                                return offset.clone();
                            }
                        }
                    }
                }
            }
        }
        self.try_to_simplify_binary_op(other, ConstantDomain::sub, Self::subtract, |l, r| {
            AbstractValue::make_binary(l, r, |left, right| Expression::Sub { left, right })
        })
    }
/// Returns an element that is true if "self - other" is not in range of target_type.
#[logfn_inputs(TRACE)]
fn sub_overflows(
&self,
other: Rc<AbstractValue>,
target_type: ExpressionType,
) -> Rc<AbstractValue> {
if let (Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) =
(&self.expression, &other.expression)
{
let result = v1.sub_overflows(v2, &target_type);
if result != ConstantDomain::Bottom {
return Rc::new(result.into());
}
};
let interval = self.get_cached_interval().sub(&other.get_cached_interval());
if interval.is_contained_in(&target_type) {
return Rc::new(FALSE);
}
AbstractValue::make_typed_binary(
self.clone(),
other,
target_type,
|left, right, result_type| Expression::SubOverflows {
left,
right,
result_type,
},
)
}
/// True if all of the concrete values that correspond to self also correspond to other.
/// Note: !x.subset(y) does not imply y.subset(x).
#[logfn_inputs(TRACE)]
fn subset(&self, other: &Rc<AbstractValue>) -> bool {
if self.expression.eq(&other.expression) {
return true;
}
match (&self.expression, &other.expression) {
// The empty set is a subset of every other set.
(Expression::Bottom, _) => true,
// A non empty set is not a subset of the empty set.
(_, Expression::Bottom) => false,
// Every set is a subset of the universal set.
(_, Expression::Top) => true,
// The universal set is not a subset of any set other than the universal set.
(Expression::Top, _) => false,
// Widened expressions are equal if their paths are equal, regardless of their operand values.
(
Expression::WidenedJoin { path: p1, .. },
Expression::WidenedJoin { path: p2, .. },
) => *p1 == *p2,
// (condition ? consequent : alternate) is a subset of x if both consequent and alternate are subsets of x.
(
Expression::ConditionalExpression {
consequent,
alternate,
..
},
_,
) => {
// This is a conservative answer. False does not imply other.subset(self).
consequent.subset(other) && alternate.subset(other)
}
// x is a subset of (condition ? consequent : alternate) if x is a subset of consequent or alternate.
(
_,
Expression::ConditionalExpression {
consequent,
alternate,
..
},
) => {
// This is a conservative answer. False does not imply other.subset(self).
self.subset(&consequent) || self.subset(&alternate)
}
// x subset widen { z } if x subset z
(_, Expression::WidenedJoin { operand, .. }) => self.subset(&operand),
// (left join right) is a subset of x if both left and right are subsets of x.
(Expression::Join { left, right, .. }, _) => {
// This is a conservative answer. False does not imply other.subset(self).
left.subset(other) && right.subset(other)
}
// x is a subset of (left join right) if x is a subset of either left or right.
(_, Expression::Join { left, right, .. }) => {
// This is a conservative answer. False does not imply other.subset(self).
self.subset(&left) || self.subset(&right)
}
_ => false,
}
}
/// Constructs a switch value.
#[logfn_inputs(TRACE)]
#[logfn(TRACE)]
fn switch(
&self,
mut cases: Vec<(Rc<AbstractValue>, Rc<AbstractValue>)>,
default: Rc<AbstractValue>,
) -> Rc<AbstractValue> {
if self.is_compile_time_constant()
&& cases
.iter()
.all(|(case_val, _)| case_val.is_compile_time_constant())
{
return if let Some((_, case_result)) = cases.iter().find(|(case_val, _)| {
self.equals(case_val.clone())
.as_bool_if_known()
.unwrap_or(false)
}) {
case_result.clone()
} else {
default
};
}
if let Expression::Switch {
discriminator,
cases: default_cases,
default: default_default,
} = &default.expression
{
if self.eq(discriminator) {
cases.append(&mut default_cases.clone());
return self.switch(cases, default_default.clone());
}
}
let expression_size = self
.expression_size
.wrapping_add(default.expression_size)
.wrapping_add(cases.iter().fold(0u64, |acc, (x, y)| {
acc.wrapping_add(x.expression_size)
.wrapping_add(y.expression_size)
}));
AbstractValue::make_from(
Expression::Switch {
discriminator: self.clone(),
cases,
default,
},
expression_size,
)
}
/// Tries to simplify operation(self, other) by constant folding or by distribution
/// the operation over self and/or other.
/// Returns operation(self, other) if no simplification is possible.
#[logfn(TRACE)]
fn try_to_simplify_binary_op(
&self,
other: Rc<AbstractValue>,
const_op: fn(&ConstantDomain, &ConstantDomain) -> ConstantDomain,
recursive_op: fn(&Rc<AbstractValue>, Rc<AbstractValue>) -> Rc<AbstractValue>,
operation: fn(Rc<AbstractValue>, Rc<AbstractValue>) -> Rc<AbstractValue>,
) -> Rc<AbstractValue> {
match (&self.expression, &other.expression) {
(Expression::CompileTimeConstant(v1), Expression::CompileTimeConstant(v2)) => {
let result = const_op(v1, v2);
if result == ConstantDomain::Bottom {
self.try_to_distribute_binary_op(other, recursive_op, operation)
} else {
Rc::new(result.into())
}
}
_ => self.try_to_distribute_binary_op(other, recursive_op, operation),
}
}
/// Tries to distribute the operation over self and/or other.
/// Return operation(self, other) if no simplification is possible.
#[logfn(TRACE)]
fn try_to_distribute_binary_op(
&self,
other: Rc<AbstractValue>,
recursive_op: fn(&Rc<AbstractValue>, Rc<AbstractValue>) -> Rc<AbstractValue>,
operation: fn(Rc<AbstractValue>, Rc<AbstractValue>) -> Rc<AbstractValue>,
) -> Rc<AbstractValue> {
if let ConditionalExpression {
condition,
consequent,
alternate,
} = &self.expression
{
return condition.conditional_expression(
recursive_op(consequent, other.clone()),
recursive_op(alternate, other.clone()),
);
};
if let ConditionalExpression {
condition,
consequent,
alternate,
} = &other.expression
{
return condition.conditional_expression(
recursive_op(self, consequent.clone()),
recursive_op(self, alternate.clone()),
);
};
if let Join { left, right, path } = &self.expression {
return recursive_op(left, other.clone()).join(recursive_op(right, other), &path);
}
if let Join { left, right, path } = &other.expression {
return recursive_op(self, left.clone()).join(recursive_op(self, right.clone()), &path);
}
operation(self.clone(), other)
}
/// Gets or constructs an interval that is cached.
#[logfn_inputs(TRACE)]
fn get_cached_interval(&self) -> Rc<IntervalDomain> {
{
let mut cached_interval = self.interval.borrow_mut();
let interval_opt = cached_interval.as_ref();
if let Some(interval) = interval_opt {
return interval.clone();
}
let interval = self.get_as_interval();
*cached_interval = Some(Rc::new(interval));
}
self.get_cached_interval()
}
    /// Constructs an element of the Interval domain for simple expressions.
    ///
    /// Expressions with no usable interval abstraction fall through to BOTTOM.
    #[logfn_inputs(TRACE)]
    fn get_as_interval(&self) -> IntervalDomain {
        match &self.expression {
            // NOTE(review): Top maps to interval BOTTOM here, the same as the catch-all
            // arm — presumably "no interval information available"; confirm.
            Expression::Top => interval_domain::BOTTOM,
            // Arithmetic expressions map onto the corresponding interval operations.
            Expression::Add { left, right } => left.get_as_interval().add(&right.get_as_interval()),
            Expression::CompileTimeConstant(ConstantDomain::I128(val)) => (*val).into(),
            Expression::CompileTimeConstant(ConstantDomain::U128(val)) => (*val).into(),
            // Branching constructs widen over the possible outcomes.
            Expression::ConditionalExpression {
                consequent,
                alternate,
                ..
            } => consequent
                .get_as_interval()
                .widen(&alternate.get_as_interval()),
            Expression::Join { left, right, .. } => {
                left.get_as_interval().widen(&right.get_as_interval())
            }
            Expression::Mul { left, right } => left.get_as_interval().mul(&right.get_as_interval()),
            Expression::Neg { operand } => operand.get_as_interval().neg(),
            Expression::Sub { left, right } => left.get_as_interval().sub(&right.get_as_interval()),
            Expression::Switch { cases, default, .. } => cases
                .iter()
                .fold(default.get_as_interval(), |acc, (_, result)| {
                    acc.widen(&result.get_as_interval())
                }),
            // Tags do not affect the numeric value.
            Expression::TaggedExpression { operand, .. } => operand.get_as_interval(),
            // For a widened join, try to retain whichever bound stayed stable across
            // the fixed point iteration and drop the other.
            Expression::WidenedJoin { operand, .. } => {
                let interval = operand.get_as_interval();
                if interval.is_bottom() {
                    return interval;
                }
                if let Expression::Join { left, .. } = &operand.expression {
                    let left_interval = left.get_as_interval();
                    if left_interval.is_bottom() {
                        return interval_domain::BOTTOM;
                    }
                    match (left_interval.lower_bound(), interval.lower_bound()) {
                        (Some(llb), Some(lb)) if llb == lb => {
                            // The lower bound is finite and does not change as a result of the fixed
                            // point computation, so we can keep it, but we remove the upper bound.
                            return interval.remove_upper_bound();
                        }
                        _ => (),
                    }
                    match (left_interval.upper_bound(), interval.upper_bound()) {
                        (Some(lub), Some(ub)) if lub == ub => {
                            // The upper bound is finite and does not change as a result of the fixed
                            // point computation, so we can keep it, but we remove the lower bound.
                            return interval.remove_lower_bound();
                        }
                        _ => (),
                    }
                }
                interval
            }
            _ => interval_domain::BOTTOM,
        }
    }
/// Gets or constructs a tag domain element that is cached.
#[logfn_inputs(TRACE)]
fn get_cached_tags(&self) -> Rc<TagDomain> {
{
let mut cached_tags = self.tags.borrow_mut();
let tags_opt = cached_tags.as_ref();
if let Some(tags) = tags_opt {
return tags.clone();
}
let tags = self.get_tags();
*cached_tags = Some(Rc::new(tags));
}
self.get_cached_tags()
}
    /// Constructs an element of the tag domain for simple expressions.
    ///
    /// Proceeds in two phases: a first match handles expressions that do not
    /// propagate tags or need special propagation; a second match handles the
    /// standard case of combining tags propagated from child nodes.
    #[logfn_inputs(TRACE)]
    fn get_tags(&self) -> TagDomain {
        let exp_tag_prop_opt = self.expression.get_tag_propagation();
        // First deal with expressions that do not propagate tags or have special propagation behavior.
        match &self.expression {
            Expression::Top
            | Expression::Bottom
            | Expression::CompileTimeConstant { .. }
            | Expression::HeapBlock { .. }
            | Expression::HeapBlockLayout { .. }
            | Expression::Reference { .. }
            | Expression::UnknownTagCheck { .. } => return TagDomain::empty_set(),
            Expression::InitialParameterValue { .. }
            | Expression::UnknownModelField { .. }
            | Expression::UnknownTagField { .. }
            | Expression::Variable { .. } => {
                // A variable is an unknown value of a place in memory.
                // Therefore, the associated tags are also unknown.
                return TagDomain::top();
            }
            Expression::ConditionalExpression {
                condition,
                consequent,
                alternate,
            } => {
                // For each tag A, whether the conditional expression has tag A or not is
                // (condition has tag A) or ((consequent has tag A) join (alternate has tag A)).
                return condition.get_cached_tags().or(&consequent
                    .get_cached_tags()
                    .join(&alternate.get_cached_tags()));
            }
            Expression::Join { left, right, .. } => {
                // For each tag A, whether the join expression has tag A or not is
                // ((left has tag A) join (right has tag A)).
                return left
                    .get_cached_tags()
                    .join(right.get_cached_tags().as_ref());
            }
            Expression::Switch {
                discriminator,
                cases,
                default,
            } => {
                // For each tag A, whether the switch expression has tag A or not is
                // (discriminator has tag A) or ((case_0 has tag A) join .. join (case_n has tag A) join (default has tag A)).
                let mut tags_from_cases = (*default.get_cached_tags()).clone();
                for case in cases {
                    tags_from_cases = tags_from_cases.join(case.1.get_cached_tags().as_ref())
                }
                return discriminator.get_cached_tags().or(&tags_from_cases);
            }
            Expression::TaggedExpression { operand, tag } => {
                // The whole point of a tagged expression: the operand's tags plus this tag.
                return operand.get_cached_tags().add_tag(*tag);
            }
            Expression::WidenedJoin { operand, .. } => {
                // Widening does not change which tags are present.
                let tags = operand.get_cached_tags();
                return (*tags).clone();
            }
            _ => {
                // Every remaining expression kind is expected to declare its propagation mask.
                verify!(exp_tag_prop_opt.is_some());
            }
        }
        let exp_tag_prop = exp_tag_prop_opt.unwrap();
        // Then deal with expressions that have standard propagation behavior, i.e., taking tags
        // from children nodes.
        match &self.expression {
            Expression::Add { left, right }
            | Expression::AddOverflows { left, right, .. }
            | Expression::And { left, right }
            | Expression::BitAnd { left, right }
            | Expression::BitOr { left, right }
            | Expression::BitXor { left, right }
            | Expression::Div { left, right }
            | Expression::Equals { left, right }
            | Expression::GreaterOrEqual { left, right }
            | Expression::GreaterThan { left, right }
            | Expression::IntrinsicBinary { left, right, .. }
            | Expression::LessOrEqual { left, right }
            | Expression::LessThan { left, right }
            | Expression::Mul { left, right }
            | Expression::MulOverflows { left, right, .. }
            | Expression::Ne { left, right }
            | Expression::Or { left, right }
            | Expression::Offset { left, right }
            | Expression::Rem { left, right }
            | Expression::Shl { left, right }
            | Expression::ShlOverflows { left, right, .. }
            | Expression::Shr { left, right, .. }
            | Expression::ShrOverflows { left, right, .. }
            | Expression::Sub { left, right }
            | Expression::SubOverflows { left, right, .. } => left
                .get_cached_tags()
                .propagate_through(exp_tag_prop)
                .or(&right.get_cached_tags().propagate_through(exp_tag_prop)),
            Expression::BitNot { operand, .. }
            | Expression::Cast { operand, .. }
            | Expression::IntrinsicBitVectorUnary { operand, .. }
            | Expression::IntrinsicFloatingPointUnary { operand, .. }
            | Expression::LogicalNot { operand, .. }
            | Expression::Neg { operand, .. } => {
                operand.get_cached_tags().propagate_through(exp_tag_prop)
            }
            Expression::Memcmp {
                left,
                right,
                length,
            } => left
                .get_cached_tags()
                .propagate_through(exp_tag_prop)
                .or(&right.get_cached_tags().propagate_through(exp_tag_prop))
                .or(&length.get_cached_tags().propagate_through(exp_tag_prop)),
            Expression::UninterpretedCall {
                callee, arguments, ..
            } => {
                let mut tags = callee.get_cached_tags().propagate_through(exp_tag_prop);
                for argument in arguments {
                    tags = tags.or(&argument.get_cached_tags().propagate_through(exp_tag_prop))
                }
                tags
            }
            _ => {
                // Any expression kind not listed above was already handled by the first match.
                verify_unreachable!();
            }
        }
    }
    /// Returns a subexpression that is a widened expression at the given path.
    /// Returns None if no such expression can be found.
    ///
    /// Performs a depth-first search over the expression tree, short-circuiting
    /// on the first match via `or_else` chains.
    #[logfn_inputs(TRACE)]
    fn get_widened_subexpression(&self, path: &Rc<Path>) -> Option<Rc<AbstractValue>> {
        match &self.expression {
            // Leaves with no children cannot contain a widened subexpression.
            Expression::Bottom | Expression::Top => None,
            // Binary expressions: search the left child, then the right.
            Expression::Add { left, right }
            | Expression::AddOverflows { left, right, .. }
            | Expression::And { left, right }
            | Expression::BitAnd { left, right }
            | Expression::BitOr { left, right }
            | Expression::BitXor { left, right }
            | Expression::Div { left, right }
            | Expression::Equals { left, right }
            | Expression::GreaterOrEqual { left, right }
            | Expression::GreaterThan { left, right }
            | Expression::IntrinsicBinary { left, right, .. }
            | Expression::Join { left, right, .. }
            | Expression::LessOrEqual { left, right }
            | Expression::LessThan { left, right }
            | Expression::Mul { left, right }
            | Expression::MulOverflows { left, right, .. }
            | Expression::Ne { left, right }
            | Expression::Offset { left, right }
            | Expression::Or { left, right }
            | Expression::Rem { left, right }
            | Expression::Shl { left, right }
            | Expression::ShlOverflows { left, right, .. }
            | Expression::Shr { left, right, .. }
            | Expression::ShrOverflows { left, right, .. }
            | Expression::Sub { left, right }
            | Expression::SubOverflows { left, right, .. } => left
                .get_widened_subexpression(path)
                .or_else(|| right.get_widened_subexpression(path)),
            // Unary expressions: search the single operand.
            Expression::BitNot { operand, .. }
            | Expression::Cast { operand, .. }
            | Expression::IntrinsicBitVectorUnary { operand, .. }
            | Expression::IntrinsicFloatingPointUnary { operand, .. }
            | Expression::Neg { operand }
            | Expression::LogicalNot { operand }
            | Expression::TaggedExpression { operand, .. }
            | Expression::UnknownTagCheck { operand, .. } => {
                operand.get_widened_subexpression(path)
            }
            Expression::CompileTimeConstant(..) => None,
            // Search condition first, then the two branches.
            Expression::ConditionalExpression {
                condition,
                consequent,
                alternate,
            } => condition.get_widened_subexpression(path).or_else(|| {
                consequent
                    .get_widened_subexpression(path)
                    .or_else(|| alternate.get_widened_subexpression(path))
            }),
            Expression::HeapBlock { .. } => None,
            Expression::HeapBlockLayout {
                length, alignment, ..
            } => length
                .get_widened_subexpression(path)
                .or_else(|| alignment.get_widened_subexpression(path)),
            Expression::Memcmp {
                left,
                right,
                length,
            } => left.get_widened_subexpression(path).or_else(|| {
                right
                    .get_widened_subexpression(path)
                    .or_else(|| length.get_widened_subexpression(path))
            }),
            Expression::Reference(..) => None,
            Expression::InitialParameterValue { .. } => None,
            // Search the discriminator, then the default, then every case pair.
            Expression::Switch {
                discriminator,
                cases,
                default,
            } => discriminator.get_widened_subexpression(path).or_else(|| {
                default.get_widened_subexpression(path).or_else(|| {
                    cases.iter().find_map(|(case_val, result_val)| {
                        case_val
                            .get_widened_subexpression(path)
                            .or_else(|| result_val.get_widened_subexpression(path))
                    })
                })
            }),
            Expression::UninterpretedCall {
                callee,
                arguments: args,
                ..
            } => callee.get_widened_subexpression(path).or_else(|| {
                args.iter()
                    .find_map(|arg| arg.get_widened_subexpression(path))
            }),
            Expression::UnknownModelField { .. } => None,
            Expression::UnknownTagField { .. } => None,
            Expression::Variable { .. } => None,
            // A widened join matches only when it was widened at the requested path.
            Expression::WidenedJoin { path: p, .. } => {
                if p.eq(path) {
                    Some(self.clone())
                } else {
                    None
                }
            }
        }
    }
/// Returns a value that is simplified (refined) by replacing parameter values
/// with their corresponding argument values. If no refinement is possible
/// the result is simply a clone of this value.
#[logfn_inputs(TRACE)]
fn refine_parameters_and_paths(
&self,
args: &[(Rc<Path>, Rc<AbstractValue>)],
result: &Option<Rc<Path>>,
pre_env: &Environment,
post_env: &Environment,
// An offset to add to locals from the called function so that they do not clash with caller locals.
fresh: usize,
) -> Rc<AbstractValue> {
match &self.expression {
Expression::Bottom | Expression::Top => self.clone(),
Expression::Add { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.addition(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::AddOverflows {
left,
right,
result_type,
} => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.add_overflows(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
result_type.clone(),
),
Expression::And { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.and(right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)),
Expression::BitAnd { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.bit_and(right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)),
Expression::BitNot {
operand,
result_type,
} => operand
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.bit_not(result_type.clone()),
Expression::BitOr { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.bit_or(right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)),
Expression::BitXor { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.bit_xor(right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)),
Expression::Cast {
operand,
target_type,
} => operand
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.cast(target_type.clone()),
Expression::CompileTimeConstant(..) => self.clone(),
Expression::ConditionalExpression {
condition,
consequent,
alternate,
} => condition
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.conditional_expression(
consequent.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
alternate.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::Div { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.divide(right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)),
Expression::Equals { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.equals(right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)),
Expression::GreaterOrEqual { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.greater_or_equal(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::GreaterThan { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.greater_than(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::HeapBlock {
abstract_address,
is_zeroed,
} => AbstractValue::make_from(
Expression::HeapBlock {
abstract_address: *abstract_address + fresh,
is_zeroed: *is_zeroed,
},
1,
),
Expression::HeapBlockLayout {
length,
alignment,
source,
} => AbstractValue::make_from(
Expression::HeapBlockLayout {
length: length
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
alignment: alignment
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
source: *source,
},
1,
),
Expression::InitialParameterValue { path, var_type } => {
let refined_path =
path.refine_parameters_and_paths(args, result, pre_env, pre_env, fresh);
if let PathEnum::Computed { value }
| PathEnum::HeapBlock { value }
| PathEnum::Offset { value } = &refined_path.value
{
return value.clone();
} else if let Some(val) = pre_env.value_at(&refined_path) {
return val.clone();
}
// If the path does not have a known value in the pre environment, make an unknown
// value. If the path is still rooted in parameter make sure that it does not get
// affected by subsequent side effects on the parameter.
if refined_path.is_rooted_by_parameter() {
// This will not get refined again
AbstractValue::make_initial_parameter_value(var_type.clone(), refined_path)
} else {
// The value is rooted in a local variable leaked from the callee or
// in a static. In the latter case we want lookup_and_refine_value to
// to see this. In the former, refinement is a no-op.
AbstractValue::make_typed_unknown(var_type.clone(), refined_path)
}
}
Expression::IntrinsicBinary { left, right, name } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.intrinsic_binary(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
*name,
),
Expression::IntrinsicBitVectorUnary {
operand,
bit_length,
name,
} => operand
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.intrinsic_bit_vector_unary(*bit_length, *name),
Expression::IntrinsicFloatingPointUnary { operand, name } => operand
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.intrinsic_floating_point_unary(*name),
Expression::Join { left, right, path } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.join(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
&path.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::LessOrEqual { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.less_or_equal(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::LessThan { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.less_than(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::LogicalNot { operand } => operand
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.logical_not(),
Expression::Memcmp {
left,
right,
length,
} => {
let refined_left =
left.refine_parameters_and_paths(args, result, pre_env, post_env, fresh);
let refined_right =
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh);
let refined_length =
length.refine_parameters_and_paths(args, result, pre_env, post_env, fresh);
let arr1 = refined_left.try_resolve_as_byte_array(post_env);
let arr2 = refined_right.try_resolve_as_byte_array(post_env);
if let (Some(arr1), Some(arr2)) = (&arr1, &arr2) {
return Rc::new(ConstantDomain::I128(arr1.cmp(&arr2) as i32 as i128).into());
}
let str1 = refined_left.try_resolve_as_ref_to_str(post_env);
let str2 = refined_right.try_resolve_as_ref_to_str(post_env);
if let (Some(str1), Some(str2)) = (str1, str2) {
return Rc::new(ConstantDomain::I128(str1.cmp(&str2) as i32 as i128).into());
}
AbstractValue::make_memcmp(refined_left, refined_right, refined_length)
}
Expression::Mul { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.multiply(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::MulOverflows {
left,
right,
result_type,
} => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.mul_overflows(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
result_type.clone(),
),
Expression::Ne { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.not_equals(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::Neg { operand } => operand
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.negate(),
Expression::Offset { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.offset(right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)),
Expression::Or { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.or(right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)),
Expression::Reference(path) => {
// if the path is a parameter, the reference is an artifact of its type
// and needs to be removed in the call context
match &path.value {
PathEnum::Parameter { ordinal } => args[*ordinal - 1].1.clone(),
_ => {
let refined_path = path
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh);
AbstractValue::make_reference(refined_path)
}
}
}
Expression::Rem { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.remainder(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::Shl { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.shift_left(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::ShlOverflows {
left,
right,
result_type,
} => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.shl_overflows(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
result_type.clone(),
),
Expression::Shr {
left,
right,
result_type,
} => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.shr(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
result_type.clone(),
),
Expression::ShrOverflows {
left,
right,
result_type,
} => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.shr_overflows(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
result_type.clone(),
),
Expression::Sub { left, right } => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.subtract(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::SubOverflows {
left,
right,
result_type,
} => left
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.sub_overflows(
right.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
result_type.clone(),
),
Expression::Switch {
discriminator,
cases,
default,
} => discriminator
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.switch(
cases
.iter()
.map(|(case_val, result_val)| {
(
case_val.refine_parameters_and_paths(
args, result, pre_env, post_env, fresh,
),
result_val.refine_parameters_and_paths(
args, result, pre_env, post_env, fresh,
),
)
})
.collect(),
default.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
),
Expression::TaggedExpression { operand, tag } => operand
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.add_tag(*tag),
Expression::UninterpretedCall {
callee,
arguments,
result_type,
path,
} => {
let refined_callee =
callee.refine_parameters_and_paths(args, result, pre_env, post_env, fresh);
let refined_arguments = arguments
.iter()
.map(|arg| {
arg.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
})
.collect();
let refined_path =
path.refine_parameters_and_paths(args, result, pre_env, post_env, fresh);
refined_callee.uninterpreted_call(
refined_arguments,
result_type.clone(),
refined_path,
)
}
Expression::UnknownModelField { path, default } => {
let refined_path =
path.refine_parameters_and_paths(args, result, pre_env, post_env, fresh);
if !matches!(&refined_path.value, PathEnum::Computed { .. }) {
if let Some(val) = post_env.value_at(&refined_path) {
// This environment has a value for the model field.
val.clone()
} else if refined_path.is_rooted_by_parameter() {
// Keep passing the buck to the next caller.
AbstractValue::make_from(
Expression::UnknownModelField {
path: refined_path,
default: default.clone(),
},
default.expression_size.saturating_add(1),
)
} else {
// The buck stops here and the environment does not have a value for model field.
default.clone()
}
} else {
AbstractValue::make_from(
Expression::UnknownModelField {
path: refined_path,
default: default.clone(),
},
1,
)
}
}
Expression::UnknownTagCheck {
operand,
tag,
checking_presence,
} => AbstractValue::make_tag_check(
operand.refine_parameters_and_paths(args, result, pre_env, post_env, fresh),
*tag,
*checking_presence,
),
Expression::UnknownTagField { path } => {
let refined_path =
path.refine_parameters_and_paths(args, result, pre_env, post_env, fresh);
if !matches!(&refined_path.value, PathEnum::Computed { .. }) {
if let Some(val) = post_env.value_at(&refined_path) {
// This environment has a value for the tag field.
val.clone()
} else if !refined_path.is_rooted_by_parameter() {
// Return the dummy untagged value if refined_path is not rooted by a function parameter.
Rc::new(DUMMY_UNTAGGED_VALUE)
} else {
// Otherwise, return again an unknown tag field.
AbstractValue::make_from(
Expression::UnknownTagField { path: refined_path },
1,
)
}
} else {
AbstractValue::make_from(Expression::UnknownTagField { path: refined_path }, 1)
}
}
Expression::Variable { path, var_type } => {
let refined_path =
path.refine_parameters_and_paths(args, result, pre_env, post_env, fresh);
if let PathEnum::Computed { value }
| PathEnum::HeapBlock { value }
| PathEnum::Offset { value } = &refined_path.value
{
value.clone()
} else if let Some(val) = post_env.value_at(&refined_path) {
val.clone()
} else if refined_path == *path {
self.clone()
} else {
AbstractValue::make_typed_unknown(var_type.clone(), refined_path)
}
}
Expression::WidenedJoin { path, operand, .. } => operand
.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)
.widen(&path.refine_parameters_and_paths(args, result, pre_env, post_env, fresh)),
}
}
    /// Returns a domain that is simplified (refined) by using the current path conditions
    /// (conditions known to be true in the current context). If no refinement is possible
    /// the result is simply a clone of this domain.
    ///
    /// This function is performance critical and involves a tricky trade-off: Invoking it
    /// is expensive, particularly when expressions get large (hence k_limits::MAX_EXPRESSION_SIZE).
    /// One reason for this is that expressions are traversed without doing any kind of occurs check,
    /// so expressions that are not large in memory usage (because of sharing) can still be too large
    /// to traverse. Currently there is no really efficient way to add an occurs check, so the
    /// k-limit approach is cheaper, at the cost of losing precision.
    ///
    /// On the other hand, getting rid of this refinement (and the k-limits it needs) will cause
    /// a lot of expressions to get much larger because of joining and composing. This will increase
    /// the cost of refine_parameters, which is essential. Likewise, it will also increase the cost
    /// of refine_paths, which ensures that paths stay unique (dealing with aliasing is expensive).
    #[logfn_inputs(TRACE)]
    fn refine_with(&self, path_condition: &Self, depth: usize) -> Rc<AbstractValue> {
        if self.is_bottom() || self.is_top() {
            return self.clone();
        };
        //do not use false path conditions to refine things
        checked_precondition!(path_condition.as_bool_if_known().is_none());
        if depth >= k_limits::MAX_REFINE_DEPTH {
            info!("max refine depth exceeded during refine_with");
            //todo: perhaps this should go away.
            // right now it deals with the situation where some large expressions have sizes
            // that are not accurately tracked. These really should get fixed.
            return self.clone();
        }
        // In this context path_condition is true
        if path_condition.eq(self) {
            return Rc::new(TRUE);
        }
        // If the path context constrains the self expression to be equal to a constant, just
        // return the constant.
        if let Expression::Equals { left, right } = &path_condition.expression {
            if let Expression::CompileTimeConstant(..) = &left.expression {
                if self.eq(right) {
                    return left.clone();
                }
            }
            if let Expression::CompileTimeConstant(..) = &right.expression {
                if self.eq(left) {
                    return right.clone();
                }
            }
        }
        // Traverse the self expression, looking for recursive refinement opportunities.
        // Important, keep the traversal as trivial as possible and put optimizations in
        // the transfer functions. Also, keep the transfer functions constant in cost as
        // much as possible. Any time they are not, this function becomes quadratic and
        // performance becomes terrible.
        match &self.expression {
            // Already maximally simplified; the guard above normally catches these.
            Expression::Bottom | Expression::Top => self.clone(),
            Expression::Add { left, right } => left
                .refine_with(path_condition, depth + 1)
                .addition(right.refine_with(path_condition, depth + 1)),
            Expression::AddOverflows {
                left,
                right,
                result_type,
            } => left.refine_with(path_condition, depth + 1).add_overflows(
                right.refine_with(path_condition, depth + 1),
                result_type.clone(),
            ),
            Expression::And { left, right } => left
                .refine_with(path_condition, depth + 1)
                .and(right.refine_with(path_condition, depth + 1)),
            Expression::BitAnd { left, right } => left
                .refine_with(path_condition, depth + 1)
                .bit_and(right.refine_with(path_condition, depth + 1)),
            Expression::BitNot {
                operand,
                result_type,
            } => operand
                .refine_with(path_condition, depth + 1)
                .bit_not(result_type.clone()),
            Expression::BitOr { left, right } => left
                .refine_with(path_condition, depth + 1)
                .bit_or(right.refine_with(path_condition, depth + 1)),
            Expression::BitXor { left, right } => left
                .refine_with(path_condition, depth + 1)
                .bit_xor(right.refine_with(path_condition, depth + 1)),
            Expression::Cast {
                operand,
                target_type,
            } => operand
                .refine_with(path_condition, depth + 1)
                .cast(target_type.clone()),
            // Constants are not affected by path conditions.
            Expression::CompileTimeConstant(..) => self.clone(),
            Expression::ConditionalExpression {
                condition,
                consequent,
                alternate,
            } => {
                // The implies checks should be redundant, but currently help with precision
                // presumably because they are not k-limited like the refinement of the path
                // condition. They might also help with performance because they avoid
                // two refinements and the expensive and constructor, if they succeed.
                // If they mostly fail, they will cost more than they save. It is not
                // clear at this point if they are a win, but they are kept for the sake of precision.
                if path_condition.implies(&condition) {
                    consequent.refine_with(path_condition, depth + 1)
                } else if path_condition.implies_not(&condition) {
                    alternate.refine_with(path_condition, depth + 1)
                } else {
                    let refined_condition = condition.refine_with(path_condition, depth + 1);
                    let refined_condition_as_bool = refined_condition.as_bool_if_known();
                    let refined_consequent = consequent.refine_with(path_condition, depth + 1);
                    if refined_condition_as_bool.unwrap_or(false) {
                        return refined_consequent;
                    }
                    let refined_alternate = alternate.refine_with(path_condition, depth + 1);
                    if !refined_condition_as_bool.unwrap_or(true) {
                        return refined_alternate;
                    }
                    refined_condition.conditional_expression(refined_consequent, refined_alternate)
                }
            }
            Expression::Div { left, right } => left
                .refine_with(path_condition, depth + 1)
                .divide(right.refine_with(path_condition, depth + 1)),
            Expression::Equals { left, right } => left
                .refine_with(path_condition, depth + 1)
                .equals(right.refine_with(path_condition, depth + 1)),
            Expression::GreaterOrEqual { left, right } => left
                .refine_with(path_condition, depth + 1)
                .greater_or_equal(right.refine_with(path_condition, depth + 1)),
            Expression::GreaterThan { left, right } => left
                .refine_with(path_condition, depth + 1)
                .greater_than(right.refine_with(path_condition, depth + 1)),
            Expression::IntrinsicBinary { left, right, name } => left
                .refine_with(path_condition, depth + 1)
                .intrinsic_binary(right.refine_with(path_condition, depth + 1), *name),
            Expression::IntrinsicBitVectorUnary {
                operand,
                bit_length,
                name,
            } => operand
                .refine_with(path_condition, depth + 1)
                .intrinsic_bit_vector_unary(*bit_length, *name),
            // Heap block identities are not refined by path conditions.
            Expression::HeapBlock { .. } => self.clone(),
            Expression::HeapBlockLayout {
                length,
                alignment,
                source,
            } => AbstractValue::make_from(
                Expression::HeapBlockLayout {
                    length: length.refine_with(path_condition, depth + 1),
                    alignment: alignment.refine_with(path_condition, depth + 1),
                    source: *source,
                },
                1,
            ),
            Expression::IntrinsicFloatingPointUnary { operand, name } => operand
                .refine_with(path_condition, depth + 1)
                .intrinsic_floating_point_unary(*name),
            Expression::Join { left, right, path } => left
                .refine_with(path_condition, depth + 1)
                .join(right.refine_with(path_condition, depth + 1), &path),
            Expression::LessOrEqual { left, right } => left
                .refine_with(path_condition, depth + 1)
                .less_or_equal(right.refine_with(path_condition, depth + 1)),
            Expression::LessThan { left, right } => left
                .refine_with(path_condition, depth + 1)
                .less_than(right.refine_with(path_condition, depth + 1)),
            Expression::Memcmp {
                left,
                right,
                length,
            } => {
                // Only the length is refined here; left/right refinement happens in
                // refine_parameters_and_paths where constant resolution can occur.
                let refined_length = length.refine_with(path_condition, depth + 1);
                AbstractValue::make_memcmp(left.clone(), right.clone(), refined_length)
            }
            Expression::Mul { left, right } => left
                .refine_with(path_condition, depth + 1)
                .multiply(right.refine_with(path_condition, depth + 1)),
            Expression::MulOverflows {
                left,
                right,
                result_type,
            } => left.refine_with(path_condition, depth + 1).mul_overflows(
                right.refine_with(path_condition, depth + 1),
                result_type.clone(),
            ),
            Expression::Ne { left, right } => left
                .refine_with(path_condition, depth + 1)
                .not_equals(right.refine_with(path_condition, depth + 1)),
            Expression::Neg { operand } => operand.refine_with(path_condition, depth + 1).negate(),
            Expression::LogicalNot { operand } => {
                operand.refine_with(path_condition, depth + 1).logical_not()
            }
            Expression::Offset { left, right } => left
                .refine_with(path_condition, depth + 1)
                .offset(right.refine_with(path_condition, depth + 1)),
            Expression::Or { left, right } => {
                // Ideally the constructor should do the simplifications, but in practice or
                // expressions grow quite large due to composition and it really helps to avoid
                // refining the right expression whenever possible, even at the expense of
                // more checks here. If the performance of implies and implies_not should become
                // significantly worse than it is now, this could become a performance bottle neck.
                if path_condition.implies(&left) || path_condition.implies(&right) {
                    Rc::new(TRUE)
                } else if path_condition.implies_not(&left) {
                    if path_condition.implies_not(&right) {
                        Rc::new(FALSE)
                    } else {
                        right.refine_with(path_condition, depth + 1)
                    }
                } else if path_condition.implies_not(&right) {
                    left.refine_with(path_condition, depth + 1)
                } else {
                    left.refine_with(path_condition, depth + 1)
                        .or(right.refine_with(path_condition, depth + 1))
                }
            }
            Expression::Reference(..) | Expression::InitialParameterValue { .. } => {
                // We could refine their paths, which will increase precision, but it does not
                // currently seem cost-effective. This does not affect soundness.
                self.clone()
            }
            Expression::Rem { left, right } => left
                .refine_with(path_condition, depth + 1)
                .remainder(right.refine_with(path_condition, depth + 1)),
            Expression::Shl { left, right } => left
                .refine_with(path_condition, depth + 1)
                .shift_left(right.refine_with(path_condition, depth + 1)),
            Expression::ShlOverflows {
                left,
                right,
                result_type,
            } => left.refine_with(path_condition, depth + 1).shl_overflows(
                right.refine_with(path_condition, depth + 1),
                result_type.clone(),
            ),
            Expression::Shr {
                left,
                right,
                result_type,
            } => left.refine_with(path_condition, depth + 1).shr(
                right.refine_with(path_condition, depth + 1),
                result_type.clone(),
            ),
            Expression::ShrOverflows {
                left,
                right,
                result_type,
            } => left.refine_with(path_condition, depth + 1).shr_overflows(
                right.refine_with(path_condition, depth + 1),
                result_type.clone(),
            ),
            Expression::Sub { left, right } => left
                .refine_with(path_condition, depth + 1)
                .subtract(right.refine_with(path_condition, depth + 1)),
            Expression::SubOverflows {
                left,
                right,
                result_type,
            } => left.refine_with(path_condition, depth + 1).sub_overflows(
                right.refine_with(path_condition, depth + 1),
                result_type.clone(),
            ),
            Expression::Switch {
                discriminator,
                cases,
                default,
            } => discriminator.refine_with(path_condition, depth + 1).switch(
                cases
                    .iter()
                    .map(|(case_val, result_val)| {
                        (
                            case_val.refine_with(path_condition, depth + 1),
                            result_val.refine_with(path_condition, depth + 1),
                        )
                    })
                    .collect(),
                default.refine_with(path_condition, depth + 1),
            ),
            Expression::TaggedExpression { operand, tag } => {
                operand.refine_with(path_condition, depth + 1).add_tag(*tag)
            }
            Expression::UninterpretedCall {
                callee,
                arguments,
                result_type,
                path,
            } => callee
                .refine_with(path_condition, depth + 1)
                .uninterpreted_call(
                    arguments
                        .iter()
                        .map(|v| v.refine_with(path_condition, depth + 1))
                        .collect(),
                    result_type.clone(),
                    path.clone(),
                ),
            Expression::UnknownModelField { .. } => self.clone(),
            Expression::UnknownTagCheck {
                operand,
                tag,
                checking_presence,
            } => AbstractValue::make_tag_check(
                operand.refine_with(path_condition, depth + 1),
                *tag,
                *checking_presence,
            ),
            Expression::UnknownTagField { .. } => self.clone(),
            Expression::Variable { var_type, .. } => {
                // A Boolean variable resolves to a constant if the path condition decides it.
                if *var_type == ExpressionType::Bool {
                    if path_condition.implies(&self) {
                        return Rc::new(TRUE);
                    } else if path_condition.implies_not(&self) {
                        return Rc::new(FALSE);
                    }
                }
                self.clone()
            }
            Expression::WidenedJoin { path, operand } => {
                operand.refine_with(path_condition, depth + 1).widen(&path)
            }
        }
    }
/// A cast that re-interprets existing bits rather than doing conversions.
/// When the source type and target types differ in length, bits are truncated
/// or zero filled as appropriate.
#[logfn(TRACE)]
fn transmute(&self, target_type: ExpressionType) -> Rc<AbstractValue> {
if let Expression::CompileTimeConstant(c) = &self.expression {
Rc::new(c.transmute(target_type).into())
} else if target_type.is_integer() {
self.unsigned_modulo(target_type.bit_length())
.cast(target_type)
} else if target_type == ExpressionType::Bool {
self.unsigned_modulo(target_type.bit_length())
.not_equals(Rc::new(ConstantDomain::U128(0).into()))
} else {
// todo: add an expression case that will delay transmutation until the operand refines to a constant
AbstractValue::make_typed_unknown(target_type, Path::get_as_path(self.clone()))
}
}
#[logfn_inputs(TRACE)]
#[logfn(TRACE)]
fn try_resolve_as_byte_array(&self, environment: &Environment) -> Option<Vec<u8>> {
if let Expression::Reference(path) = &self.expression {
if matches!(&path.value, PathEnum::HeapBlock { .. }) {
let heap_layout_path = Path::new_layout(path.clone());
if let Some(layout) = environment.value_at(&heap_layout_path) {
if let Expression::HeapBlockLayout { length, .. } = &layout.expression {
if let Expression::CompileTimeConstant(ConstantDomain::U128(len)) =
length.expression
{
let mut arr = Vec::with_capacity(len as usize);
for i in 0..(len as usize) {
let elem_index = Rc::new(ConstantDomain::U128(i as u128).into());
let elem_path = Path::new_index(path.clone(), elem_index);
let elem_val = environment.value_at(&elem_path);
if let Some(val) = elem_val {
if let Expression::CompileTimeConstant(ConstantDomain::U128(
v,
)) = &val.expression
{
arr.push(*v as u8);
continue;
}
}
return None;
}
return Some(arr);
}
}
}
}
}
None
}
#[logfn_inputs(TRACE)]
#[logfn(TRACE)]
fn try_resolve_as_ref_to_str(&self, _environment: &Environment) -> Option<Rc<str>> {
if let Expression::Variable {
path,
var_type: ExpressionType::ThinPointer,
} = &self.expression
{
if let PathEnum::QualifiedPath {
qualifier,
selector,
..
} = &path.value
{
if let (PathEnum::Computed { value }, PathSelector::Field(0)) =
(&qualifier.value, selector.as_ref())
{
if let Expression::CompileTimeConstant(ConstantDomain::Str(s)) =
&value.expression
{
return Some(s.clone());
}
}
}
}
None
}
    /// Returns a domain whose corresponding set of concrete values include all of the values
    /// that the call expression might return at runtime. The function to be called will not
    /// have been summarized for some reason or another (for example, it might be a foreign function).
    ///
    /// `self` is the callee value, `arguments` are the actual argument values,
    /// `result_type` is the type the call is expected to produce, and `path` is the
    /// path at which the result will be stored.
    #[logfn_inputs(TRACE)]
    fn uninterpreted_call(
        &self,
        arguments: Vec<Rc<AbstractValue>>,
        result_type: ExpressionType,
        path: Rc<Path>,
    ) -> Rc<AbstractValue> {
        // Delegates to the constructor; no simplification is possible for an unknown call.
        AbstractValue::make_uninterpreted_call(self.clone(), arguments, result_type, path)
    }
/// Returns an expression that discards (zero fills) bits that are not in the specified number
/// of least significant bits. The result is an unsigned integer.
#[logfn(TRACE)]
fn unsigned_modulo(&self, num_bits: u8) -> Rc<AbstractValue> {
if let Expression::CompileTimeConstant(c) = &self.expression {
Rc::new(c.unsigned_modulo(num_bits).into())
} else {
let power_of_two = Rc::new(ConstantDomain::U128(1 << num_bits).into());
let unsigned = self.try_to_retype_as(&ExpressionType::U128);
unsigned.remainder(power_of_two)
}
}
/// Returns an expression that shifts the bit representation of the value to the left by the
/// given number of bits, filling in with zeroes. The result is an unsigned integer.
#[logfn(TRACE)]
fn unsigned_shift_left(&self, num_bits: u8) -> Rc<AbstractValue> {
if let Expression::CompileTimeConstant(c) = &self.expression {
Rc::new(c.unsigned_shift_left(num_bits).into())
} else {
let power_of_two = Rc::new(ConstantDomain::U128(1 << num_bits).into());
let unsigned = self.try_to_retype_as(&ExpressionType::U128);
unsigned.multiply(power_of_two)
}
}
/// Returns an expression that shifts the bit representation of the value to the right by the
/// given number of bits, filling in with zeroes. The result is an unsigned integer.
#[logfn(TRACE)]
fn unsigned_shift_right(&self, num_bits: u8) -> Rc<AbstractValue> {
if let Expression::CompileTimeConstant(c) = &self.expression {
Rc::new(c.unsigned_shift_right(num_bits).into())
} else {
let power_of_two = Rc::new(ConstantDomain::U128(1 << num_bits).into());
let unsigned = self.try_to_retype_as(&ExpressionType::U128);
unsigned.divide(power_of_two)
}
}
    /// Returns true if the expression uses any of the variables in the given set.
    #[logfn(TRACE)]
    fn uses(&self, variables: &HashSet<Rc<Path>>) -> bool {
        match &self.expression {
            Expression::Bottom => false,
            // Top could be anything, so conservatively assume it uses every variable.
            Expression::Top => true,
            // Binary expressions: used if either operand uses one of the variables.
            Expression::Add { left, right }
            | Expression::AddOverflows { left, right, .. }
            | Expression::And { left, right }
            | Expression::BitAnd { left, right }
            | Expression::BitOr { left, right }
            | Expression::BitXor { left, right }
            | Expression::Div { left, right }
            | Expression::Equals { left, right }
            | Expression::GreaterOrEqual { left, right }
            | Expression::GreaterThan { left, right }
            | Expression::IntrinsicBinary { left, right, .. }
            | Expression::LessOrEqual { left, right }
            | Expression::LessThan { left, right }
            | Expression::Mul { left, right }
            | Expression::MulOverflows { left, right, .. }
            | Expression::Ne { left, right }
            | Expression::Offset { left, right }
            | Expression::Or { left, right }
            | Expression::Rem { left, right }
            | Expression::Shl { left, right }
            | Expression::ShlOverflows { left, right, .. }
            | Expression::Shr { left, right, .. }
            | Expression::ShrOverflows { left, right, .. }
            | Expression::Sub { left, right }
            | Expression::SubOverflows { left, right, .. } => {
                left.uses(variables) || right.uses(variables)
            }
            // Unary expressions: delegate to the single operand.
            Expression::BitNot { operand, .. }
            | Expression::Cast { operand, .. }
            | Expression::IntrinsicBitVectorUnary { operand, .. }
            | Expression::IntrinsicFloatingPointUnary { operand, .. }
            | Expression::Neg { operand }
            | Expression::LogicalNot { operand }
            | Expression::TaggedExpression { operand, .. }
            | Expression::UnknownTagCheck { operand, .. } => operand.uses(variables),
            Expression::CompileTimeConstant(..) => false,
            Expression::ConditionalExpression {
                condition,
                consequent,
                alternate,
            } => {
                condition.uses(variables) || consequent.uses(variables) || alternate.uses(variables)
            }
            Expression::HeapBlock { .. } => false,
            Expression::HeapBlockLayout {
                length, alignment, ..
            } => length.uses(variables) || alignment.uses(variables),
            Expression::Join { left, right, path } => {
                variables.contains(path) || left.uses(variables) || right.uses(variables)
            }
            Expression::Memcmp {
                left,
                right,
                length,
            } => left.uses(variables) || right.uses(variables) || length.uses(variables),
            // Path valued expressions: used only if the path itself is in the set.
            Expression::Reference(path)
            | Expression::InitialParameterValue { path, .. }
            | Expression::UnknownTagField { path }
            | Expression::Variable { path, .. }
            | Expression::WidenedJoin { path, .. } => variables.contains(path),
            Expression::Switch {
                discriminator,
                cases,
                default,
            } => {
                discriminator.uses(variables)
                    || default.uses(variables)
                    || cases.iter().any(|(case_val, result_val)| {
                        case_val.uses(variables) || result_val.uses(variables)
                    })
            }
            Expression::UninterpretedCall {
                callee,
                arguments: args,
                ..
            } => callee.uses(variables) || args.iter().any(|arg| arg.uses(variables)),
            Expression::UnknownModelField { path, default } => {
                variables.contains(path) || default.uses(variables)
            }
        }
    }
/// Returns an abstract value whose corresponding set of concrete values include all of the values
/// corresponding to self and other. The set of values may be less precise (more inclusive) than
/// the set returned by join. The chief requirement is that a small number of widen calls
/// deterministically lead to a set of values that include of the values that could be stored
/// in memory at the given path.
#[logfn_inputs(TRACE)]
fn widen(&self, path: &Rc<Path>) -> Rc<AbstractValue> {
match &self.expression {
Expression::Join {
path: join_path, ..
} if path.eq(join_path) => AbstractValue::make_from(
Expression::WidenedJoin {
path: path.clone(),
operand: self.clone(),
},
self.expression_size.saturating_add(1),
),
_ => self.clone(),
}
}
}
| 41.949277 | 127 | 0.501693 |
90e162308cae9c1a9e0a74715936c453dc532834 | 21,453 | use akri_shared::os::env_var::{ActualEnvVarQuery, EnvVarQuery};
use log::trace;
use rscam::Camera as RsCamera;
use rscam::Config;
/// Environment variable that overrides the capture frame rate.
const FRAMES_PER_SECOND: &str = "FRAMES_PER_SECOND";
/// Environment variable that overrides the capture resolution width.
const RESOLUTION_WIDTH: &str = "RESOLUTION_WIDTH";
/// Environment variable that overrides the capture resolution height.
const RESOLUTION_HEIGHT: &str = "RESOLUTION_HEIGHT";
/// Environment variable that overrides the image format.
const FORMAT: &str = "FORMAT";
/// Default frame rate (10 fps), used when FRAMES_PER_SECOND is not set.
const DEFAULT_FRAMES_PER_SECOND: u32 = 10;
/// Default resolution width, which is also the default for rscam.
const DEFAULT_RESOLUTION_WIDTH: u32 = 640;
/// Default resolution height, which is also the default for rscam.
const DEFAULT_RESOLUTION_HEIGHT: u32 = 480;
/// Default format, which is also the default for rscam.
const DEFAULT_FORMAT: &str = "MJPG";
// A (width, height) pair in pixels.
pub type Resolution = (u32, u32);
// A frame interval as a (numerator, denominator) fraction of a second,
// e.g. (1, 10) means one frame every 1/10th of a second (10 fps).
pub type Interval = (u32, u32);
/// Opens the camera at `devnode` and starts it capturing, returning the running camera.
///
/// Format, resolution and interval are taken from the FORMAT, RESOLUTION_WIDTH,
/// RESOLUTION_HEIGHT and FRAMES_PER_SECOND environment variables when set; otherwise
/// defaults are tried, and if the camera supports neither, the first setting the
/// camera reports is used.
pub fn build_and_start_camera_capturer(devnode: &str) -> RsCamera {
    trace!("build_and_start_camera_capturer - entered");
    let mut camera = RsCamera::new(devnode).unwrap();
    let env_var_query = ActualEnvVarQuery {};
    // The camera reports formats as byte arrays; convert them to Strings so they can
    // be compared against the environment variable and default format strings.
    let supported_formats: Vec<String> = camera
        .formats()
        .map(|wformat| {
            std::str::from_utf8(&wformat.unwrap().format)
                .unwrap()
                .to_string()
        })
        .collect();
    let chosen_format = get_format(&env_var_query, supported_formats);
    let format_bytes = chosen_format.as_bytes();
    // Resolution must be chosen before interval, since supported intervals depend on it.
    let resolution = get_resolution(&env_var_query, camera.resolutions(format_bytes).unwrap());
    let interval = get_interval(
        &env_var_query,
        camera.intervals(format_bytes, resolution).unwrap(),
    );
    trace!("build_and_start_camera_capturer - before starting camera");
    camera
        .start(&Config {
            interval,
            resolution,
            format: format_bytes,
            ..Default::default()
        })
        .unwrap();
    trace!("build_and_start_camera_capturer - after starting camera");
    camera
}
/// Chooses the image format: the FORMAT environment variable value if the camera
/// supports it, else the default format if supported, else the first format the
/// camera reports.
fn get_format(env_var_query: &impl EnvVarQuery, format_options: Vec<String>) -> String {
    let requested = env_var_query.get_env_var(FORMAT).unwrap_or_else(|_| {
        trace!(
            "get_format - format not set ... trying to use {:?}",
            DEFAULT_FORMAT
        );
        DEFAULT_FORMAT.to_string()
    });
    if format_options.contains(&requested) {
        trace!("get_format - using {:?} format", requested);
        return requested;
    }
    if format_options.contains(&DEFAULT_FORMAT.to_string()) {
        trace!("get_format - using default {:?} format", DEFAULT_FORMAT);
        return DEFAULT_FORMAT.to_string();
    }
    // Neither the requested nor the default format is supported; take the first one.
    trace!(
        "get_format - camera does not support {:?} format, using {:?} format",
        DEFAULT_FORMAT,
        format_options[0]
    );
    format_options[0].clone()
}
/// Chooses the capture interval as (1, fps). The frame rate comes from the
/// FRAMES_PER_SECOND environment variable, falling back to the default when it is
/// unset or cannot be parsed (previously a malformed value caused a panic). The
/// result is validated against the intervals the camera supports; if unsupported,
/// the first supported interval is used.
fn get_interval(env_var_query: &impl EnvVarQuery, interval_info: rscam::IntervalInfo) -> Interval {
    let fps_to_validate = match env_var_query.get_env_var(FRAMES_PER_SECOND) {
        // Fall back to the default instead of panicking on an unparsable value.
        Ok(res) => res.parse().unwrap_or_else(|_| {
            trace!(
                "get_interval - cannot parse {:?} as frames per second ... trying to use {:?}",
                res,
                DEFAULT_FRAMES_PER_SECOND
            );
            DEFAULT_FRAMES_PER_SECOND
        }),
        Err(_) => {
            trace!("get_interval - frames per second not set ... trying to use 10");
            DEFAULT_FRAMES_PER_SECOND
        }
    };
    // rscam intervals are (numerator, denominator) fractions of a second.
    let interval_to_validate = (1, fps_to_validate);
    let interval_options = get_interval_options(interval_info);
    // If the camera does not support env var interval or default, use first interval option
    if !interval_options.contains(&interval_to_validate) {
        trace!(
            "get_interval - camera does not support {:?} interval, using {:?} interval",
            interval_to_validate,
            interval_options[0]
        );
        interval_options[0]
    } else {
        trace!("get_interval - using {:?} interval", interval_to_validate);
        interval_to_validate
    }
}
/// This gets the intervals supported by the camera
// NOTE(review): the return type is `Vec<Resolution>` even though the items are
// intervals; presumably `Resolution` and `Interval` are both `(u32, u32)` aliases,
// but `Vec<Interval>` would be clearer — confirm the alias definitions.
fn get_interval_options(interval_info: rscam::IntervalInfo) -> Vec<Resolution> {
    match interval_info {
        // The camera reports an explicit list of supported intervals.
        rscam::IntervalInfo::Discretes(interval_options) => interval_options,
        // The camera reports a (min, max, step) range; expand it into a list.
        rscam::IntervalInfo::Stepwise { min, max, step } => {
            let mut interval_options: Vec<(u32, u32)> = Vec::new();
            let width_step = step.0;
            let height_step = step.1;
            let min_width = min.0;
            let min_height = min.1;
            let max_height = max.1;
            // Step count derives from the denominator range only; the numerator
            // advances in lockstep.
            // NOTE(review): `0..steps` never yields `max` itself — confirm
            // whether the maximum supported interval should be included.
            let steps = (max_height - min_height) / height_step;
            for step_num in 0..steps {
                let curr_width = min_width + step_num * width_step;
                let curr_height = min_height + step_num * height_step;
                interval_options.push((curr_width, curr_height));
            }
            interval_options
        }
    }
}
/// Determine the resolution to use: prefer the environment-variable setting,
/// fall back to the default, and if the camera supports neither, use the first
/// resolution the camera advertises.
fn get_resolution(
    env_var_query: &impl EnvVarQuery,
    resolution_info: rscam::ResolutionInfo,
) -> Resolution {
    // Requested resolution: env var if set, otherwise the compiled-in default.
    let resolution_to_validate = get_env_var_resolution(env_var_query)
        .unwrap_or((DEFAULT_RESOLUTION_WIDTH, DEFAULT_RESOLUTION_HEIGHT));
    let resolution_options = get_resolution_options(resolution_info);
    if resolution_options.contains(&resolution_to_validate) {
        trace!(
            "get_resolution - using resolution {:?}",
            resolution_to_validate
        );
        resolution_to_validate
    } else {
        // Neither the requested nor the default resolution is supported.
        trace!(
            "get_resolution - camera does not support {:?} resolution, using {:?} resolution",
            resolution_to_validate,
            resolution_options[0]
        );
        resolution_options[0]
    }
}
/// This gets the desired resolution from an environment variable else returns None.
///
/// Returns `None` when either dimension is unset. A value that fails to parse
/// is also treated as unset (previously a malformed value would panic via
/// `unwrap`), so callers fall back to the default resolution instead.
fn get_env_var_resolution(env_var_query: &impl EnvVarQuery) -> Option<Resolution> {
    let width = match env_var_query.get_env_var(RESOLUTION_WIDTH) {
        Ok(res) => match res.parse() {
            Ok(width) => width,
            Err(_) => {
                trace!("get_env_var_resolution - invalid resolution width {:?}", res);
                return None;
            }
        },
        Err(_) => {
            trace!("get_env_var_resolution - resolution width not set");
            return None;
        }
    };
    let height = match env_var_query.get_env_var(RESOLUTION_HEIGHT) {
        Ok(res) => match res.parse() {
            Ok(height) => height,
            Err(_) => {
                trace!("get_env_var_resolution - invalid resolution height {:?}", res);
                return None;
            }
        },
        Err(_) => {
            trace!("get_env_var_resolution - resolution height not set");
            return None;
        }
    };
    Some((width, height))
}
/// This gets the resolutions supported by the camera.
fn get_resolution_options(resolution_info: rscam::ResolutionInfo) -> Vec<Resolution> {
    match resolution_info {
        // The camera reports an explicit list of supported resolutions.
        rscam::ResolutionInfo::Discretes(resolution_options) => resolution_options,
        // The camera reports a (min, max, step) range; expand it into a list.
        rscam::ResolutionInfo::Stepwise { min, max, step } => {
            let mut resolution_options: Vec<(u32, u32)> = Vec::new();
            let width_step = step.0;
            let height_step = step.1;
            let min_width = min.0;
            let min_height = min.1;
            let max_width = max.0;
            // Step count derives from the width range only; the height advances
            // in lockstep.
            // NOTE(review): `0..steps` never yields the max resolution itself —
            // confirm whether the maximum should be included.
            let steps = (max_width - min_width) / width_step;
            for step_num in 0..steps {
                let curr_width = min_width + step_num * width_step;
                let curr_height = min_height + step_num * height_step;
                resolution_options.push((curr_width, curr_height));
            }
            resolution_options
        }
    }
}
#[cfg(test)]
mod tests {
    // Unit tests for format/interval/resolution selection. Each test installs a
    // sequence of mockall expectations (`times(1)` each), so the order of the
    // `expect_get_env_var` calls matters: expectations are consumed in order
    // across the successive `get_*` calls within a test.
    use super::*;
    use akri_shared::os::env_var::MockEnvVarQuery;
    use std::env::VarError;
    // Covers all four branches of get_format: env var supported, default used,
    // first option fallback, and unsupported env var value.
    #[test]
    fn test_get_format() {
        let _ = env_logger::builder().is_test(true).try_init();
        let mut mock_query = MockEnvVarQuery::new();
        const MOCK_FORMAT: &str = "YUYV";
        let mut format_options: Vec<String> =
            vec!["OTHR".to_string(), "YUYV".to_string(), "MJPG".to_string()];
        // Test when env var set and camera supports that format
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FORMAT)
            .returning(move |_| Ok(MOCK_FORMAT.to_string()));
        assert_eq!(
            "YUYV".to_string(),
            get_format(&mock_query, format_options.clone())
        );
        // Test when env var not set but camera supports default
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FORMAT)
            .returning(move |_| Err(VarError::NotPresent));
        assert_eq!(
            "MJPG".to_string(),
            get_format(&mock_query, format_options.clone())
        );
        // Test when env var not set and camera does not support default
        format_options.pop();
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FORMAT)
            .returning(move |_| Err(VarError::NotPresent));
        assert_eq!("OTHR".to_string(), get_format(&mock_query, format_options));
        // Test when env var set and camera does not support format nor the default one
        let minimal_format_options: Vec<String> = vec!["OTHR".to_string(), "BLAH".to_string()];
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FORMAT)
            .returning(move |_| Ok(MOCK_FORMAT.to_string()));
        // Should choose first one
        assert_eq!(
            "OTHR".to_string(),
            get_format(&mock_query, minimal_format_options)
        );
    }
    // Interval selection when the camera reports a stepwise (min/max/step) range.
    #[test]
    fn test_get_interval_stepwise() {
        let _ = env_logger::builder().is_test(true).try_init();
        let mut mock_query = MockEnvVarQuery::new();
        const MOCK_INTERVAL: &str = "3";
        // Test when env var set and camera supports that interval
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FRAMES_PER_SECOND)
            .returning(move |_| Ok(MOCK_INTERVAL.to_string()));
        assert_eq!(
            (1, 3),
            get_interval(
                &mock_query,
                rscam::IntervalInfo::Stepwise {
                    min: (1, 1),
                    max: (1, 30),
                    step: (0, 2),
                }
            )
        );
        // Test when env var not set but camera supports default
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FRAMES_PER_SECOND)
            .returning(move |_| Err(VarError::NotPresent));
        assert_eq!(
            (1, 10),
            get_interval(
                &mock_query,
                rscam::IntervalInfo::Stepwise {
                    min: (1, 1),
                    max: (1, 30),
                    step: (0, 9),
                }
            )
        );
        // Test when env var not set and camera does not support default
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FRAMES_PER_SECOND)
            .returning(move |_| Err(VarError::NotPresent));
        assert_eq!(
            // returns slowest interval
            (1, 1),
            get_interval(
                &mock_query,
                rscam::IntervalInfo::Stepwise {
                    min: (1, 1),
                    max: (1, 30),
                    step: (0, 2),
                }
            )
        );
        // Test when env var set and camera does not support that interval nor the default one
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FRAMES_PER_SECOND)
            .returning(move |_| Ok(MOCK_INTERVAL.to_string()));
        assert_eq!(
            (1, 1),
            get_interval(
                &mock_query,
                rscam::IntervalInfo::Stepwise {
                    min: (1, 1),
                    max: (1, 30),
                    step: (0, 5),
                }
            )
        );
    }
    // Interval selection when the camera reports an explicit interval list.
    #[test]
    fn test_get_interval_discrete() {
        let _ = env_logger::builder().is_test(true).try_init();
        let mut mock_query = MockEnvVarQuery::new();
        const MOCK_INTERVAL: &str = "3";
        // Test when env var set and camera supports that interval
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FRAMES_PER_SECOND)
            .returning(move |_| Ok(MOCK_INTERVAL.to_string()));
        assert_eq!(
            (1, 3),
            get_interval(
                &mock_query,
                rscam::IntervalInfo::Discretes(vec![(1, 1), (1, 3), (1, 5)])
            )
        );
        // Test when env var not set but camera supports default
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FRAMES_PER_SECOND)
            .returning(move |_| Err(VarError::NotPresent));
        assert_eq!(
            (1, 10),
            get_interval(
                &mock_query,
                rscam::IntervalInfo::Discretes(vec![(1, 1), (1, 3), (1, 10)])
            )
        );
        // Test when env var not set and camera does not support default
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FRAMES_PER_SECOND)
            .returning(move |_| Err(VarError::NotPresent));
        assert_eq!(
            // returns slowest interval
            (1, 1),
            get_interval(
                &mock_query,
                rscam::IntervalInfo::Discretes(vec![(1, 1), (1, 3), (1, 5)])
            )
        );
        // Test when env var set and camera does not support that interval nor the default one
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == FRAMES_PER_SECOND)
            .returning(move |_| Ok(MOCK_INTERVAL.to_string()));
        assert_eq!(
            (1, 1),
            get_interval(
                &mock_query,
                rscam::IntervalInfo::Discretes(vec![(1, 1), (1, 2), (1, 5)])
            )
        );
    }
    // Resolution selection when the camera reports a stepwise (min/max/step) range.
    // Note: width AND height env vars are only both queried when width is set,
    // hence only one expectation in the "not set" cases.
    #[test]
    fn test_get_resolution_stepwise() {
        let _ = env_logger::builder().is_test(true).try_init();
        let mut mock_query = MockEnvVarQuery::new();
        const MOCK_RESOLUTION_WIDTH: &str = "424";
        const MOCK_RESOLUTION_HEIGHT: &str = "240";
        // Test when env var set and camera supports that interval
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_WIDTH)
            .returning(move |_| Ok(MOCK_RESOLUTION_WIDTH.to_string()));
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_HEIGHT)
            .returning(move |_| Ok(MOCK_RESOLUTION_HEIGHT.to_string()));
        assert_eq!(
            (424, 240),
            get_resolution(
                &mock_query,
                rscam::ResolutionInfo::Stepwise {
                    min: (224, 140),
                    max: (1280, 800),
                    step: (200, 100),
                }
            )
        );
        // Test when env var not set but camera supports default
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_WIDTH)
            .returning(move |_| Err(VarError::NotPresent));
        assert_eq!(
            (DEFAULT_RESOLUTION_WIDTH, DEFAULT_RESOLUTION_HEIGHT), // (640, 480)
            get_resolution(
                &mock_query,
                rscam::ResolutionInfo::Stepwise {
                    min: (440, 280),
                    max: (1280, 800),
                    step: (200, 200),
                }
            )
        );
        // Test when env var not set and camera does not support default
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_WIDTH)
            .returning(move |_| Err(VarError::NotPresent));
        assert_eq!(
            (160, 120),
            get_resolution(
                &mock_query,
                rscam::ResolutionInfo::Stepwise {
                    min: (160, 120),
                    max: (1280, 800),
                    step: (100, 100),
                }
            )
        );
        // Test when env var set and camera does not support that interval nor the default one
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_WIDTH)
            .returning(move |_| Ok(MOCK_RESOLUTION_WIDTH.to_string()));
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_HEIGHT)
            .returning(move |_| Ok(MOCK_RESOLUTION_HEIGHT.to_string()));
        assert_eq!(
            (160, 120),
            get_resolution(
                &mock_query,
                rscam::ResolutionInfo::Stepwise {
                    min: (160, 120),
                    max: (1280, 800),
                    step: (100, 100),
                }
            )
        );
    }
    // Resolution selection when the camera reports an explicit resolution list.
    #[test]
    fn test_get_resolution_discrete() {
        let _ = env_logger::builder().is_test(true).try_init();
        let mut mock_query = MockEnvVarQuery::new();
        const MOCK_RESOLUTION_WIDTH: &str = "424";
        const MOCK_RESOLUTION_HEIGHT: &str = "240";
        // Test when env var set and camera supports that interval
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_WIDTH)
            .returning(move |_| Ok(MOCK_RESOLUTION_WIDTH.to_string()));
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_HEIGHT)
            .returning(move |_| Ok(MOCK_RESOLUTION_HEIGHT.to_string()));
        assert_eq!(
            (424, 240),
            get_resolution(
                &mock_query,
                rscam::ResolutionInfo::Discretes(vec!((200, 100), (424, 240), (1000, 800)))
            )
        );
        // Test when env var not set but camera supports default
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_WIDTH)
            .returning(move |_| Err(VarError::NotPresent));
        assert_eq!(
            (DEFAULT_RESOLUTION_WIDTH, DEFAULT_RESOLUTION_HEIGHT), // (640, 480)
            get_resolution(
                &mock_query,
                rscam::ResolutionInfo::Discretes(vec!(
                    (200, 100),
                    (424, 240),
                    (640, 480),
                    (1000, 800)
                ))
            )
        );
        // Test when env var not set and camera does not support default
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_WIDTH)
            .returning(move |_| Err(VarError::NotPresent));
        assert_eq!(
            (200, 100),
            get_resolution(
                &mock_query,
                rscam::ResolutionInfo::Discretes(vec!((200, 100), (450, 240), (1000, 800)))
            )
        );
        // Test when env var set and camera does not support that interval nor the default one
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_WIDTH)
            .returning(move |_| Ok(MOCK_RESOLUTION_WIDTH.to_string()));
        mock_query
            .expect_get_env_var()
            .times(1)
            .withf(move |name: &str| name == RESOLUTION_HEIGHT)
            .returning(move |_| Ok(MOCK_RESOLUTION_HEIGHT.to_string()));
        assert_eq!(
            (200, 100),
            get_resolution(
                &mock_query,
                rscam::ResolutionInfo::Discretes(vec!((200, 100), (500, 250), (1000, 800)))
            )
        );
    }
}
| 36.177066 | 180 | 0.556332 |
f55fc3e040293322f87f9daecd467aa4c9c22ae1 | 5,462 | use std::borrow::Cow;
use ergo_js::{
BufferConsole, ConsoleMessage, Extension, Runtime, RuntimeOptions, RuntimePool,
SerializedState, Snapshot,
};
use itertools::Itertools;
use schemars::JsonSchema;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use tracing::{event, Level};
// V8 snapshots baked in at build time: one with network extensions, one without.
const NET_SNAPSHOT: &'static [u8] = include_bytes!("./snapshots/net");
const CORE_SNAPSHOT: &'static [u8] = include_bytes!("./snapshots/core");
lazy_static::lazy_static! {
    /// Shared pool of JS runtimes; all script execution in this module is
    /// dispatched through this pool.
    pub static ref POOL : RuntimePool = RuntimePool::new(None);
}
/// Snapshot of a task's JS execution: the serialized runtime state plus any
/// console output captured during the run.
#[derive(Clone, Debug, JsonSchema, Serialize, Deserialize)]
pub struct TaskSerializedJsState {
    // Serialized ergo_js runtime state, used to resume execution later.
    pub state: ergo_js::SerializedState,
    // Console messages emitted while the script ran.
    pub console: Vec<ConsoleMessage>,
}
/// Select the V8 snapshot and extension set for a runtime, depending on whether
/// network access is allowed.
fn snapshot_and_extensions(
    allow_net: bool,
    random_seed: Option<u64>,
) -> (&'static [u8], Vec<Extension>) {
    match allow_net {
        true => (NET_SNAPSHOT, ergo_js::net_extensions(random_seed)),
        false => (CORE_SNAPSHOT, ergo_js::core_extensions(random_seed)),
    }
}
/// Create a runtime suitable for running tasks, with serialized execution and optional network
/// access. If `state` is `None`, a new [SerializedState] will be created.
pub fn create_serialized_task_script_runtime(
    state: Option<SerializedState>,
    allow_net: bool,
) -> Runtime {
    // `unwrap_or_default` is the idiomatic form of `unwrap_or_else(Default::default)`.
    let state = state.unwrap_or_default();
    // Seed the extensions' RNG from the serialized state so replays are deterministic.
    let (snapshot, extensions) = snapshot_and_extensions(allow_net, Some(state.random_seed));
    Runtime::new(RuntimeOptions {
        console: Some(Box::new(BufferConsole::new(ergo_js::ConsoleLevel::Debug))),
        extensions,
        snapshot: Some(Snapshot::Static(snapshot)),
        serialized_state: Some(state),
        ..Default::default()
    })
}
/// Create a runtime suitable for running tasks with optional network access and
/// without serialized (deterministic replay) state.
///
/// (The previous doc comment was copy-pasted from the serialized variant and
/// referred to a `state` parameter this function does not have.)
pub fn create_nonserialized_task_script_runtime(allow_net: bool) -> Runtime {
    let (snapshot, extensions) = snapshot_and_extensions(allow_net, None);
    Runtime::new(RuntimeOptions {
        console: Some(Box::new(BufferConsole::new(ergo_js::ConsoleLevel::Debug))),
        extensions,
        snapshot: Some(Snapshot::Static(snapshot)),
        ..Default::default()
    })
}
/// Create a full-featured, non-serialized runtime.
pub fn create_executor_runtime() -> Runtime {
    // Executor runtimes always get network access and no fixed RNG seed.
    let (snapshot, extensions) = snapshot_and_extensions(true, None);
    let options = RuntimeOptions {
        console: Some(Box::new(BufferConsole::new(ergo_js::ConsoleLevel::Info))),
        extensions,
        snapshot: Some(Snapshot::Static(snapshot)),
        ..Default::default()
    };
    Runtime::new(options)
}
/// Create a simple runtime without net access or serialized execution.
/// This is used for things like evaluating guard conditions in state machines.
pub fn create_simple_runtime() -> Runtime {
    let options = RuntimeOptions {
        console: Some(Box::new(BufferConsole::new(ergo_js::ConsoleLevel::Debug))),
        extensions: ergo_js::core_extensions(None),
        snapshot: Some(Snapshot::Static(CORE_SNAPSHOT)),
        ..Default::default()
    };
    Runtime::new(options)
}
/// Wrap a script body in an immediately-invoked function expression (IIFE) so
/// the body can use `return` and local declarations without leaking globals.
pub fn wrap_in_function(script: &str) -> String {
    format!(
        r##"(function() {{
{}
}})()"##,
        script
    )
}
/// Wrap a script in an IIFE that receives a single named argument; the argument
/// value is serialized to JSON and spliced directly into the call site (JSON is
/// valid JS expression syntax).
///
/// Returns an error only if `arg` fails to serialize.
pub fn wrap_in_function_with_args(
    script: &str,
    arg_name: &str,
    arg: impl Serialize,
) -> Result<String, serde_json::Error> {
    let arg_value = serde_json::to_string(&arg)?;
    let output = format!(
        r##"(function({arg_name}) {{
{script}
}})({arg_value})"##,
        script = script,
        arg_name = arg_name,
        arg_value = arg_value
    );
    Ok(output)
}
/// Run a script in a simple runtime, exposing optional `context` and `payload`
/// values as function arguments (JSON `null` when absent).
pub async fn run_simple_with_context_and_payload<
    RESULT: DeserializeOwned + std::fmt::Debug + Send + 'static,
>(
    script: &str,
    context: Option<&serde_json::Value>,
    payload: Option<&serde_json::Value>,
) -> Result<RESULT, anyhow::Error> {
    // Substitute JSON null for any argument the caller did not provide.
    let null_value = serde_json::Value::Null;
    let context_arg = context.unwrap_or(&null_value);
    let payload_arg = payload.unwrap_or(&null_value);
    run_simple_with_args(
        script,
        &[("context", context_arg), ("payload", payload_arg)],
    )
    .await
}
/// Run a script in a simple (no-net, non-serialized) runtime on the shared
/// pool, exposing each `(name, value)` pair in `args` as a function argument.
pub async fn run_simple_with_args<RESULT: DeserializeOwned + std::fmt::Debug + Send + 'static>(
    script: &str,
    args: &[(&str, &serde_json::Value)],
) -> Result<RESULT, anyhow::Error> {
    // Build an IIFE whose parameter and argument lists come from `args`. The
    // serde_json::Value arguments render via Display as JSON, which is valid JS
    // expression syntax.
    let wrapped = format!(
        r##"(function({arg_names}) {{
{script}
}})({arg_values})"##,
        arg_names = args.iter().map(|a| a.0).join(","),
        arg_values = args.iter().map(|a| a.1).join(", "),
        script = script
    );
    event!(Level::TRACE, script=%wrapped, "running script");
    // Execute on the shared runtime pool; the closure runs on a pool thread.
    POOL.run(move || async move {
        let mut runtime = create_simple_runtime();
        let result: RESULT = runtime.run_expression("script", wrapped.as_str())?;
        Ok(result)
    })
    .await
}
#[cfg(test)]
mod tests {
    use serde_json::json;
    // End-to-end check: a script can read fields off the injected `payload`
    // argument through the pool-backed simple runtime.
    #[tokio::test]
    async fn run_simple_with_context_and_payload() {
        let input_script = r##"return payload.value"##;
        let result: i64 = super::run_simple_with_context_and_payload(
            input_script,
            None,
            Some(&json!({ "value": 5 })),
        )
        .await
        .unwrap();
        assert_eq!(result, 5);
    }
}
| 30.344444 | 95 | 0.640242 |
11af7476b92ba35762ce6f588d38a3c5797c0d47 | 2,597 | // Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![allow(dead_code)]
use std::sync::atomic;
// Re-export this for convenience
pub use std::sync::atomic::{Ordering, fence};
// Wrapper around AtomicUsize for non-nightly which has usable compare_exchange
// and compare_exchange_weak methods.
pub struct AtomicUsize(atomic::AtomicUsize);
// u8-sized atomics are emulated with usize on this compatibility layer.
pub use self::AtomicUsize as AtomicU8;
// Constants for static initialization
pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize(atomic::ATOMIC_USIZE_INIT);
pub use self::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
impl AtomicUsize {
    // Most methods below are thin delegations to std's AtomicUsize; only the
    // compare_exchange pair adds behavior, emulating it via compare_and_swap
    // for toolchains where the std methods were not yet stable.
    #[inline]
    pub fn new(val: usize) -> AtomicUsize {
        AtomicUsize(atomic::AtomicUsize::new(val))
    }
    #[inline]
    pub fn load(&self, order: Ordering) -> usize {
        self.0.load(order)
    }
    #[inline]
    pub fn store(&self, val: usize, order: Ordering) {
        self.0.store(val, order);
    }
    #[inline]
    pub fn swap(&self, val: usize, order: Ordering) -> usize {
        self.0.swap(val, order)
    }
    #[inline]
    pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
        self.0.fetch_add(val, order)
    }
    #[inline]
    pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
        self.0.fetch_sub(val, order)
    }
    #[inline]
    pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
        self.0.fetch_and(val, order)
    }
    #[inline]
    pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
        self.0.fetch_or(val, order)
    }
    // Emulated compare_exchange: the failure ordering (4th arg) is ignored
    // because compare_and_swap only takes a single ordering.
    #[inline]
    pub fn compare_exchange(&self,
                            old: usize,
                            new: usize,
                            order: Ordering,
                            _: Ordering)
                            -> Result<usize, usize> {
        let res = self.0.compare_and_swap(old, new, order);
        if res == old { Ok(res) } else { Err(res) }
    }
    // "Weak" variant: compare_and_swap never fails spuriously, so this is the
    // same as the strong version.
    #[inline]
    pub fn compare_exchange_weak(&self,
                                 old: usize,
                                 new: usize,
                                 order: Ordering,
                                 _: Ordering)
                                 -> Result<usize, usize> {
        let res = self.0.compare_and_swap(old, new, order);
        if res == old { Ok(res) } else { Err(res) }
    }
}
| 33.294872 | 82 | 0.571044 |
fb14820113e00b92eaae7d620c99627b5a11930e | 1,289 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations on unique pointer types
// NB: transitionary, de-mode-ing.
#[forbid(deprecated_mode)];
#[forbid(deprecated_pattern)];
use cmp::{Eq, Ord};
#[cfg(notest)]
// Equality for owned (`~`) boxes compares the pointed-to values, not the
// pointers. Pre-1.0 syntax: `~const T`, `pure fn`.
impl<T:Eq> ~const T : Eq {
    #[inline(always)]
    pure fn eq(&self, other: &~const T) -> bool { *(*self) == *(*other) }
    #[inline(always)]
    pure fn ne(&self, other: &~const T) -> bool { *(*self) != *(*other) }
}
#[cfg(notest)]
// Ordering for owned (`~`) boxes delegates each comparison to the pointed-to
// values. Pre-1.0 syntax: `~const T`, `pure fn`.
impl<T:Ord> ~const T : Ord {
    #[inline(always)]
    pure fn lt(&self, other: &~const T) -> bool { *(*self) < *(*other) }
    #[inline(always)]
    pure fn le(&self, other: &~const T) -> bool { *(*self) <= *(*other) }
    #[inline(always)]
    pure fn ge(&self, other: &~const T) -> bool { *(*self) >= *(*other) }
    #[inline(always)]
    pure fn gt(&self, other: &~const T) -> bool { *(*self) > *(*other) }
}
| 33.051282 | 73 | 0.617533 |
71ccc1c2bf6d66d01ebdaf38a03a89dfd7644ca0 | 756 | use nannou::prelude::*;
fn main() {
    // Run a nannou sketch: `view` is invoked every frame to draw.
    nannou::sketch(view).run()
}
/// Draw one frame: subdivide the window three levels deep (4^3 cells) and, in
/// each cell, draw an arrow from the cell center toward the mouse, with its
/// length clamped to half the cell's shorter side.
fn view(app: &App, frame: Frame) {
    let draw = app.draw();
    let window = app.window_rect();
    draw.background().color(BLACK);
    for quarter in window.subdivisions_iter() {
        for sixteenth in quarter.subdivisions_iter() {
            for cell in sixteenth.subdivisions_iter() {
                let side = cell.w().min(cell.h());
                let start = cell.xy();
                let start_to_mouse = app.mouse.position() - start;
                let target_mag = start_to_mouse.length().min(side * 0.5);
                let end = start + start_to_mouse.normalize() * target_mag;
                draw.arrow().weight(5.0).points(start, end);
            }
        }
    }
    draw.to_frame(app, &frame).unwrap();
}
| 28 | 74 | 0.525132 |
1a1227c7920193c4e07d3aa7bbd6fafe2633618c | 795 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that unary structs can be mutably borrowed.
// Zero-sized unit struct used as the trait implementor under test.
struct Empty;
// Minimal iterator-like trait: `next` takes `&mut self`, which is what forces
// the mutable borrow this test exercises.
trait T<U> {
    fn next(&mut self) -> Option<U>;
}
// `Empty` yields nothing; `next` always returns `None`.
impl T<isize> for Empty {
    fn next(&mut self) -> Option<isize> { None }
}
// Takes the trait object by mutable borrow — the behavior under test.
fn do_something_with(a : &mut T<isize>) {
    println!("{:?}", a.next())
}
pub fn main() {
    // Mutably borrow a temporary unit-struct value as a trait object.
    do_something_with(&mut Empty);
}
| 27.413793 | 68 | 0.689308 |
f776c53d1b697d229f5cb9766f111d37711f0b52 | 2,331 | //! # Yew Framework - API Documentation
//!
//! Yew is a framework for web-client apps created with
//! a modern Rust-to-Wasm compilation feature.
//! This framework was highly inspired by
//! [Elm](http://elm-lang.org/) and [React](https://reactjs.org/).
//!
//! Minimal example:
//!
//! ```rust
//! #[macro_use]
//! extern crate yew;
//! use yew::prelude::*;
//!
//! struct Model {
//! value: i64,
//! }
//!
//! enum Msg {
//! DoIt,
//! }
//!
//! impl Component for Model {
//! type Message = Msg;
//! type Properties = ();
//! fn create(_: Self::Properties, _: ComponentLink<Self>) -> Self {
//! Self {
//! value: 0,
//! }
//! }
//!
//! fn update(&mut self, msg: Self::Message) -> ShouldRender {
//! match msg {
//! Msg::DoIt => self.value = self.value + 1
//! }
//! true
//! }
//! }
//!
//! impl Renderable<Model> for Model {
//! fn view(&self) -> Html<Self> {
//! html! {
//! <div>
//! <button onclick=|_| Msg::DoIt,>{ "+1" }</button>
//! <p>{ self.value }</p>
//! </div>
//! }
//! }
//! }
//!
//! fn main() {
//! yew::initialize();
//! App::<Model>::new().mount_to_body();
//! yew::run_loop();
//! }
//! ```
//!
#![deny(
missing_docs,
bare_trait_objects,
anonymous_parameters,
elided_lifetimes_in_paths
)]
#![recursion_limit = "512"]
extern crate self as yew;
/// Alias module for the procedural macro.
pub mod macros {
    /// Re-export of the `html!` procedural macro.
    pub use yew_macro::html;
}
pub mod components;
pub mod format;
pub mod services;
pub mod utils;
pub use yew_shared::*;
/// Initializes yew framework. It should be called first.
pub fn initialize() {
    // Sets up the stdweb runtime that backs all DOM interaction.
    stdweb::initialize();
}
/// Starts event loop.
pub fn run_loop() {
    // Hands control to stdweb's event loop.
    stdweb::event_loop();
}
/// Starts an app mounted to a body of the document.
pub fn start_app<COMP>()
where
    COMP: Component + Renderable<COMP>,
{
    // Convenience wrapper: initialize, mount the root component, run the loop.
    initialize();
    App::<COMP>::new().mount_to_body();
    run_loop();
}
/// The Yew Prelude
///
/// The purpose of this module is to alleviate imports of many common types:
///
/// ```
/// # #![allow(unused_imports)]
/// use yew::prelude::*;
/// ```
pub mod prelude {
    /// The `html!` macro for declaring component markup.
    pub use yew_macro::html;
    pub use yew_shared::prelude::*;
}
pub use self::prelude::*;
| 20.447368 | 76 | 0.542686 |
29f94da921def7e4e0ea9bf057294402b90c9715 | 5,944 | use std::process::{Command, Child, Stdio};
use std::net::{SocketAddrV4, Ipv4Addr, TcpListener};
use std::thread;
use std::time::Duration;
use std::fmt;
use std::fs;
use tempdir::TempDir;
/// Locate `command` on the PATH using the platform lookup tool (`where` on
/// Windows, `which` elsewhere). Returns the first matching path, trimmed.
///
/// Returns `Err(())` when the command is not found, when the lookup tool
/// itself cannot be spawned (previously this panicked via `expect` even though
/// the signature already reports failure), or when the output is not UTF-8.
fn which(command: &str) -> Result<String, ()> {
    let mut cmd = if cfg!(target_os = "windows") {
        Command::new("where")
    } else {
        Command::new("which")
    };
    // A spawn failure is reported as "not found" instead of panicking; callers
    // already handle the Err case.
    let output = cmd.arg(command).output().map_err(|_| ())?;
    if output.status.success() {
        let s = String::from_utf8(output.stdout)
            .map_err(|_| ())?;
        Ok(s.trim().to_owned())
    } else {
        Err(())
    }
}
/// Ask the OS for a currently-unused TCP port by binding port 0 on loopback
/// and reading back the assigned port. The listener is dropped on return, so
/// the port is only *likely* to still be free when the caller uses it.
fn get_unused_port() -> Result<u16, std::io::Error> {
    let listener = TcpListener::bind(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0))?;
    Ok(listener.local_addr()?.port())
}
// Handle to a temporary PostgreSQL server instance for tests. Dropping it
// stops the server process and removes the temporary data directory.
pub struct PsqlServer {
    // The running `postgres` child process.
    process: Child,
    // Temp dir holding the data/socket dirs; `Option` so Drop can take+close it.
    base_dir: Option<TempDir>,
    // TCP port (on 127.0.0.1) the server listens on.
    pub port: u16
}
#[derive(Debug)]
pub enum PsqlServerError {
    // One of the required PostgreSQL command-line tools was not found on PATH.
    CouldNotFindPostgresCommand,
    CouldNotFindInitDbCommand,
    CouldNotFindCreateDbCommand,
    CouldNotFindPgIsReadyCommand,
    // A setup step ran but exited unsuccessfully.
    InitDbFailed,
    CreateDbFailed,
    PostgresFailed,
    // Underlying filesystem/socket error.
    IoError(std::io::Error)
}
// Marker impl: Display and Debug exist, so the default trait methods suffice.
impl std::error::Error for PsqlServerError {
}
impl std::fmt::Display for PsqlServerError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
PsqlServerError::CouldNotFindPostgresCommand =>
write!(f, "Could not find `postgres` command"),
PsqlServerError::CouldNotFindInitDbCommand =>
write!(f, "Could not find `initdb` command"),
PsqlServerError::CouldNotFindCreateDbCommand =>
write!(f, "Could not find `createdb` command"),
PsqlServerError::CouldNotFindPgIsReadyCommand =>
write!(f, "Could not find `pg_isready` command"),
PsqlServerError::InitDbFailed =>
write!(f, "initdb failed"),
PsqlServerError::CreateDbFailed =>
write!(f, "createdb failed"),
PsqlServerError::PostgresFailed =>
write!(f, "postgres failed"),
PsqlServerError::IoError(error) =>
write!(f, "{}", error)
}
}
}
impl PsqlServer {
    /// Launch a temporary PostgreSQL server for tests.
    ///
    /// Locates the postgres tooling on PATH, initializes a fresh data directory
    /// inside a temp dir, starts `postgres` on an unused loopback port, waits
    /// (via `pg_isready`) until it accepts connections, then creates a database
    /// named `test`.
    pub fn start() -> Result<PsqlServer, PsqlServerError> {
        // Resolve all required binaries up front to fail fast with a specific error.
        let postgres = which("postgres")
            .map_err(|_| PsqlServerError::CouldNotFindPostgresCommand)?;
        let initdb = which("initdb")
            .map_err(|_| PsqlServerError::CouldNotFindInitDbCommand)?;
        let createdb = which("createdb")
            .map_err(|_| PsqlServerError::CouldNotFindCreateDbCommand)?;
        let pg_isready = which("pg_isready")
            .map_err(|_| PsqlServerError::CouldNotFindPgIsReadyCommand)?;
        // All server state lives under a temp dir removed when the handle drops.
        let base_dir = TempDir::new("postgresql")
            .map_err(|e| PsqlServerError::IoError(e))?;
        let base_path = base_dir.path();
        let data_path = base_path.join("data").to_str()
            .unwrap().to_owned();
        // `tmp` holds the unix socket (passed via -k below).
        let tmp_path = base_path.join("tmp").to_str()
            .unwrap().to_owned();
        fs::create_dir(&data_path)
            .map_err(|e| PsqlServerError::IoError(e))?;
        fs::create_dir(&tmp_path)
            .map_err(|e| PsqlServerError::IoError(e))?;
        // Initialize the cluster with a trusted `postgres` superuser.
        let initdb_out = Command::new(&initdb)
            .args(&["-D", &data_path, "--lc-messages=C",
                    "-U", "postgres", "-A", "trust"])
            .output()
            .expect(&format!("failed to execute {}", initdb));
        if !initdb_out.status.success() {
            return Err(PsqlServerError::InitDbFailed);
        }
        let port = get_unused_port()
            .map_err(|e| PsqlServerError::IoError(e))?;
        // Start the server in the foreground (-F) with log collection off.
        let mut process = Command::new(postgres)
            .args(&["-p", &format!("{}", port),
                    "-D", &data_path,
                    "-k", &tmp_path,
                    "-h", "127.0.0.1",
                    "-F",
                    "-c", "logging_collector=off"])
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .expect("failed to execute psql");
        // Poll until the server is ready, bailing out if it exits early.
        loop {
            if let Some(_exit_code) = process.try_wait()
                .map_err(|e| PsqlServerError::IoError(e))? {
                return Err(PsqlServerError::PostgresFailed);
            }
            let isready_out = Command::new(&pg_isready)
                .args(&["-p", &format!("{}", port),
                        "-h", "127.0.0.1",
                        "-U", "postgres"])
                .output()
                .expect("failed to execute pg_isready");
            if isready_out.status.success() {
                break;
            } else {
                thread::sleep(Duration::from_millis(500))
            }
        }
        // Create the working database used by the tests.
        let createdb_out = Command::new(createdb)
            .args(&["-p", &format!("{}", port),
                    "-h", "127.0.0.1",
                    "-U", "postgres",
                    "test"])
            .output()
            .expect("failed to execute createdb");
        if !createdb_out.status.success() {
            return Err(PsqlServerError::CreateDbFailed);
        }
        Ok(PsqlServer {
            process,
            base_dir: Some(base_dir),
            port
        })
    }
}
impl fmt::Debug for PsqlServer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // NOTE(review): `base_dir` is only `None` while Drop runs, so this
        // unwrap should never fire during normal use — confirm nothing formats
        // the handle during teardown.
        write!(f, "PsqlServer {{ port: {}, base_dir: {} }}",
               self.port,
               self.base_dir.as_ref().unwrap().path().display())
    }
}
impl Drop for PsqlServer {
    /// Best-effort teardown: stop the server and delete its temp directory.
    ///
    /// Panicking inside `Drop` aborts the process when the drop happens during
    /// unwinding (e.g. a failing test), so cleanup errors are ignored instead
    /// of `expect`ed. `kill` in particular fails legitimately when the child
    /// has already exited.
    fn drop(&mut self) {
        let _ = self.process.kill();
        // Reap the child so it does not linger as a zombie process.
        let _ = self.process.wait();
        if let Some(base_dir) = self.base_dir.take() {
            let _ = base_dir.close();
        }
    }
}
| 32.480874 | 82 | 0.533647 |
5d118cae84f51689e144cb6fd7892b655a03ef54 | 6,296 | use std::collections::HashMap;
use std::sync::Arc;
use failure::bail;
use snips_nlu_utils::token::Token;
use crate::errors::*;
use crate::models::FeatureFactory;
use crate::resources::SharedResources;
use crate::slot_filler::features::*;
// Computes CRF input features for each token of an utterance by applying a
// configured set of features, each at one or more token offsets.
pub struct ProbabilisticFeatureProcessor {
    features_offsetters: Vec<FeatureOffsetter>,
}
impl ProbabilisticFeatureProcessor {
    /// Build a processor from the model's feature factory descriptions.
    ///
    /// Each factory may expand to several concrete features; the first factory
    /// that fails to build aborts construction with its error.
    pub fn new(
        features: &[FeatureFactory],
        shared_resources: Arc<SharedResources>,
    ) -> Result<ProbabilisticFeatureProcessor> {
        let features_offsetters = features
            .iter()
            .map(|f| get_features(f, shared_resources.clone()))
            .collect::<Result<Vec<Vec<_>>>>()?
            .into_iter()
            // `.flatten()` is the idiomatic form of `.flat_map(|fs| fs)`.
            .flatten()
            .collect();
        Ok(ProbabilisticFeatureProcessor {
            features_offsetters,
        })
    }
}
impl ProbabilisticFeatureProcessor {
    /// Compute, for each token of `input`, the `(feature_key, value)` pairs to
    /// feed the CRF.
    ///
    /// A feature configured with offset `o`, computed at token `i`, is
    /// attributed to token `i - o` (when in range) under the key produced by
    /// `offsets_with_name`, e.g. `"length[+1]"`.
    // NOTE(review): `input: &&[Token]` is a double reference, presumably to match
    // the caller's borrow — confirm whether a plain `&[Token]` would suffice.
    #[rustfmt::skip]
    pub fn compute_features(&self, input: &&[Token]) -> Result<Vec<Vec<(String, String)>>> {
        let mut features = vec![vec![]; input.len()];
        for offsetter in self.features_offsetters.iter() {
            for i in 0..input.len() {
                if let Some(value) = offsetter.feature.compute(input, i)? {
                    offsetter.offsets_with_name().iter().for_each(|&(offset, ref key)| {
                        // Attribute the value to token `i - offset`, skipping
                        // positions that fall outside the utterance.
                        if i as i32 - offset >= 0 && i as i32 - offset < input.len() as i32 {
                            features[(i as i32 - offset) as usize].push(
                                (key.clone(), value.clone())
                            );
                        }
                    });
                }
            }
        }
        Ok(features)
    }
}
// Pairs a feature with the token offsets at which it should be applied.
struct FeatureOffsetter {
    feature: Box<dyn Feature>,
    offsets: Vec<i32>,
}
impl FeatureOffsetter {
    /// Produce one `(offset, feature key)` pair per configured offset.
    ///
    /// Offset 0 keeps the bare feature name; any other offset gets a signed
    /// bracket suffix, e.g. `"length[+1]"` / `"length[-2]"`.
    fn offsets_with_name(&self) -> Vec<(i32, String)> {
        let base_name = self.feature.name();
        let mut named_offsets = Vec::with_capacity(self.offsets.len());
        for &offset in self.offsets.iter() {
            let key = if offset == 0 {
                base_name.to_string()
            } else {
                format!("{}[{:+}]", base_name, offset)
            };
            named_offsets.push((offset, key));
        }
        named_offsets
    }
}
/// Exposes which `FeatureKind` a feature implementation corresponds to.
pub trait FeatureKindRepr {
    fn feature_kind(&self) -> FeatureKind;
}
/// A single CRF feature: computes an optional string value for a token in
/// context.
pub trait Feature: FeatureKindRepr + Send + Sync {
    /// Feature name; defaults to the kind's identifier.
    fn name(&self) -> String {
        self.feature_kind().identifier().to_string()
    }
    /// Build one or more feature instances from the model's JSON arguments.
    fn build_features(
        args: &HashMap<String, serde_json::Value>,
        shared_resources: Arc<SharedResources>,
    ) -> Result<Vec<Box<dyn Feature>>>
    where
        Self: Sized;
    /// Compute the feature value for `tokens[token_index]`, or `None` when the
    /// feature does not apply at that position.
    fn compute(&self, tokens: &[Token], token_index: usize) -> Result<Option<String>>;
}
// Generates the `get_features` dispatch function, mapping each factory name
// (right column) to its implementing type's `build_features` (left column).
get_features!([
    (IsDigitFeature, is_digit),
    (LengthFeature, length),
    (IsFirstFeature, is_first),
    (IsLastFeature, is_last),
    (NgramFeature, ngram),
    (ShapeNgramFeature, shape_ngram),
    (PrefixFeature, prefix),
    (SuffixFeature, suffix),
    (CustomEntityMatchFeature, entity_match),
    (BuiltinEntityMatchFeature, builtin_entity_match),
    (WordClusterFeature, word_cluster)
]);
#[cfg(test)]
mod tests {
    use super::*;
    use snips_nlu_utils::language::Language;
    use snips_nlu_utils::token::tokenize;
    // Features at offset 0 attach their values to the token they were computed on.
    #[test]
    fn test_compute_features() {
        // Given
        let language = Language::EN;
        let fp = ProbabilisticFeatureProcessor {
            features_offsetters: vec![
                FeatureOffsetter {
                    offsets: vec![0],
                    feature: Box::new(IsDigitFeature {}) as Box<_>,
                },
                FeatureOffsetter {
                    offsets: vec![0],
                    feature: Box::new(LengthFeature {}) as Box<_>,
                },
            ],
        };
        let tokens = tokenize("I prefer 7 over 777", language);
        // When
        let computed_features = fp.compute_features(&tokens.as_slice()).unwrap();
        let expected_features = vec![
            vec![("length".to_string(), "1".to_string())],
            vec![("length".to_string(), "6".to_string())],
            vec![
                ("is_digit".to_string(), "1".to_string()),
                ("length".to_string(), "1".to_string()),
            ],
            vec![("length".to_string(), "4".to_string())],
            vec![
                ("is_digit".to_string(), "1".to_string()),
                ("length".to_string(), "3".to_string()),
            ],
        ];
        // Then
        assert_eq!(expected_features, computed_features);
    }
    // Non-zero offsets shift a feature's value to neighboring tokens and tag
    // the key with a signed suffix, e.g. "length[+1]".
    #[test]
    fn test_offset() {
        // Given
        let language = Language::EN;
        let fp = ProbabilisticFeatureProcessor {
            features_offsetters: vec![
                FeatureOffsetter {
                    offsets: vec![-2, 0, 3],
                    feature: Box::new(IsDigitFeature {}) as Box<_>,
                },
                FeatureOffsetter {
                    offsets: vec![-1, 1],
                    feature: Box::new(LengthFeature {}) as Box<_>,
                },
            ],
        };
        let tokens = tokenize("I prefer 7 over 777", language);
        // When
        let computed_features = fp.compute_features(&tokens.as_slice()).unwrap();
        // Then
        let expected_features = vec![
            vec![("length[+1]".to_string(), "6".to_string())],
            vec![
                ("is_digit[+3]".to_string(), "1".to_string()),
                ("length[-1]".to_string(), "1".to_string()),
                ("length[+1]".to_string(), "1".to_string()),
            ],
            vec![
                ("is_digit".to_string(), "1".to_string()),
                ("length[-1]".to_string(), "6".to_string()),
                ("length[+1]".to_string(), "4".to_string()),
            ],
            vec![
                ("length[-1]".to_string(), "1".to_string()),
                ("length[+1]".to_string(), "3".to_string()),
            ],
            vec![
                ("is_digit[-2]".to_string(), "1".to_string()),
                ("is_digit".to_string(), "1".to_string()),
                ("length[-1]".to_string(), "4".to_string()),
            ],
        ];
        assert_eq!(expected_features, computed_features);
    }
}
| 31.014778 | 93 | 0.499047 |
ef4a571dff5114f339baa2e7c69370d383f61d34 | 11,045 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lazy_static::lazy_static;
use neqo_common::event::Provider;
use neqo_crypto::AuthenticationStatus;
use neqo_http3::{
Error, Header, Http3Client, Http3ClientEvent, Http3OrWebTransportStream, Http3Server,
Http3ServerEvent, Priority,
};
use test_fixture::*;
// 3-byte body ("abc") used by every data-bearing response in these tests.
const RESPONSE_DATA: &[u8] = &[0x61, 0x62, 0x63];
// Final response headers for the headers-only (no DATA frame) cases.
lazy_static! {
    static ref RESPONSE_HEADER_NO_DATA: Vec<Header> =
        vec![Header::new(":status", "200"), Header::new("something", "3")];
}
// 103 Early Hints informational header block.
lazy_static! {
    static ref RESPONSE_HEADER_103: Vec<Header> =
        vec![Header::new(":status", "103"), Header::new("link", "...")];
}
/// Shuttles datagrams between `client` and `server`, one full round trip per
/// iteration, until a client-then-server pass produces no output.
fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) {
    let mut datagram = None;
    loop {
        datagram = client.process(datagram, now()).dgram();
        datagram = server.process(datagram, now()).dgram();
        if datagram.is_none() {
            return;
        }
    }
}
/// Drains server events until a `Headers` event arrives; asserts that the
/// pseudo-headers match the fixed GET issued by
/// `connect_send_and_receive_request` (fin set), and returns the server-side
/// stream. Returns `None` if the event queue empties first.
fn receive_request(server: &mut Http3Server) -> Option<Http3OrWebTransportStream> {
    while let Some(event) = server.next_event() {
        if let Http3ServerEvent::Headers {
            stream,
            headers,
            fin,
        } = event
        {
            assert_eq!(
                &headers,
                &[
                    Header::new(":method", "GET"),
                    Header::new(":scheme", "https"),
                    Header::new(":authority", "something.com"),
                    Header::new(":path", "/")
                ]
            );
            assert!(fin);
            return Some(stream);
        }
    }
    None
}
/// Sends a trailing header block (no pseudo-headers) on `request`.
fn send_trailers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> {
    request.send_headers(&[
        Header::new("something1", "something"),
        Header::new("something2", "3"),
    ])
}
/// Sends the 103 informational header block on `request`.
fn send_informational_headers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> {
    request.send_headers(&RESPONSE_HEADER_103)
}
/// Sends the final 200 response headers announcing a 3-byte body.
fn send_headers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> {
    request.send_headers(&[
        Header::new(":status", "200"),
        Header::new("content-length", "3"),
    ])
}
/// Drains client events and asserts that a header block (either the final 200
/// response or the 103 informational block) and the complete `RESPONSE_DATA`
/// body are both observed. `fin` must not arrive with the header block (the
/// data follows), but must arrive with the data read.
fn process_client_events(conn: &mut Http3Client) {
    let mut response_header_found = false;
    let mut response_data_found = false;
    while let Some(event) = conn.next_event() {
        match event {
            Http3ClientEvent::HeaderReady { headers, fin, .. } => {
                assert!(
                    (headers.as_ref()
                        == [
                            Header::new(":status", "200"),
                            Header::new("content-length", "3"),
                        ])
                        || (headers.as_ref() == *RESPONSE_HEADER_103)
                );
                assert!(!fin);
                response_header_found = true;
            }
            Http3ClientEvent::DataReadable { stream_id } => {
                let mut buf = [0u8; 100];
                let (amount, fin) = conn.read_data(now(), stream_id, &mut buf).unwrap();
                assert!(fin);
                assert_eq!(amount, RESPONSE_DATA.len());
                assert_eq!(&buf[..RESPONSE_DATA.len()], RESPONSE_DATA);
                response_data_found = true;
            }
            _ => {}
        }
    }
    assert!(response_header_found);
    assert!(response_data_found);
}
/// Drains client events for a headers-only exchange: asserts that
/// `RESPONSE_HEADER_NO_DATA` is received, and that fin arrives either on the
/// header block itself or via a subsequent zero-length data read.
fn process_client_events_no_data(conn: &mut Http3Client) {
    let mut response_header_found = false;
    let mut fin_received = false;
    while let Some(event) = conn.next_event() {
        match event {
            Http3ClientEvent::HeaderReady { headers, fin, .. } => {
                assert_eq!(headers.as_ref(), *RESPONSE_HEADER_NO_DATA);
                fin_received = fin;
                response_header_found = true;
            }
            Http3ClientEvent::DataReadable { stream_id } => {
                let mut buf = [0u8; 100];
                let (amount, fin) = conn.read_data(now(), stream_id, &mut buf).unwrap();
                assert!(fin);
                fin_received = true;
                assert_eq!(amount, 0);
            }
            _ => {}
        }
    }
    assert!(response_header_found);
    assert!(fin_received);
}
/// Establishes a client/server pair (driving the handshake and completing the
/// certificate-authentication step), issues a fin-closed
/// `GET https://something.com/` as stream 0, and returns both endpoints plus
/// the server-side request stream.
fn connect_send_and_receive_request() -> (Http3Client, Http3Server, Http3OrWebTransportStream) {
    let mut hconn_c = default_http3_client();
    let mut hconn_s = default_http3_server();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded);
    assert!(hconn_c.events().any(authentication_needed));
    hconn_c.authenticated(AuthenticationStatus::Ok, now());
    exchange_packets(&mut hconn_c, &mut hconn_s);
    let req = hconn_c
        .fetch(
            now(),
            "GET",
            &("https", "something.com", "/"),
            &[],
            Priority::default(),
        )
        .unwrap();
    // First client-initiated request stream is id 0.
    assert_eq!(req, 0);
    hconn_c.stream_close_send(req).unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    let request = receive_request(&mut hconn_s).unwrap();
    (hconn_c, hconn_s, request)
}
// Headers, data, trailers, and fin all sent in a single flight are delivered.
#[test]
fn response_trailers1() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    send_headers(&mut request).unwrap();
    request.send_data(RESPONSE_DATA).unwrap();
    send_trailers(&mut request).unwrap();
    request.stream_close_send().unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events(&mut hconn_c);
}
// Trailers sent in a separate flight after the data has been delivered.
#[test]
fn response_trailers2() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    send_headers(&mut request).unwrap();
    request.send_data(RESPONSE_DATA).unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    send_trailers(&mut request).unwrap();
    request.stream_close_send().unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events(&mut hconn_c);
}
// Trailers and the closing fin each delivered in their own flight.
#[test]
fn response_trailers3() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    send_headers(&mut request).unwrap();
    request.send_data(RESPONSE_DATA).unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    send_trailers(&mut request).unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    request.stream_close_send().unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events(&mut hconn_c);
}
// Trailers following a headers-only response (no DATA frames) are accepted.
#[test]
fn response_trailers_no_data() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    send_trailers(&mut request).unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    request.stream_close_send().unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events_no_data(&mut hconn_c);
}
// A second trailer block is rejected with `InvalidInput`.
#[test]
fn multiple_response_trailers() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    send_headers(&mut request).unwrap();
    request.send_data(RESPONSE_DATA).unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    send_trailers(&mut request).unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    assert_eq!(send_trailers(&mut request), Err(Error::InvalidInput));
    request.stream_close_send().unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events(&mut hconn_c);
}
// DATA after trailers is rejected with `InvalidInput`.
#[test]
fn data_after_trailer() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    send_headers(&mut request).unwrap();
    request.send_data(RESPONSE_DATA).unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    send_trailers(&mut request).unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    assert_eq!(request.send_data(RESPONSE_DATA), Err(Error::InvalidInput));
    request.stream_close_send().unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events(&mut hconn_c);
}
// Trailers after the send side is closed fail with `InvalidStreamId`.
#[test]
fn trailers_after_close() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    send_headers(&mut request).unwrap();
    request.send_data(RESPONSE_DATA).unwrap();
    request.stream_close_send().unwrap();
    assert_eq!(send_trailers(&mut request), Err(Error::InvalidStreamId));
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events(&mut hconn_c);
}
// A second final header block is rejected with `InvalidHeader`.
#[test]
fn multiple_response_headers() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap();
    assert_eq!(
        request.send_headers(&RESPONSE_HEADER_NO_DATA),
        Err(Error::InvalidHeader)
    );
    request.stream_close_send().unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events_no_data(&mut hconn_c);
}
// 1xx (informational) headers sent after the final response headers are
// rejected with `InvalidHeader`.
#[test]
fn informational_after_response_headers() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap();
    assert_eq!(
        send_informational_headers(&mut request),
        Err(Error::InvalidHeader)
    );
    request.stream_close_send().unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events_no_data(&mut hconn_c);
}
// DATA directly after a 103 block is rejected (`InvalidInput`) until the
// final response headers have been sent.
#[test]
fn data_after_informational() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    send_informational_headers(&mut request).unwrap();
    assert_eq!(request.send_data(RESPONSE_DATA), Err(Error::InvalidInput));
    send_headers(&mut request).unwrap();
    request.send_data(RESPONSE_DATA).unwrap();
    request.stream_close_send().unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events(&mut hconn_c);
}
// Re-sending response-style headers (containing `:status`) after data is
// rejected with `InvalidHeader`; only trailer-style blocks are allowed there.
#[test]
fn non_trailers_headers_after_data() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    send_headers(&mut request).unwrap();
    request.send_data(RESPONSE_DATA).unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    assert_eq!(
        request.send_headers(&RESPONSE_HEADER_NO_DATA),
        Err(Error::InvalidHeader)
    );
    request.stream_close_send().unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events(&mut hconn_c);
}
// DATA before any header block is rejected with `InvalidInput`.
#[test]
fn data_before_headers() {
    let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
    assert_eq!(request.send_data(RESPONSE_DATA), Err(Error::InvalidInput));
    send_headers(&mut request).unwrap();
    request.send_data(RESPONSE_DATA).unwrap();
    request.stream_close_send().unwrap();
    exchange_packets(&mut hconn_c, &mut hconn_s);
    process_client_events(&mut hconn_c);
}
| 33.469697 | 96 | 0.644726 |
2fbdef01109cf1b74944235b78e39c3faf30c397 | 2,234 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use std::fs::File as FsFile;
use std::io::BufReader;
use std::path::{Path, PathBuf};

use common::PerfLogger;

use crate::compiler_state::CompilerState;
use crate::config::Config;
use crate::errors::{Error, Result};
use crate::FileSourceResult;

use super::File;
/// The purpose of this module is to handle saved state and list of changed files
/// from the external source, and not from the watchman
pub struct ExternalFileSource<'config> {
    /// Compiler configuration; supplies `load_saved_state_file`,
    /// `changed_files_list` and `root_dir` (see `create_compiler_state`).
    pub config: &'config Config,
}
/// An externally supplied list of changed files plus the root directory they
/// were resolved against.
#[derive(Debug)]
pub struct ExternalFileSourceResult {
    /// Changed files deserialized from the JSON changed-files list.
    pub files: Vec<File>,
    /// Root directory for this result (populated from the config's `root_dir`).
    pub resolved_root: PathBuf,
}
impl ExternalFileSourceResult {
fn read_from_fs(path: &PathBuf, resolved_root: PathBuf) -> Result<Self> {
let file = FsFile::open(path).map_err(|err| Error::ReadFileError {
file: path.clone(),
source: err,
})?;
let files: Vec<File> =
serde_json::from_reader(BufReader::new(file)).map_err(|err| Error::SerdeError {
file: path.clone(),
source: err,
})?;
Ok(Self {
files,
resolved_root,
})
}
}
impl<'config> ExternalFileSource<'config> {
    /// Wraps a borrowed compiler config.
    pub fn new(config: &'config Config) -> Self {
        Self { config }
    }
    /// Builds a `CompilerState` from the saved-state file and the externally
    /// provided changed-files list (bypassing watchman).
    ///
    /// Panics if `load_saved_state_file` or `changed_files_list` is unset in
    /// the config — callers must guarantee both options are present.
    pub fn create_compiler_state(&self, perf_logger: &impl PerfLogger) -> Result<CompilerState> {
        // Both options are mandatory for this file source; absence is a
        // caller bug, hence the unwraps.
        let load_saved_state_file = self.config.load_saved_state_file.as_ref().unwrap();
        let changed_files_list = self.config.changed_files_list.as_ref().unwrap();
        let root_dir = &self.config.root_dir;
        let mut compiler_state = CompilerState::deserialize_from_file(&load_saved_state_file)?;
        // Queue the external changes as a pending file-source change, then
        // fold them into the deserialized state.
        compiler_state
            .pending_file_source_changes
            .write()
            .unwrap()
            .push(FileSourceResult::External(
                ExternalFileSourceResult::read_from_fs(changed_files_list, root_dir.clone())?,
            ));
        compiler_state.merge_file_source_changes(&self.config, perf_logger, true)?;
        Ok(compiler_state)
    }
}
| 30.189189 | 97 | 0.644584 |
f70ffd1afe3a067742223b7bbf658068fc06a759 | 2,538 | #![doc(alias = "user.update")]
//! Specified user updates their account.
use super::*;
/// [`user.update`](https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#userupdate): user updates their account.
///
/// Construct via the derived builder, e.g.
/// `UserUpdateV1::builder().user_id("1337").build()`.
#[derive(Clone, Debug, typed_builder::TypedBuilder, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "deny_unknown_fields", serde(deny_unknown_fields))]
#[non_exhaustive]
pub struct UserUpdateV1 {
    /// The user ID for the user you want update notifications for.
    #[builder(setter(into))]
    pub user_id: types::UserId,
}
impl EventSubscription for UserUpdateV1 {
    type Payload = UserUpdateV1Payload;
    const EVENT_TYPE: EventType = EventType::UserUpdate;
    // No scope is required to subscribe; `user:read:email` additionally
    // unlocks the `email` field in the payload.
    #[cfg(feature = "twitch_oauth2")]
    const OPT_SCOPE: &'static [twitch_oauth2::Scope] = &[twitch_oauth2::Scope::UserReadEmail];
    #[cfg(feature = "twitch_oauth2")]
    const SCOPE: &'static [twitch_oauth2::Scope] = &[];
    const VERSION: &'static str = "1";
}
/// [`user.update`](UserUpdateV1) response payload.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "deny_unknown_fields", serde(deny_unknown_fields))]
#[non_exhaustive]
pub struct UserUpdateV1Payload {
    /// The user’s description.
    pub description: String,
    /// The user’s email. Only included if you have the [`user:read:email`](twitch_oauth2::Scope::UserReadEmail) scope for the user.
    pub email: Option<String>,
    /// The user’s ID.
    pub user_id: types::UserId,
    /// The user’s login name.
    pub user_login: types::UserName,
    /// The user’s display name.
    pub user_name: types::DisplayName,
}
#[cfg(test)]
#[test]
fn parse_payload() {
    // Parses a representative `user.update` notification and checks it
    // survives a serialize/deserialize round trip.
    let payload = r#"
    {
        "subscription": {
            "id": "f1c2a387-161a-49f9-a165-0f21d7a4e1c4",
            "type": "user.update",
            "version": "1",
            "status": "enabled",
            "cost": 0,
            "condition": {
               "user_id": "1337"
            },
             "transport": {
                "method": "webhook",
                "callback": "https://example.com/webhooks/callback"
            },
            "created_at": "2019-11-16T10:11:12.123Z"
        },
        "event": {
            "user_id": "1337",
            "user_login": "cool_user",
            "user_name": "Cool_User",
            "email": "[email protected]",
            "description": "cool description"
        }
    }
    "#;
    let val = dbg!(crate::eventsub::Payload::parse(payload).unwrap());
    crate::tests::roundtrip(&val)
}
| 33.84 | 132 | 0.611111 |
1ae97d23fc08bb409baa1015b3b3e2829ccc5ef4 | 13,257 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Typesafe wrappers around the /pkgfs/needs filesystem.
use {
crate::iou,
failure::Fail,
fidl_fuchsia_io::DirectoryProxy,
fuchsia_merkle::{Hash, ParseHashError},
fuchsia_zircon::Status,
futures::prelude::*,
std::collections::HashSet,
};
#[derive(Debug, Fail)]
#[allow(missing_docs)]
pub enum ListNeedsError {
    /// Opening the per-package needs directory failed (other than NOT_FOUND,
    /// which `enumerate_needs_dir` treats as "no needs").
    #[fail(display = "while opening needs dir: {}", _0)]
    OpenDir(#[cause] iou::OpenError),
    /// Reading the directory entries failed.
    #[fail(display = "while listing needs dir: {}", _0)]
    ReadDir(#[cause] files_async::Error),
    /// A directory entry name was not a valid merkle root.
    #[fail(display = "unable to parse a need blob id: {}", _0)]
    ParseError(#[cause] ParseHashError),
}
/// An open handle to /pkgfs/needs
#[derive(Debug, Clone)]
pub struct Client {
    // FIDL proxy for the /pkgfs/needs directory.
    proxy: DirectoryProxy,
}
impl Client {
    /// Returns a client connected to pkgfs from the current component's namespace
    pub fn open_from_namespace() -> Result<Self, failure::Error> {
        let proxy = iou::open_directory_from_namespace("/pkgfs/needs")?;
        Ok(Client { proxy })
    }
    /// Returns a client connected to pkgfs from the given pkgfs root dir.
    pub fn open_from_pkgfs_root(pkgfs: &DirectoryProxy) -> Result<Self, failure::Error> {
        Ok(Client {
            proxy: iou::open_directory_no_describe(
                pkgfs,
                "needs",
                fidl_fuchsia_io::OPEN_RIGHT_READABLE,
            )?,
        })
    }
    /// Returns a stream of chunks of blobs that are needed to resolve the package specified by
    /// `pkg_merkle` provided that the `pkg_merkle` blob has previously been written to
    /// /pkgfs/install/pkg/. The package should be available in /pkgfs/versions when this stream
    /// terminates without error.
    ///
    /// Each stream item is the full set of currently-missing blobs; the
    /// stream ends when that set becomes empty, or after yielding one error.
    pub fn list_needs(
        &self,
        pkg_merkle: Hash,
    ) -> impl Stream<Item = Result<HashSet<Hash>, ListNeedsError>> + '_ {
        // None if stream is terminated and should not continue to enumerate needs.
        let state = Some(&self.proxy);
        futures::stream::unfold(state, move |state: Option<&DirectoryProxy>| {
            async move {
                if let Some(proxy) = state {
                    match enumerate_needs_dir(proxy, pkg_merkle).await {
                        Ok(needs) => {
                            if needs.is_empty() {
                                // No remaining needs: terminate the stream.
                                None
                            } else {
                                // Yield this snapshot and keep polling.
                                Some((Ok(needs), Some(proxy)))
                            }
                        }
                        // report the error and terminate the stream.
                        Err(err) => return Some((Err(err), None)),
                    }
                } else {
                    None
                }
            }
        })
    }
}
/// Lists all blobs currently in the `pkg_merkle`'s needs directory.
///
/// A missing needs directory (NOT_FOUND) means the package has no
/// outstanding needs, so an empty set is returned. Any other open/read/parse
/// failure maps to the corresponding `ListNeedsError` variant.
async fn enumerate_needs_dir(
    pkgfs_needs: &DirectoryProxy,
    pkg_merkle: Hash,
) -> Result<HashSet<Hash>, ListNeedsError> {
    let path = format!("packages/{}", pkg_merkle);
    let flags = fidl_fuchsia_io::OPEN_RIGHT_READABLE;
    let needs_dir = match iou::open_directory(pkgfs_needs, &path, flags).await {
        Ok(dir) => dir,
        Err(iou::OpenError::OpenError(Status::NOT_FOUND)) => return Ok(HashSet::new()),
        Err(e) => return Err(ListNeedsError::OpenDir(e)),
    };
    let entries = files_async::readdir(&needs_dir).await.map_err(ListNeedsError::ReadDir)?;
    // Each file entry's name is a blob merkle root; non-file entries are
    // ignored. Collecting into Result short-circuits on the first parse
    // failure. (The previous `Ok(collect::<Result<_, _>>()?)` was a
    // redundant unwrap/rewrap — clippy `needless_question_mark`.)
    entries
        .into_iter()
        .filter_map(|entry| {
            if entry.kind == files_async::DirentKind::File {
                Some(entry.name.parse().map_err(ListNeedsError::ParseError))
            } else {
                // Ignore unknown entries.
                None
            }
        })
        .collect()
}
#[cfg(test)]
mod tests {
    use {
        super::*,
        crate::install::{BlobCreateError, BlobKind, BlobWriteSuccess},
        fuchsia_pkg_testing::PackageBuilder,
        maplit::hashset,
        matches::assert_matches,
        pkgfs_ramdisk::PkgfsRamdisk,
    };
    // A merkle with no pending installation yields a stream that terminates
    // immediately with no items.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn no_needs_is_empty_needs() {
        let pkgfs = PkgfsRamdisk::start().unwrap();
        let root = pkgfs.root_dir_proxy().unwrap();
        let client = Client::open_from_pkgfs_root(&root).unwrap();
        let merkle = fuchsia_merkle::MerkleTree::from_reader(std::io::empty()).unwrap().root();
        let mut needs = client.list_needs(merkle).boxed();
        assert_matches!(needs.next().await, None);
        pkgfs.stop().await.unwrap();
    }
    // The needs set shrinks as each missing blob is written, and the stream
    // terminates once all content blobs are present.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn list_needs() {
        let pkgfs = PkgfsRamdisk::start().unwrap();
        let root = pkgfs.root_dir_proxy().unwrap();
        let install = crate::install::Client::open_from_pkgfs_root(&root).unwrap();
        let client = Client::open_from_pkgfs_root(&root).unwrap();
        let pkg = PackageBuilder::new("list-needs")
            .add_resource_at("data/blob1", "blob1".as_bytes())
            .add_resource_at("data/blob2", "blob2".as_bytes())
            .build()
            .await
            .unwrap();
        let pkg_contents = pkg.meta_contents().unwrap().contents().to_owned();
        install.write_meta_far(&pkg).await;
        let mut needs = client.list_needs(pkg.meta_far_merkle_root().to_owned()).boxed();
        assert_matches!(
            needs.next().await,
            Some(Ok(needs)) if needs == hashset! {
                pkg_contents["data/blob1"],
                pkg_contents["data/blob2"],
            }
        );
        install.write_blob(pkg_contents["data/blob1"], BlobKind::Data, "blob1".as_bytes()).await;
        assert_matches!(
            needs.next().await,
            Some(Ok(needs)) if needs == hashset! {
                pkg_contents["data/blob2"],
            }
        );
        install.write_blob(pkg_contents["data/blob2"], BlobKind::Data, "blob2".as_bytes()).await;
        assert_matches!(needs.next().await, None);
        pkgfs.stop().await.unwrap();
    }
    // A blob shared by two packages stays in the second package's needs while
    // a partial write of it (started for the first package) is in flight.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn shared_blob_still_needed_while_being_written() {
        let pkgfs = PkgfsRamdisk::start().unwrap();
        let root = pkgfs.root_dir_proxy().unwrap();
        let install = crate::install::Client::open_from_pkgfs_root(&root).unwrap();
        let versions = crate::versions::Client::open_from_pkgfs_root(&root).unwrap();
        let client = Client::open_from_pkgfs_root(&root).unwrap();
        const SHARED_BLOB_CONTENTS: &[u8] = "shared between both packages".as_bytes();
        let pkg1 = PackageBuilder::new("shared-content-a")
            .add_resource_at("data/shared", SHARED_BLOB_CONTENTS)
            .build()
            .await
            .unwrap();
        let pkg2 = PackageBuilder::new("shared-content-b")
            .add_resource_at("data/shared", SHARED_BLOB_CONTENTS)
            .build()
            .await
            .unwrap();
        let pkg_contents = pkg1.meta_contents().unwrap().contents().to_owned();
        install.write_meta_far(&pkg1).await;
        // start writing the shared blob, but don't finish.
        let (blob, closer) =
            install.create_blob(pkg_contents["data/shared"], BlobKind::Data).await.unwrap();
        let blob = blob.truncate(SHARED_BLOB_CONTENTS.len() as u64).await.unwrap();
        let (first, second) = SHARED_BLOB_CONTENTS.split_at(10);
        let blob = match blob.write(first).await.unwrap() {
            BlobWriteSuccess::MoreToWrite(blob) => blob,
            BlobWriteSuccess::Done => unreachable!(),
        };
        // start installing the second package, and verify the shared blob is listed in needs
        install.write_meta_far(&pkg2).await;
        let mut needs = client.list_needs(pkg2.meta_far_merkle_root().to_owned()).boxed();
        assert_matches!(
            needs.next().await,
            Some(Ok(needs)) if needs == hashset! {
                pkg_contents["data/shared"],
            }
        );
        // finish writing the shared blob, and verify both packages are now complete.
        assert_matches!(blob.write(second).await, Ok(BlobWriteSuccess::Done));
        closer.close().await;
        assert_matches!(needs.next().await, None);
        let pkg1_dir =
            versions.open_package(pkg1.meta_far_merkle_root().to_owned(), None).await.unwrap();
        pkg1.verify_contents(&pkg1_dir).await.unwrap();
        let pkg2_dir =
            versions.open_package(pkg2.meta_far_merkle_root().to_owned(), None).await.unwrap();
        pkg2.verify_contents(&pkg2_dir).await.unwrap();
        pkgfs.stop().await.unwrap();
    }
    // Blobs already in blobfs before installation starts never show up as
    // needs; only the genuinely missing blob is listed.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn initially_present_blobs_are_not_needed() {
        let pkgfs = PkgfsRamdisk::start().unwrap();
        let root = pkgfs.root_dir_proxy().unwrap();
        let install = crate::install::Client::open_from_pkgfs_root(&root).unwrap();
        let client = Client::open_from_pkgfs_root(&root).unwrap();
        const PRESENT_BLOB_CONTENTS: &[u8] = "already here".as_bytes();
        let pkg = PackageBuilder::new("partially-cached")
            .add_resource_at("data/present", PRESENT_BLOB_CONTENTS)
            .add_resource_at("data/needed", "need to fetch this one".as_bytes())
            .build()
            .await
            .unwrap();
        let pkg_contents = pkg.meta_contents().unwrap().contents().to_owned();
        // write the present blob and start installing the package.
        pkgfs
            .blobfs()
            .add_blob_from(
                &fuchsia_merkle::MerkleTree::from_reader(PRESENT_BLOB_CONTENTS).unwrap().root(),
                PRESENT_BLOB_CONTENTS,
            )
            .unwrap();
        install.write_meta_far(&pkg).await;
        // confirm that the needed blob is needed and the present blob is present.
        let mut needs = client.list_needs(pkg.meta_far_merkle_root().to_owned()).boxed();
        assert_matches!(
            needs.next().await,
            Some(Ok(needs)) if needs == hashset! {
                pkg_contents["data/needed"],
            }
        );
        pkgfs.stop().await.unwrap();
    }
    // A partially written blob remains needed, a concurrent create of the
    // same blob is rejected, and completion fulfills the package.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn racing_blob_writes_do_not_fulfill_partial_blobs() {
        let pkgfs = PkgfsRamdisk::start().unwrap();
        let root = pkgfs.root_dir_proxy().unwrap();
        let install = crate::install::Client::open_from_pkgfs_root(&root).unwrap();
        let versions = crate::versions::Client::open_from_pkgfs_root(&root).unwrap();
        let client = Client::open_from_pkgfs_root(&root).unwrap();
        const REQUIRED_BLOB_CONTENTS: &[u8] = "don't fulfill me early please".as_bytes();
        let pkg = PackageBuilder::new("partially-cached")
            .add_resource_at("data/required", REQUIRED_BLOB_CONTENTS)
            .build()
            .await
            .unwrap();
        let pkg_contents = pkg.meta_contents().unwrap().contents().to_owned();
        // write the package meta far and verify the needed blob is needed.
        install.write_meta_far(&pkg).await;
        let mut needs = client.list_needs(pkg.meta_far_merkle_root().to_owned()).boxed();
        assert_matches!(
            needs.next().await,
            Some(Ok(needs)) if needs == hashset! {
                pkg_contents["data/required"],
            }
        );
        // start writing the content blob, but don't finish yet.
        let (blob, closer) =
            install.create_blob(pkg_contents["data/required"], BlobKind::Data).await.unwrap();
        let blob = blob.truncate(REQUIRED_BLOB_CONTENTS.len() as u64).await.unwrap();
        let blob = match blob.write("don't ".as_bytes()).await.unwrap() {
            BlobWriteSuccess::MoreToWrite(blob) => blob,
            BlobWriteSuccess::Done => unreachable!(),
        };
        // verify the blob is still needed.
        assert_matches!(
            needs.next().await,
            Some(Ok(needs)) if needs == hashset! {
                pkg_contents["data/required"],
            }
        );
        // trying to start writing the blob again fails.
        assert_matches!(
            install.create_blob(pkg_contents["data/required"], BlobKind::Data).await,
            Err(BlobCreateError::ConcurrentWrite)
        );
        // no really, the blob is still needed.
        assert_matches!(
            needs.next().await,
            Some(Ok(needs)) if needs == hashset! {
                pkg_contents["data/required"],
            }
        );
        // finish writing the blob.
        assert_matches!(
            blob.write("fulfill me early please".as_bytes()).await,
            Ok(BlobWriteSuccess::Done)
        );
        closer.close().await;
        // verify there are no more needs and the package is readable.
        assert_matches!(needs.next().await, None);
        let pkg_dir =
            versions.open_package(pkg.meta_far_merkle_root().to_owned(), None).await.unwrap();
        pkg.verify_contents(&pkg_dir).await.unwrap();
        pkgfs.stop().await.unwrap();
    }
}
| 37.449153 | 97 | 0.593271 |
3a2f6bd2fc2e2454e3ed80c6a0f5b1c71f30614c | 106,391 | // Copyright 2017 TiKV Project Authors. Licensed under Apache-2.0.
use std::collections::BTreeMap;
use std::iter::{self, FromIterator};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use crate::coprocessor::Endpoint;
use crate::raftstore::store::{Callback, CasualMessage};
use crate::server::gc_worker::GCWorker;
use crate::server::load_statistics::ThreadLoad;
use crate::server::metrics::*;
use crate::server::snap::Task as SnapTask;
use crate::server::transport::RaftStoreRouter;
use crate::server::Error;
use crate::storage::kv::Error as EngineError;
use crate::storage::lock_manager::LockMgr;
use crate::storage::mvcc::{Error as MvccError, LockType, Write as MvccWrite, WriteType};
use crate::storage::txn::Error as TxnError;
use crate::storage::{
self, Engine, Key, Mutation, Options, PointGetCommand, Storage, TxnStatus, Value,
};
use futures::executor::{self, Notify, Spawn};
use futures::{future, Async, Future, Sink, Stream};
use grpcio::{
ClientStreamingSink, DuplexSink, Error as GrpcError, RequestStream, RpcContext, RpcStatus,
RpcStatusCode, ServerStreamingSink, UnarySink, WriteFlags,
};
use kvproto::coprocessor::*;
use kvproto::errorpb::{Error as RegionError, ServerIsBusy};
use kvproto::kvrpcpb::{self, *};
use kvproto::raft_cmdpb::{CmdType, RaftCmdRequest, RaftRequestHeader, Request as RaftRequest};
use kvproto::raft_serverpb::*;
use kvproto::tikvpb::*;
use prometheus::HistogramTimer;
use tikv_util::collections::HashMap;
use tikv_util::future::{paired_future_callback, AndThenWith};
use tikv_util::metrics::HistogramReader;
use tikv_util::mpsc::batch::{unbounded, BatchReceiver, Sender};
use tikv_util::timer::GLOBAL_TIMER_HANDLE;
use tikv_util::worker::Scheduler;
use tokio_threadpool::{Builder as ThreadPoolBuilder, ThreadPool};
// Busy-reason strings — NOTE(review): presumably reported via ServerIsBusy
// region errors later in this file; confirm at the use sites.
const SCHEDULER_IS_BUSY: &str = "scheduler is busy";
const GC_WORKER_IS_BUSY: &str = "gc worker is busy";
const GRPC_MSG_MAX_BATCH_SIZE: usize = 128;
const GRPC_MSG_NOTIFY_SIZE: usize = 8;
// BatchLimiter: number of ticks between statistic refreshes, and the
// thread-load fraction below which batching is switched back off.
const REQUEST_BATCH_LIMITER_SAMPLE_WINDOW: usize = 30;
const REQUEST_BATCH_LIMITER_LOW_LOAD_RATIO: f32 = 0.3;
/// Identifies a region at a specific version: the region epoch
/// (conf_ver/version) plus raft term, as carried in a request `Context`.
/// Used as part of the key when grouping batched reads.
#[derive(Hash, PartialEq, Eq, Debug)]
struct RegionVerId {
    region_id: u64,
    conf_ver: u64,
    version: u64,
    term: u64,
}
impl RegionVerId {
    /// Extracts the region/epoch/term tuple from a request context.
    #[inline]
    fn from_context(ctx: &Context) -> Self {
        RegionVerId {
            region_id: ctx.get_region_id(),
            conf_ver: ctx.get_region_epoch().get_conf_ver(),
            version: ctx.get_region_epoch().get_version(),
            term: ctx.get_term(),
        }
    }
}
/// Kinds of requests that may be fused into a request batch; `as_str` is the
/// metric label used for the batch histograms.
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord)]
enum BatchableRequestKind {
    PointGet,
    Prewrite,
    Commit,
}
impl BatchableRequestKind {
    /// Returns the metric label for this request kind.
    fn as_str(&self) -> &str {
        // The previous `&"literal"` produced a `&&str` that only compiled via
        // deref coercion; a plain `&'static str` literal is the idiomatic form.
        match self {
            BatchableRequestKind::PointGet => "point_get",
            BatchableRequestKind::Prewrite => "prewrite",
            BatchableRequestKind::Commit => "commit",
        }
    }
}
/// BatchLimiter controls submit timing of request batch.
struct BatchLimiter {
    // Which request kind this limiter governs (metric label via `as_str`).
    cmd: BatchableRequestKind,
    // Maximum wait before a batch becomes due; `None` disables the limiter.
    timeout: Option<Duration>,
    last_submit_time: Instant,
    // Source for observed request latency samples.
    latency_reader: HistogramReader,
    // Running average of observed latency, in milliseconds.
    latency_estimation: f64,
    thread_load_reader: Arc<ThreadLoad>,
    // Running average of thread load during high-latency periods.
    thread_load_estimation: usize,
    // Ticks observed since the last statistics refresh.
    sample_size: usize,
    // Whether batching is currently considered worthwhile.
    enable_batch: bool,
    // Number of commands examined for the current batch.
    batch_input: usize,
}
impl BatchLimiter {
    /// Construct a new `BatchLimiter` with provided timeout-duration,
    /// and reader on latency and thread-load.
    fn new(
        cmd: BatchableRequestKind,
        timeout: Option<Duration>,
        latency_reader: HistogramReader,
        thread_load_reader: Arc<ThreadLoad>,
    ) -> Self {
        BatchLimiter {
            cmd,
            timeout,
            last_submit_time: Instant::now(),
            latency_reader,
            latency_estimation: 0.0,
            // Start with a conservative load estimate; refined in
            // `observe_tick`.
            thread_load_estimation: 100,
            sample_size: 0,
            enable_batch: false,
            batch_input: 0,
        }
    }
    /// Whether this batch limiter is disabled.
    #[inline]
    fn disabled(&self) -> bool {
        self.timeout.is_none()
    }
    /// Whether the batch is timely due to be submitted.
    #[inline]
    fn is_due(&self, now: Instant) -> bool {
        if let Some(timeout) = self.timeout {
            now - self.last_submit_time >= timeout
        } else {
            // No timeout configured: every batch is immediately due.
            true
        }
    }
    /// Whether current batch needs more requests.
    #[inline]
    fn needs_more(&self) -> bool {
        self.enable_batch
    }
    /// Observe a tick from timer guard. Limiter will update statistics at this point.
    ///
    /// Batching is enabled when, over a sample window, thread load exceeds 70
    /// and the averaged latency exceeds twice the timeout; it is disabled
    /// again once thread load drops below
    /// `REQUEST_BATCH_LIMITER_LOW_LOAD_RATIO` of the estimated busy load.
    #[inline]
    fn observe_tick(&mut self) {
        if self.disabled() {
            return;
        }
        self.sample_size += 1;
        if self.enable_batch {
            // check if thread load is too low, which means busy hour has passed.
            if self.thread_load_reader.load()
                < (self.thread_load_estimation as f32 * REQUEST_BATCH_LIMITER_LOW_LOAD_RATIO)
                as usize
            {
                self.enable_batch = false;
            }
        } else if self.sample_size > REQUEST_BATCH_LIMITER_SAMPLE_WINDOW {
            self.sample_size = 0;
            // Histogram average is in seconds; convert to milliseconds.
            let latency = self.latency_reader.read_latest_avg() * 1000.0;
            let load = self.thread_load_reader.load();
            // Simple running average of latency across windows.
            self.latency_estimation = (self.latency_estimation + latency) / 2.0;
            if load > 70 {
                // thread load is less sensitive to workload,
                // a small barrier here to make sure we have good samples of thread load.
                let timeout = self.timeout.unwrap();
                if latency > timeout.as_millis() as f64 * 2.0 {
                    self.thread_load_estimation = (self.thread_load_estimation + load) / 2;
                }
                if self.latency_estimation > timeout.as_millis() as f64 * 2.0 {
                    self.enable_batch = true;
                    self.latency_estimation = 0.0;
                }
            }
        }
    }
    /// Observe the size of commands been examined by batcher.
    /// Command may not be batched but must have the valid type for this batch.
    #[inline]
    fn observe_input(&mut self, size: usize) {
        self.batch_input += size;
    }
    /// Observe the time and output size of one batch submit.
    #[inline]
    fn observe_submit(&mut self, now: Instant, size: usize) {
        self.last_submit_time = now;
        if self.enable_batch {
            // Record batch size and fuse ratio (input commands per submitted
            // command) for monitoring.
            REQUEST_BATCH_SIZE_HISTOGRAM_VEC
                .with_label_values(&[self.cmd.as_str()])
                .observe(self.batch_input as f64);
            if size > 0 {
                REQUEST_BATCH_RATIO_HISTOGRAM_VEC
                    .with_label_values(&[self.cmd.as_str()])
                    .observe(self.batch_input as f64 / size as f64);
            }
        }
        self.batch_input = 0;
    }
}
/// Batcher buffers specific requests in one stream of `batch_commands` in a batch for bulk submit.
///
/// Generic over the storage engine `E` and lock manager `L` of the `Storage`
/// the batch is eventually submitted to.
trait Batcher<E: Engine, L: LockMgr> {
    /// Try to batch single batch_command request, returns whether the request is stashed.
    /// One batcher must only process requests from one unique command stream.
    fn filter(
        &mut self,
        request_id: u64,
        request: &mut batch_commands_request::request::Cmd,
    ) -> bool;
    /// Submit all batched requests to store. `is_empty` always returns true after this operation.
    /// Returns number of fused commands been submitted.
    fn submit(
        &mut self,
        tx: &Sender<(u64, batch_commands_response::Response)>,
        storage: &Storage<E, L>,
    ) -> usize;
    /// Whether this batcher is empty of buffered requests.
    fn is_empty(&self) -> bool;
}
/// Grouping key for batched point reads: a region version plus an optional
/// raw-KV column family.
#[derive(Hash, PartialEq, Eq, Debug)]
struct ReadId {
    region: RegionVerId,
    // None in this field stands for transactional read.
    cf: Option<String>,
}
impl ReadId {
    /// Builds a batching key from a request context and optional column
    /// family (`None` = transactional `get`, `Some` = `raw_get`).
    #[inline]
    fn from_context_cf(ctx: &Context, cf: Option<String>) -> Self {
        ReadId {
            region: RegionVerId::from_context(ctx),
            cf,
        }
    }
}
/// ReadBatcher batches normal-priority `raw_get` and `get` requests to the same region.
struct ReadBatcher {
    // Buffered reads grouped by (region version, optional cf): the request
    // ids paired element-wise with their point-get commands.
    router: HashMap<ReadId, (Vec<u64>, Vec<PointGetCommand>)>,
}
impl ReadBatcher {
fn new() -> Self {
ReadBatcher {
router: HashMap::default(),
}
}
fn is_batchable_context(ctx: &Context) -> bool {
storage::is_normal_priority(ctx.get_priority()) && !ctx.get_replica_read()
}
fn add_get(&mut self, request_id: u64, request: &mut GetRequest) {
let id = ReadId::from_context_cf(request.get_context(), None);
let command = PointGetCommand::from_get(request);
match self.router.get_mut(&id) {
Some((reqs, commands)) => {
reqs.push(request_id);
commands.push(command);
}
None => {
self.router.insert(id, (vec![request_id], vec![command]));
}
}
}
fn add_raw_get(&mut self, request_id: u64, request: &mut RawGetRequest) {
let cf = Some(request.take_cf());
let id = ReadId::from_context_cf(request.get_context(), cf);
let command = PointGetCommand::from_raw_get(request);
match self.router.get_mut(&id) {
Some((reqs, commands)) => {
reqs.push(request_id);
commands.push(command);
}
None => {
self.router.insert(id, (vec![request_id], vec![command]));
}
}
}
}
impl<E: Engine, L: LockMgr> Batcher<E, L> for ReadBatcher {
    /// Stashes `Get`/`RawGet` commands with batchable contexts; every other
    /// command kind (or a non-batchable context) is left for the normal path.
    fn filter(
        &mut self,
        request_id: u64,
        request: &mut batch_commands_request::request::Cmd,
    ) -> bool {
        match request {
            batch_commands_request::request::Cmd::Get(req)
                if Self::is_batchable_context(req.get_context()) =>
            {
                self.add_get(request_id, req);
                true
            }
            batch_commands_request::request::Cmd::RawGet(req)
                if Self::is_batchable_context(req.get_context()) =>
            {
                self.add_raw_get(request_id, req);
                true
            }
            _ => false,
        }
    }
    /// Drains every (region, cf) bucket and dispatches one fused batch-get
    /// future per bucket. Returns the number of fused commands dispatched.
    fn submit(
        &mut self,
        tx: &Sender<(u64, batch_commands_response::Response)>,
        storage: &Storage<E, L>,
    ) -> usize {
        let mut output = 0;
        for (id, (reqs, commands)) in self.router.drain() {
            let tx = tx.clone();
            output += 1;
            // cf = Some(..) marks a raw read, cf = None a transactional one.
            match id.cf {
                Some(cf) => {
                    let f = future_raw_batch_get_command(storage, tx, reqs, cf, commands);
                    poll_future_notify(f);
                }
                None => {
                    let f = future_batch_get_command(storage, tx, reqs, commands);
                    poll_future_notify(f);
                }
            }
        }
        output
    }
    fn is_empty(&self) -> bool {
        self.router.is_empty()
    }
}
/// A batch limiter paired with the batcher whose submit timing it controls.
type ReqBatcherInner<E, L> = (BatchLimiter, Box<dyn Batcher<E, L> + Send>);
/// ReqBatcher manages multiple `Batcher`s which batch requests from one unique stream of `batch_commands`
/// and controls the submit timing of those batchers based on respective `BatchLimiter`.
struct ReqBatcher<E: Engine, L: LockMgr> {
    // One (limiter, batcher) pair per batchable request kind.
    inners: BTreeMap<BatchableRequestKind, ReqBatcherInner<E, L>>,
    // Response channel shared with the owning `batch_commands` stream.
    tx: Sender<(u64, batch_commands_response::Response)>,
}
impl<E: Engine, L: LockMgr> ReqBatcher<E, L> {
    /// Constructs a new `ReqBatcher` which provides batching of one request stream with specific response channel.
    pub fn new(
        tx: Sender<(u64, batch_commands_response::Response)>,
        timeout: Option<Duration>,
        readpool_thread_load: Arc<ThreadLoad>,
    ) -> Self {
        let mut inners = BTreeMap::<BatchableRequestKind, ReqBatcherInner<E, L>>::default();
        // Only point gets (`get` / `raw_get`) are registered here; other kinds
        // have no batcher and are filtered out in `filter`.
        inners.insert(
            BatchableRequestKind::PointGet,
            (
                BatchLimiter::new(
                    BatchableRequestKind::PointGet,
                    timeout,
                    HistogramReader::new(GRPC_MSG_HISTOGRAM_VEC.kv_get.clone()),
                    readpool_thread_load,
                ),
                Box::new(ReadBatcher::new()),
            ),
        );
        ReqBatcher { inners, tx }
    }
    /// Try to batch single batch_command request, returns whether the request is stashed.
    /// One batcher can only accept requests from one unique command stream.
    pub fn filter(
        &mut self,
        request_id: u64,
        request: &mut batch_commands_request::Request,
    ) -> bool {
        if let Some(ref mut cmd) = request.cmd {
            // Prewrite/Commit currently have no registered batcher in `new`,
            // so their lookups yield None and they fall through unbatched.
            if let Some((limiter, batcher)) = match cmd {
                batch_commands_request::request::Cmd::Prewrite(_) => {
                    self.inners.get_mut(&BatchableRequestKind::Prewrite)
                }
                batch_commands_request::request::Cmd::Commit(_) => {
                    self.inners.get_mut(&BatchableRequestKind::Commit)
                }
                batch_commands_request::request::Cmd::Get(_)
                | batch_commands_request::request::Cmd::RawGet(_) => {
                    self.inners.get_mut(&BatchableRequestKind::PointGet)
                }
                _ => None,
            } {
                // in normal mode, batch requests inside one `batch_commands`.
                // in cross-command mode, only batch request when limiter permits.
                if limiter.disabled() || limiter.needs_more() {
                    limiter.observe_input(1);
                    return batcher.filter(request_id, cmd);
                }
            }
        }
        false
    }
    /// Check all batchers and submit if their limiters see fit.
    /// Called by anyone with a suitable timeslice for executing commands.
    #[inline]
    pub fn maybe_submit(&mut self, storage: &Storage<E, L>) {
        let mut now = None;
        for (limiter, batcher) in self.inners.values_mut() {
            if limiter.disabled() || !limiter.needs_more() {
                // Fetch the timestamp lazily, at most once per call.
                let now = *now.get_or_insert_with(Instant::now);
                limiter.observe_submit(now, batcher.submit(&self.tx, storage));
            }
        }
    }
    /// Check all batchers and submit if their wait duration has exceeded the max limit.
    /// Called repeatedly every `request-batch-wait-duration` interval after the batcher starts working.
    #[inline]
    pub fn should_submit(&mut self, storage: &Storage<E, L>) {
        let now = Instant::now();
        for (limiter, batcher) in self.inners.values_mut() {
            limiter.observe_tick();
            if limiter.is_due(now) {
                limiter.observe_submit(now, batcher.submit(&self.tx, storage));
            }
        }
    }
    /// Whether or not every batcher is empty of buffered requests.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.inners.values().all(|(_, batcher)| batcher.is_empty())
    }
}
/// Service handles the RPC messages for the `Tikv` service.
#[derive(Clone)]
pub struct Service<T: RaftStoreRouter + 'static, E: Engine, L: LockMgr> {
    /// Used to handle requests related to GC.
    gc_worker: GCWorker<E>,
    // For handling KV requests.
    storage: Storage<E, L>,
    // For handling coprocessor requests.
    cop: Endpoint<E>,
    // For handling raft messages.
    ch: T,
    // For handling snapshot.
    snap_scheduler: Scheduler<SnapTask>,
    // Whether to buffer batchable commands of a `batch_commands` stream.
    enable_req_batch: bool,
    // Max wait before a buffered batch is force-submitted; None disables the timer.
    req_batch_wait_duration: Option<Duration>,
    // Single-thread pool driving the periodic batch-submit timer.
    timer_pool: Arc<Mutex<ThreadPool>>,
    // Load of the gRPC threads, reported back to clients in batch responses.
    grpc_thread_load: Arc<ThreadLoad>,
    // Load of the normal-priority read pool, used by the batch limiters.
    readpool_normal_thread_load: Arc<ThreadLoad>,
}
impl<T: RaftStoreRouter + 'static, E: Engine, L: LockMgr> Service<T, E, L> {
    /// Constructs a new `Service` which provides the `Tikv` service.
    pub fn new(
        storage: Storage<E, L>,
        gc_worker: GCWorker<E>,
        cop: Endpoint<E>,
        ch: T,
        snap_scheduler: Scheduler<SnapTask>,
        grpc_thread_load: Arc<ThreadLoad>,
        readpool_normal_thread_load: Arc<ThreadLoad>,
        enable_req_batch: bool,
        req_batch_wait_duration: Option<Duration>,
    ) -> Self {
        // One dedicated thread is enough: it only drives the periodic
        // request-batch submit timer.
        let timer_pool = Arc::new(Mutex::new(
            ThreadPoolBuilder::new()
                .pool_size(1)
                .name_prefix("req_batch_timer_guard")
                .build(),
        ));
        Service {
            gc_worker,
            storage,
            cop,
            ch,
            snap_scheduler,
            grpc_thread_load,
            readpool_normal_thread_load,
            timer_pool,
            enable_req_batch,
            req_batch_wait_duration,
        }
    }
    /// Fails the unary RPC `sink` with `code` and the stringified `err`.
    fn send_fail_status<M>(
        &self,
        ctx: RpcContext<'_>,
        sink: UnarySink<M>,
        err: Error,
        code: RpcStatusCode,
    ) {
        let status = RpcStatus::new(code, Some(format!("{}", err)));
        ctx.spawn(sink.fail(status).map_err(|_| ()));
    }
}
impl<T: RaftStoreRouter + 'static, E: Engine, L: LockMgr> Tikv for Service<T, E, L> {
    /// Unary `kv_get`: transactional point read, replied on `sink`.
    fn kv_get(&mut self, ctx: RpcContext<'_>, req: GetRequest, sink: UnarySink<GetResponse>) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.kv_get.start_coarse_timer();
        let future = future_get(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_get",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_get.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_scan`: transactional range scan, replied on `sink`.
    fn kv_scan(&mut self, ctx: RpcContext<'_>, req: ScanRequest, sink: UnarySink<ScanResponse>) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.kv_scan.start_coarse_timer();
        let future = future_scan(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_scan",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_scan.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_prewrite`: first phase of 2PC commit.
    fn kv_prewrite(
        &mut self,
        ctx: RpcContext<'_>,
        req: PrewriteRequest,
        sink: UnarySink<PrewriteResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.kv_prewrite.start_coarse_timer();
        let future = future_prewrite(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_prewrite",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_prewrite.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_pessimistic_lock`: acquires pessimistic locks for a transaction.
    fn kv_pessimistic_lock(
        &mut self,
        ctx: RpcContext<'_>,
        req: PessimisticLockRequest,
        sink: UnarySink<PessimisticLockResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC
            .kv_pessimistic_lock
            .start_coarse_timer();
        let future = future_acquire_pessimistic_lock(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_pessimistic_lock",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_pessimistic_lock.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_pessimistic_rollback`: releases pessimistic locks.
    fn kv_pessimistic_rollback(
        &mut self,
        ctx: RpcContext<'_>,
        req: PessimisticRollbackRequest,
        sink: UnarySink<PessimisticRollbackResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC
            .kv_pessimistic_rollback
            .start_coarse_timer();
        let future = future_pessimistic_rollback(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_pessimistic_rollback",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_pessimistic_rollback.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_commit`: second phase of 2PC commit.
    fn kv_commit(
        &mut self,
        ctx: RpcContext<'_>,
        req: CommitRequest,
        sink: UnarySink<CommitResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.kv_commit.start_coarse_timer();
        let future = future_commit(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_commit",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_commit.inc();
            });
        ctx.spawn(future);
    }
    /// `kv_import` is not supported by this service; calling it panics.
    fn kv_import(&mut self, _: RpcContext<'_>, _: ImportRequest, _: UnarySink<ImportResponse>) {
        unimplemented!();
    }
    /// Unary `kv_cleanup`: rolls back or confirms a single primary lock.
    fn kv_cleanup(
        &mut self,
        ctx: RpcContext<'_>,
        req: CleanupRequest,
        sink: UnarySink<CleanupResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.kv_cleanup.start_coarse_timer();
        let future = future_cleanup(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_cleanup",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_cleanup.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_batch_get`: transactional multi-key point read.
    fn kv_batch_get(
        &mut self,
        ctx: RpcContext<'_>,
        req: BatchGetRequest,
        sink: UnarySink<BatchGetResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.kv_batch_get.start_coarse_timer();
        let future = future_batch_get(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_batch_get",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_batch_get.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_batch_rollback`: rolls back prewritten locks of a transaction.
    fn kv_batch_rollback(
        &mut self,
        ctx: RpcContext<'_>,
        req: BatchRollbackRequest,
        sink: UnarySink<BatchRollbackResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC
            .kv_batch_rollback
            .start_coarse_timer();
        let future = future_batch_rollback(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_batch_rollback",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_batch_rollback.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_txn_heart_beat`: extends the TTL of a transaction's primary lock.
    fn kv_txn_heart_beat(
        &mut self,
        ctx: RpcContext<'_>,
        req: TxnHeartBeatRequest,
        sink: UnarySink<TxnHeartBeatResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC
            .kv_txn_heart_beat
            .start_coarse_timer();
        let future = future_txn_heart_beat(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_txn_heart_beat",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_txn_heart_beat.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_check_txn_status`: queries/decides the state of a transaction lock.
    fn kv_check_txn_status(
        &mut self,
        ctx: RpcContext<'_>,
        req: CheckTxnStatusRequest,
        sink: UnarySink<CheckTxnStatusResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC
            .kv_check_txn_status
            .start_coarse_timer();
        let future = future_check_txn_status(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_check_txn_status",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_check_txn_status.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_scan_lock`: lists locks in a key range.
    fn kv_scan_lock(
        &mut self,
        ctx: RpcContext<'_>,
        req: ScanLockRequest,
        sink: UnarySink<ScanLockResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.kv_scan_lock.start_coarse_timer();
        let future = future_scan_lock(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_scan_lock",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_scan_lock.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_resolve_lock`: commits or rolls back locks of decided transactions.
    fn kv_resolve_lock(
        &mut self,
        ctx: RpcContext<'_>,
        req: ResolveLockRequest,
        sink: UnarySink<ResolveLockResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.kv_resolve_lock.start_coarse_timer();
        let future = future_resolve_lock(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_resolve_lock",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_resolve_lock.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_gc`: handled by the dedicated GC worker, not by `storage`.
    fn kv_gc(&mut self, ctx: RpcContext<'_>, req: GcRequest, sink: UnarySink<GcResponse>) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.kv_gc.start_coarse_timer();
        let future = future_gc(&self.gc_worker, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_gc",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_gc.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `kv_delete_range`: deletes all keys in a range.
    fn kv_delete_range(
        &mut self,
        ctx: RpcContext<'_>,
        req: DeleteRangeRequest,
        sink: UnarySink<DeleteRangeResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.kv_delete_range.start_coarse_timer();
        let future = future_delete_range(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "kv_delete_range",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.kv_delete_range.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `raw_get`: non-transactional point read.
    fn raw_get(
        &mut self,
        ctx: RpcContext<'_>,
        req: RawGetRequest,
        sink: UnarySink<RawGetResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.raw_get.start_coarse_timer();
        let future = future_raw_get(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "raw_get",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.raw_get.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `raw_batch_get`: non-transactional multi-key point read.
    fn raw_batch_get(
        &mut self,
        ctx: RpcContext<'_>,
        req: RawBatchGetRequest,
        sink: UnarySink<RawBatchGetResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.raw_batch_get.start_coarse_timer();
        let future = future_raw_batch_get(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "raw_batch_get",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.raw_batch_get.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `raw_scan`: non-transactional range scan.
    fn raw_scan(
        &mut self,
        ctx: RpcContext<'_>,
        req: RawScanRequest,
        sink: UnarySink<RawScanResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.raw_scan.start_coarse_timer();
        let future = future_raw_scan(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "raw_scan",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.raw_scan.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `raw_batch_scan`: non-transactional scan over multiple ranges.
    fn raw_batch_scan(
        &mut self,
        ctx: RpcContext<'_>,
        req: RawBatchScanRequest,
        sink: UnarySink<RawBatchScanResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.raw_batch_scan.start_coarse_timer();
        let future = future_raw_batch_scan(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "raw_batch_scan",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.raw_batch_scan.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `raw_put`: non-transactional single-key write.
    fn raw_put(
        &mut self,
        ctx: RpcContext<'_>,
        req: RawPutRequest,
        sink: UnarySink<RawPutResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.raw_put.start_coarse_timer();
        let future = future_raw_put(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "raw_put",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.raw_put.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `raw_batch_put`: non-transactional multi-key write.
    fn raw_batch_put(
        &mut self,
        ctx: RpcContext<'_>,
        req: RawBatchPutRequest,
        sink: UnarySink<RawBatchPutResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.raw_batch_put.start_coarse_timer();
        let future = future_raw_batch_put(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "raw_batch_put",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.raw_batch_put.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `raw_delete`: non-transactional single-key delete.
    fn raw_delete(
        &mut self,
        ctx: RpcContext<'_>,
        req: RawDeleteRequest,
        sink: UnarySink<RawDeleteResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.raw_delete.start_coarse_timer();
        let future = future_raw_delete(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "raw_delete",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.raw_delete.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `raw_batch_delete`: non-transactional multi-key delete.
    fn raw_batch_delete(
        &mut self,
        ctx: RpcContext<'_>,
        req: RawBatchDeleteRequest,
        sink: UnarySink<RawBatchDeleteResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.raw_batch_delete.start_coarse_timer();
        let future = future_raw_batch_delete(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "raw_batch_delete",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.raw_batch_delete.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `raw_delete_range`: non-transactional range delete.
    fn raw_delete_range(
        &mut self,
        ctx: RpcContext<'_>,
        req: RawDeleteRangeRequest,
        sink: UnarySink<RawDeleteRangeResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.raw_delete_range.start_coarse_timer();
        let future = future_raw_delete_range(&self.storage, req)
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "raw_delete_range",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.raw_delete_range.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `unsafe_destroy_range`: physically destroys all data in a key
    /// range, bypassing transactional guarantees. Delegated to the GC worker.
    fn unsafe_destroy_range(
        &mut self,
        ctx: RpcContext<'_>,
        mut req: UnsafeDestroyRangeRequest,
        sink: UnarySink<UnsafeDestroyRangeResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC
            .unsafe_destroy_range
            .start_coarse_timer();
        // DestroyRange is a very dangerous operation. We don't allow passing MIN_KEY as start, or
        // MAX_KEY as end here.
        // NOTE(review): these asserts panic the serving thread on malformed
        // client input (empty start/end key) instead of failing the RPC —
        // consider returning an INVALID_ARGUMENT status; confirm callers
        // always validate before sending.
        assert!(!req.get_start_key().is_empty());
        assert!(!req.get_end_key().is_empty());
        let (cb, f) = paired_future_callback();
        let res = self.gc_worker.async_unsafe_destroy_range(
            req.take_context(),
            Key::from_raw(&req.take_start_key()),
            Key::from_raw(&req.take_end_key()),
            cb,
        );
        let future = AndThenWith::new(res, f.map_err(Error::from))
            .and_then(|v| {
                let mut resp = UnsafeDestroyRangeResponse::default();
                // Region error is impossible here.
                if let Err(e) = v {
                    resp.set_error(format!("{}", e));
                }
                sink.success(resp).map_err(Error::from)
            })
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "unsafe_destroy_range",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.unsafe_destroy_range.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `coprocessor`: pushes a computation request down to the coprocessor endpoint.
    fn coprocessor(&mut self, ctx: RpcContext<'_>, req: Request, sink: UnarySink<Response>) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.coprocessor.start_coarse_timer();
        let future = future_cop(&self.cop, req, Some(ctx.peer()))
            .and_then(|resp| sink.success(resp).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "coprocessor",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.coprocessor.inc();
            });
        ctx.spawn(future);
    }
    /// Server-streaming `coprocessor_stream`: forwards a stream of coprocessor
    /// responses to the client, buffering writes between items.
    fn coprocessor_stream(
        &mut self,
        ctx: RpcContext<'_>,
        req: Request,
        sink: ServerStreamingSink<Response>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC
            .coprocessor_stream
            .start_coarse_timer();
        let stream = self
            .cop
            .parse_and_handle_stream_request(req, Some(ctx.peer()))
            // buffer_hint(true): let gRPC coalesce intermediate responses.
            .map(|resp| (resp, WriteFlags::default().buffer_hint(true)))
            .map_err(|e| {
                let code = RpcStatusCode::UNKNOWN;
                let msg = Some(format!("{:?}", e));
                GrpcError::RpcFailure(RpcStatus::new(code, msg))
            });
        let future = sink
            .send_all(stream)
            .map(|_| timer.observe_duration())
            .map_err(Error::from)
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "coprocessor_stream",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.coprocessor_stream.inc();
            });
        ctx.spawn(future);
    }
    /// Client-streaming `raft`: forwards each incoming raft message to the
    /// raftstore router. The stream only terminates on error or client close,
    /// so the final status is always reported through `sink.fail`.
    fn raft(
        &mut self,
        ctx: RpcContext<'_>,
        stream: RequestStream<RaftMessage>,
        sink: ClientStreamingSink<Done>,
    ) {
        let ch = self.ch.clone();
        ctx.spawn(
            stream
                .map_err(Error::from)
                .for_each(move |msg| {
                    RAFT_MESSAGE_RECV_COUNTER.inc();
                    ch.send_raft_msg(msg).map_err(Error::from)
                })
                .then(|res| {
                    let status = match res {
                        Err(e) => {
                            let msg = format!("{:?}", e);
                            error!("dispatch raft msg from gRPC to raftstore fail"; "err" => %msg);
                            RpcStatus::new(RpcStatusCode::UNKNOWN, Some(msg))
                        }
                        // A cleanly-ended stream is still unexpected for this RPC.
                        Ok(_) => RpcStatus::new(RpcStatusCode::UNKNOWN, None),
                    };
                    sink.fail(status)
                        .map_err(|e| error!("KvService::raft send response fail"; "err" => ?e))
                }),
        );
    }
    /// Client-streaming `batch_raft`: like `raft`, but each stream item carries
    /// a batch of raft messages, dispatched one by one to the router.
    fn batch_raft(
        &mut self,
        ctx: RpcContext<'_>,
        stream: RequestStream<BatchRaftMessage>,
        sink: ClientStreamingSink<Done>,
    ) {
        info!("batch_raft RPC is called, new gRPC stream established");
        let ch = self.ch.clone();
        ctx.spawn(
            stream
                .map_err(Error::from)
                .for_each(move |mut msgs| {
                    let len = msgs.get_msgs().len();
                    RAFT_MESSAGE_RECV_COUNTER.inc_by(len as i64);
                    RAFT_MESSAGE_BATCH_SIZE.observe(len as f64);
                    // Stop on the first send failure; remaining messages in the
                    // batch are dropped along with the stream.
                    for msg in msgs.take_msgs().into_iter() {
                        if let Err(e) = ch.send_raft_msg(msg) {
                            return Err(Error::from(e));
                        }
                    }
                    Ok(())
                })
                .then(|res| {
                    let status = match res {
                        Err(e) => {
                            let msg = format!("{:?}", e);
                            error!("dispatch raft msg from gRPC to raftstore fail"; "err" => %msg);
                            RpcStatus::new(RpcStatusCode::UNKNOWN, Some(msg))
                        }
                        Ok(_) => RpcStatus::new(RpcStatusCode::UNKNOWN, None),
                    };
                    sink.fail(status).map_err(
                        |e| error!("KvService::batch_raft send response fail"; "err" => ?e),
                    )
                }),
        )
    }
    /// Client-streaming `snapshot`: hands the chunk stream and sink to the
    /// snapshot worker; if scheduling fails, recovers the sink from the
    /// rejected task and fails the RPC with RESOURCE_EXHAUSTED.
    fn snapshot(
        &mut self,
        ctx: RpcContext<'_>,
        stream: RequestStream<SnapshotChunk>,
        sink: ClientStreamingSink<Done>,
    ) {
        let task = SnapTask::Recv { stream, sink };
        if let Err(e) = self.snap_scheduler.schedule(task) {
            let err_msg = format!("{}", e);
            let sink = match e.into_inner() {
                SnapTask::Recv { sink, .. } => sink,
                _ => unreachable!(),
            };
            let status = RpcStatus::new(RpcStatusCode::RESOURCE_EXHAUSTED, Some(err_msg));
            ctx.spawn(sink.fail(status).map_err(|_| ()));
        }
    }
    /// Unary `mvcc_get_by_key`: debugging RPC returning the full MVCC history
    /// of one key.
    fn mvcc_get_by_key(
        &mut self,
        ctx: RpcContext<'_>,
        mut req: MvccGetByKeyRequest,
        sink: UnarySink<MvccGetByKeyResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.mvcc_get_by_key.start_coarse_timer();
        let key = Key::from_raw(req.get_key());
        let (cb, f) = paired_future_callback();
        let res = self
            .storage
            .async_mvcc_by_key(req.take_context(), key.clone(), cb);
        let future = AndThenWith::new(res, f.map_err(Error::from))
            .and_then(|v| {
                let mut resp = MvccGetByKeyResponse::default();
                if let Some(err) = extract_region_error(&v) {
                    resp.set_region_error(err);
                } else {
                    match v {
                        Ok(mvcc) => {
                            resp.set_info(extract_mvcc_info(mvcc));
                        }
                        Err(e) => resp.set_error(format!("{}", e)),
                    };
                }
                sink.success(resp).map_err(Error::from)
            })
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "mvcc_get_by_key",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.mvcc_get_by_key.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `mvcc_get_by_start_ts`: debugging RPC locating a key and its MVCC
    /// info by a transaction's start timestamp.
    fn mvcc_get_by_start_ts(
        &mut self,
        ctx: RpcContext<'_>,
        mut req: MvccGetByStartTsRequest,
        sink: UnarySink<MvccGetByStartTsResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC
            .mvcc_get_by_start_ts
            .start_coarse_timer();
        let (cb, f) = paired_future_callback();
        let res = self
            .storage
            .async_mvcc_by_start_ts(req.take_context(), req.get_start_ts(), cb);
        let future = AndThenWith::new(res, f.map_err(Error::from))
            .and_then(|v| {
                let mut resp = MvccGetByStartTsResponse::default();
                if let Some(err) = extract_region_error(&v) {
                    resp.set_region_error(err);
                } else {
                    match v {
                        Ok(Some((k, vv))) => {
                            resp.set_key(k.into_raw().unwrap());
                            resp.set_info(extract_mvcc_info(vv));
                        }
                        // No matching key: reply with empty info.
                        Ok(None) => {
                            resp.set_info(Default::default());
                        }
                        Err(e) => resp.set_error(format!("{}", e)),
                    }
                }
                sink.success(resp).map_err(Error::from)
            })
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "mvcc_get_by_start_ts",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.mvcc_get_by_start_ts.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `split_region`: asks raftstore to split a region at one key
    /// (legacy `split_key` field) or several keys (`split_keys`), and maps the
    /// admin response back into the RPC response.
    fn split_region(
        &mut self,
        ctx: RpcContext<'_>,
        mut req: SplitRegionRequest,
        sink: UnarySink<SplitRegionResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.split_region.start_coarse_timer();
        let region_id = req.get_context().get_region_id();
        let (cb, future) = paired_future_callback();
        // Legacy single-key field takes precedence over the repeated field.
        let mut split_keys = if !req.get_split_key().is_empty() {
            vec![Key::from_raw(req.get_split_key()).into_encoded()]
        } else {
            req.take_split_keys()
                .into_iter()
                .map(|x| Key::from_raw(&x).into_encoded())
                .collect()
        };
        split_keys.sort();
        let req = CasualMessage::SplitRegion {
            region_epoch: req.take_context().take_region_epoch(),
            split_keys,
            callback: Callback::Write(cb),
        };
        if let Err(e) = self.ch.casual_send(region_id, req) {
            self.send_fail_status(ctx, sink, Error::from(e), RpcStatusCode::RESOURCE_EXHAUSTED);
            return;
        }
        let future = future
            .map_err(Error::from)
            .map(move |mut v| {
                let mut resp = SplitRegionResponse::default();
                if v.response.get_header().has_error() {
                    resp.set_region_error(v.response.mut_header().take_error());
                } else {
                    let admin_resp = v.response.mut_admin_response();
                    let regions: Vec<_> = admin_resp.mut_splits().take_regions().into();
                    // A split must yield at least two regions.
                    if regions.len() < 2 {
                        error!(
                            "invalid split response";
                            "region_id" => region_id,
                            "resp" => ?admin_resp
                        );
                        resp.mut_region_error().set_message(format!(
                            "Internal Error: invalid response: {:?}",
                            admin_resp
                        ));
                    } else {
                        // left/right are only filled for the two-way split case,
                        // kept for older clients; `regions` always carries all.
                        if regions.len() == 2 {
                            resp.set_left(regions[0].clone());
                            resp.set_right(regions[1].clone());
                        }
                        resp.set_regions(regions.into());
                    }
                }
                resp
            })
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "split_region",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.split_region.inc();
            });
        ctx.spawn(future);
    }
    /// Unary `read_index`: builds a quorum ReadIndex raft command from the
    /// request context and returns the committed read index for linearizable
    /// reads.
    fn read_index(
        &mut self,
        ctx: RpcContext<'_>,
        req: ReadIndexRequest,
        sink: UnarySink<ReadIndexResponse>,
    ) {
        let timer = GRPC_MSG_HISTOGRAM_VEC.read_index.start_coarse_timer();
        let region_id = req.get_context().get_region_id();
        let mut cmd = RaftCmdRequest::default();
        let mut header = RaftRequestHeader::default();
        let mut inner_req = RaftRequest::default();
        inner_req.set_cmd_type(CmdType::ReadIndex);
        header.set_region_id(req.get_context().get_region_id());
        header.set_peer(req.get_context().get_peer().clone());
        header.set_region_epoch(req.get_context().get_region_epoch().clone());
        // Term 0 means the client did not supply one; leave it unset.
        if req.get_context().get_term() != 0 {
            header.set_term(req.get_context().get_term());
        }
        header.set_sync_log(req.get_context().get_sync_log());
        // ReadIndex must confirm leadership with a quorum.
        header.set_read_quorum(true);
        cmd.set_header(header);
        cmd.set_requests(vec![inner_req].into());
        let (cb, future) = paired_future_callback();
        if let Err(e) = self.ch.send_command(cmd, Callback::Read(cb)) {
            self.send_fail_status(ctx, sink, Error::from(e), RpcStatusCode::RESOURCE_EXHAUSTED);
            return;
        }
        let future = future
            .map_err(Error::from)
            .map(move |mut v| {
                let mut resp = ReadIndexResponse::default();
                if v.response.get_header().has_error() {
                    resp.set_region_error(v.response.mut_header().take_error());
                } else {
                    let raft_resps = v.response.get_responses();
                    // Exactly one inner request was sent, so exactly one
                    // response is expected back.
                    if raft_resps.len() != 1 {
                        error!(
                            "invalid read index response";
                            "region_id" => region_id,
                            "response" => ?raft_resps
                        );
                        resp.mut_region_error().set_message(format!(
                            "Internal Error: invalid response: {:?}",
                            raft_resps
                        ));
                    } else {
                        let read_index = raft_resps[0].get_read_index().get_read_index();
                        resp.set_read_index(read_index);
                    }
                }
                resp
            })
            .and_then(|res| sink.success(res).map_err(Error::from))
            .map(|_| timer.observe_duration())
            .map_err(move |e| {
                debug!("kv rpc failed";
                    "request" => "read_index",
                    "err" => ?e
                );
                GRPC_MSG_FAIL_COUNTER.read_index.inc();
            });
        ctx.spawn(future);
    }
    /// Duplex `batch_commands`: demultiplexes each incoming batch into
    /// individual commands (optionally buffering batchable ones in a
    /// `ReqBatcher`), and re-multiplexes responses from `rx` into batched
    /// replies on `sink`.
    fn batch_commands(
        &mut self,
        ctx: RpcContext<'_>,
        stream: RequestStream<BatchCommandsRequest>,
        sink: DuplexSink<BatchCommandsResponse>,
    ) {
        let (tx, rx) = unbounded(GRPC_MSG_NOTIFY_SIZE);
        let ctx = Arc::new(ctx);
        let peer = ctx.peer();
        let storage = self.storage.clone();
        let cop = self.cop.clone();
        let gc_worker = self.gc_worker.clone();
        if self.enable_req_batch {
            // Flag used by the timer task to know when the stream has ended.
            let stopped = Arc::new(AtomicBool::new(false));
            let req_batcher = ReqBatcher::new(
                tx.clone(),
                self.req_batch_wait_duration,
                Arc::clone(&self.readpool_normal_thread_load),
            );
            let req_batcher = Arc::new(Mutex::new(req_batcher));
            if let Some(duration) = self.req_batch_wait_duration {
                let storage = storage.clone();
                let req_batcher = req_batcher.clone();
                let req_batcher2 = req_batcher.clone();
                let stopped = Arc::clone(&stopped);
                let start = Instant::now();
                let timer = GLOBAL_TIMER_HANDLE.clone();
                // Periodic task that force-submits overdue batches.
                self.timer_pool.lock().unwrap().spawn(
                    timer
                        .interval(start, duration)
                        .take_while(move |_| {
                            // only stop timer when no more incoming and old batch is submitted.
                            future::ok(
                                !stopped.load(Ordering::Relaxed)
                                    || !req_batcher2.lock().unwrap().is_empty(),
                            )
                        })
                        .for_each(move |_| {
                            req_batcher.lock().unwrap().should_submit(&storage);
                            Ok(())
                        })
                        .map_err(|e| error!("batch_commands timer errored"; "err" => ?e)),
                );
            }
            let request_handler = stream.for_each(move |mut req| {
                let request_ids = req.take_request_ids();
                let requests: Vec<_> = req.take_requests().into();
                GRPC_REQ_BATCH_COMMANDS_SIZE.observe(requests.len() as f64);
                for (id, mut req) in request_ids.into_iter().zip(requests) {
                    // Non-batchable commands are dispatched immediately.
                    if !req_batcher.lock().unwrap().filter(id, &mut req) {
                        handle_batch_commands_request(
                            &storage,
                            &gc_worker,
                            &cop,
                            &peer,
                            id,
                            req,
                            tx.clone(),
                        );
                    }
                }
                req_batcher.lock().unwrap().maybe_submit(&storage);
                future::ok(())
            });
            ctx.spawn(
                request_handler
                    .map_err(|e| error!("batch_commands error"; "err" => %e))
                    .and_then(move |_| {
                        // signal timer guard to stop polling
                        stopped.store(true, Ordering::Relaxed);
                        Ok(())
                    }),
            );
        } else {
            // Batching disabled: every command is dispatched immediately.
            let request_handler = stream.for_each(move |mut req| {
                let request_ids = req.take_request_ids();
                let requests: Vec<_> = req.take_requests().into();
                GRPC_REQ_BATCH_COMMANDS_SIZE.observe(requests.len() as f64);
                for (id, req) in request_ids.into_iter().zip(requests) {
                    handle_batch_commands_request(
                        &storage,
                        &gc_worker,
                        &cop,
                        &peer,
                        id,
                        req,
                        tx.clone(),
                    );
                }
                future::ok(())
            });
            ctx.spawn(request_handler.map_err(|e| error!("batch_commands error"; "err" => %e)));
        };
        let thread_load = Arc::clone(&self.grpc_thread_load);
        // Collect individual responses from `rx` into batched replies.
        let response_retriever = BatchReceiver::new(
            rx,
            GRPC_MSG_MAX_BATCH_SIZE,
            BatchCommandsResponse::default,
            |batch_resp, (id, resp)| {
                batch_resp.mut_request_ids().push(id);
                batch_resp.mut_responses().push(resp);
            },
        );
        let response_retriever = response_retriever
            .inspect(|r| GRPC_RESP_BATCH_COMMANDS_SIZE.observe(r.request_ids.len() as f64))
            .map(move |mut r| {
                // Piggy-back the current gRPC thread load for client-side balancing.
                r.set_transport_layer_load(thread_load.load() as u64);
                (r, WriteFlags::default().buffer_hint(false))
            })
            .map_err(|e| {
                let msg = Some(format!("{:?}", e));
                GrpcError::RpcFailure(RpcStatus::new(RpcStatusCode::UNKNOWN, msg))
            });
        ctx.spawn(sink.send_all(response_retriever).map(|_| ()).map_err(|e| {
            debug!("kv rpc failed";
                "request" => "batch_commands",
                "err" => ?e
            );
        }));
    }
}
/// Forwards one finished batch-command response into the stream's response
/// channel, recording its duration. The send wakes the batch collector via
/// `send_and_notify`; a send failure means the receiving end is gone.
fn response_batch_commands_request<F>(
    id: u64,
    resp: F,
    tx: Sender<(u64, batch_commands_response::Response)>,
    timer: HistogramTimer,
) where
    F: Future<Item = batch_commands_response::Response, Error = ()> + Send + 'static,
{
    let f = resp.and_then(move |resp| {
        if tx.send_and_notify((id, resp)).is_err() {
            error!("KvService response batch commands fail");
            return Err(());
        }
        timer.observe_duration();
        Ok(())
    });
    poll_future_notify(f);
}
// BatchCommandsNotify is used to let the business thread pool notify completion queues directly.
/// Shared, cloneable handle to a spawned future. The `Option` slot holds the
/// spawn while the future is pending and is cleared once it completes.
struct BatchCommandsNotify<F>(Arc<Mutex<Option<Spawn<F>>>>);
impl<F> Clone for BatchCommandsNotify<F> {
    /// Cloning only bumps the reference count of the shared spawn slot;
    /// all clones observe and drive the same future.
    fn clone(&self) -> BatchCommandsNotify<F> {
        let shared = Arc::clone(&self.0);
        BatchCommandsNotify(shared)
    }
}
impl<F> Notify for BatchCommandsNotify<F>
where
    F: Future<Item = (), Error = ()> + Send + 'static,
{
    /// Polls the stored future once. A `NotReady` poll (or an already
    /// cleared slot) leaves the state untouched so a later notification can
    /// poll again; a ready or errored poll drops the spawn by clearing the
    /// slot.
    fn notify(&self, id: usize) {
        // Pass a clone of this handle back in so the future re-arms this
        // same notifier when it wants to be polled again.
        let n = Arc::new(self.clone());
        let mut s = self.0.lock().unwrap();
        match s.as_mut().map(|spawn| spawn.poll_future_notify(&n, id)) {
            Some(Ok(Async::NotReady)) | None => {}
            // Ok(Ready) or Err: the future is finished, release it.
            _ => *s = None,
        };
    }
}
/// Spawns `f` onto a self-notifying executor and kicks off the first poll.
/// Subsequent polls are driven by `BatchCommandsNotify::notify`.
fn poll_future_notify<F: Future<Item = (), Error = ()> + Send + 'static>(f: F) {
    let slot = Arc::new(Mutex::new(Some(executor::spawn(f))));
    let handle = BatchCommandsNotify(slot);
    handle.notify(0);
}
/// Dispatches one request of a `BatchCommands` stream to the matching
/// storage / GC / coprocessor handler and queues its response on `tx`
/// keyed by the request `id`. Each arm starts a coarse latency timer and
/// bumps the per-command failure counter on error.
fn handle_batch_commands_request<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    gc_worker: &GCWorker<E>,
    cop: &Endpoint<E>,
    peer: &str,
    id: u64,
    req: batch_commands_request::Request,
    tx: Sender<(u64, batch_commands_response::Response)>,
) {
    // To simplify code and make the logic more clear.
    // `oneof!(Variant)` expands to a closure wrapping a plain RPC response
    // into the batch response oneof under the given variant.
    macro_rules! oneof {
        ($p:path) => {
            |resp| {
                let mut res = batch_commands_response::Response::default();
                res.cmd = Some($p(resp));
                res
            }
        };
    }
    match req.cmd {
        None => {
            // For some invalid requests.
            // Reply with an empty response so the client's pending id is
            // still resolved.
            let timer = GRPC_MSG_HISTOGRAM_VEC.invalid.start_coarse_timer();
            let resp = future::ok(batch_commands_response::Response::default());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::Get(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.kv_get.start_coarse_timer();
            let resp = future_get(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::Get))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_get.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::Scan(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.kv_scan.start_coarse_timer();
            let resp = future_scan(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::Scan))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_scan.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::Prewrite(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.kv_prewrite.start_coarse_timer();
            let resp = future_prewrite(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::Prewrite))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_prewrite.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::Commit(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.kv_commit.start_coarse_timer();
            let resp = future_commit(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::Commit))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_commit.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        // Import is not supported through batch commands.
        Some(batch_commands_request::request::Cmd::Import(_)) => unimplemented!(),
        Some(batch_commands_request::request::Cmd::Cleanup(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.kv_cleanup.start_coarse_timer();
            let resp = future_cleanup(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::Cleanup))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_cleanup.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::BatchGet(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.kv_batch_get.start_coarse_timer();
            let resp = future_batch_get(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::BatchGet))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_batch_get.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::BatchRollback(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC
                .kv_batch_rollback
                .start_coarse_timer();
            let resp = future_batch_rollback(&storage, req)
                .map(oneof!(
                    batch_commands_response::response::Cmd::BatchRollback
                ))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_batch_rollback.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::TxnHeartBeat(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC
                .kv_txn_heart_beat
                .start_coarse_timer();
            let resp = future_txn_heart_beat(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::TxnHeartBeat))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_txn_heart_beat.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::CheckTxnStatus(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC
                .kv_check_txn_status
                .start_coarse_timer();
            let resp = future_check_txn_status(&storage, req)
                .map(oneof!(
                    batch_commands_response::response::Cmd::CheckTxnStatus
                ))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_check_txn_status.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::ScanLock(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.kv_scan_lock.start_coarse_timer();
            let resp = future_scan_lock(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::ScanLock))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_scan_lock.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::ResolveLock(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.kv_resolve_lock.start_coarse_timer();
            let resp = future_resolve_lock(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::ResolveLock))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_resolve_lock.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::Gc(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.kv_gc.start_coarse_timer();
            let resp = future_gc(&gc_worker, req)
                .map(oneof!(batch_commands_response::response::Cmd::Gc))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_gc.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::DeleteRange(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.kv_delete_range.start_coarse_timer();
            let resp = future_delete_range(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::DeleteRange))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_delete_range.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::RawGet(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.raw_get.start_coarse_timer();
            let resp = future_raw_get(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::RawGet))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.raw_get.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::RawBatchGet(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.raw_batch_get.start_coarse_timer();
            let resp = future_raw_batch_get(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::RawBatchGet))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.raw_batch_get.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::RawPut(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.raw_put.start_coarse_timer();
            let resp = future_raw_put(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::RawPut))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.raw_put.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::RawBatchPut(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.raw_batch_put.start_coarse_timer();
            let resp = future_raw_batch_put(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::RawBatchPut))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.raw_batch_put.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::RawDelete(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.raw_delete.start_coarse_timer();
            let resp = future_raw_delete(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::RawDelete))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.raw_delete.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::RawBatchDelete(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.raw_batch_delete.start_coarse_timer();
            let resp = future_raw_batch_delete(&storage, req)
                .map(oneof!(
                    batch_commands_response::response::Cmd::RawBatchDelete
                ))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.raw_batch_delete.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::RawScan(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.raw_scan.start_coarse_timer();
            let resp = future_raw_scan(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::RawScan))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.raw_scan.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::RawDeleteRange(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.raw_delete_range.start_coarse_timer();
            let resp = future_raw_delete_range(&storage, req)
                .map(oneof!(
                    batch_commands_response::response::Cmd::RawDeleteRange
                ))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.raw_delete_range.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::RawBatchScan(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.raw_batch_scan.start_coarse_timer();
            let resp = future_raw_batch_scan(&storage, req)
                .map(oneof!(batch_commands_response::response::Cmd::RawBatchScan))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.raw_batch_scan.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::Coprocessor(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.coprocessor.start_coarse_timer();
            let resp = future_cop(&cop, req, Some(peer.to_string()))
                .map(oneof!(batch_commands_response::response::Cmd::Coprocessor))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.coprocessor.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::PessimisticLock(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC
                .kv_pessimistic_lock
                .start_coarse_timer();
            let resp = future_acquire_pessimistic_lock(&storage, req)
                .map(oneof!(
                    batch_commands_response::response::Cmd::PessimisticLock
                ))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_pessimistic_lock.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::PessimisticRollback(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC
                .kv_pessimistic_rollback
                .start_coarse_timer();
            let resp = future_pessimistic_rollback(&storage, req)
                .map(oneof!(
                    batch_commands_response::response::Cmd::PessimisticRollback
                ))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.kv_pessimistic_rollback.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
        Some(batch_commands_request::request::Cmd::Empty(req)) => {
            let timer = GRPC_MSG_HISTOGRAM_VEC.invalid.start_coarse_timer();
            let resp = future_handle_empty(req)
                .map(oneof!(batch_commands_response::response::Cmd::Empty))
                .map_err(|_| GRPC_MSG_FAIL_COUNTER.invalid.inc());
            response_batch_commands_request(id, resp, tx, timer);
        }
    }
}
/// Test-only echo command: waits `delay_time` milliseconds on the global
/// timer, then replies with the request's `test_id`. The timer's error
/// branch is considered impossible.
fn future_handle_empty(
    req: BatchCommandsEmptyRequest,
) -> impl Future<Item = BatchCommandsEmptyResponse, Error = Error> {
    let wake_at =
        std::time::Instant::now() + std::time::Duration::from_millis(req.get_delay_time());
    tikv_util::timer::GLOBAL_TIMER_HANDLE
        .delay(wake_at)
        .map(move |_| {
            let mut response = BatchCommandsEmptyResponse::default();
            response.set_test_id(req.get_test_id());
            response
        })
        .map_err(|_| unreachable!())
}
/// Point get at a given MVCC version. Region errors, key errors, and the
/// found / not-found cases are all folded into the `GetResponse`; the
/// returned future itself never fails.
fn future_get<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    mut req: GetRequest,
) -> impl Future<Item = GetResponse, Error = Error> {
    let key = Key::from_raw(req.get_key());
    let version = req.get_version();
    storage
        .async_get(req.take_context(), key, version)
        .then(|v| {
            let mut resp = GetResponse::default();
            match extract_region_error(&v) {
                Some(err) => resp.set_region_error(err),
                None => match v {
                    Ok(Some(val)) => resp.set_value(val),
                    Ok(None) => resp.set_not_found(true),
                    Err(e) => resp.set_error(extract_key_error(&e)),
                },
            }
            Ok(resp)
        })
}
/// Executes a batched point-get against the storage layer and sends each
/// result back through `tx`, tagged with its originating request id.
///
/// `requests` (ids) and `commands` are expected to be parallel vectors;
/// a length mismatch is logged but the `zip` then silently pairs up to the
/// shorter of the two.
fn future_batch_get_command<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    tx: Sender<(u64, batch_commands_response::Response)>,
    requests: Vec<u64>,
    commands: Vec<PointGetCommand>,
) -> impl Future<Item = (), Error = ()> {
    let timer = GRPC_MSG_HISTOGRAM_VEC
        .kv_batch_get_command
        .start_coarse_timer();
    storage.async_batch_get_command(commands).then(move |v| {
        match v {
            // Per-command results: translate each into a GetResponse.
            Ok(v) => {
                if requests.len() != v.len() {
                    error!("KvService batch response size mismatch");
                }
                for (req, v) in requests.into_iter().zip(v.into_iter()) {
                    let mut resp = GetResponse::default();
                    if let Some(err) = extract_region_error(&v) {
                        resp.set_region_error(err);
                    } else {
                        match v {
                            Ok(Some(val)) => resp.set_value(val),
                            Ok(None) => resp.set_not_found(true),
                            Err(e) => resp.set_error(extract_key_error(&e)),
                        }
                    }
                    let mut res = batch_commands_response::Response::default();
                    res.cmd = Some(batch_commands_response::response::Cmd::Get(resp));
                    if tx.send_and_notify((req, res)).is_err() {
                        error!("KvService response batch commands fail");
                    }
                }
            }
            // The whole batch failed (`e` can only be `Err` here since `Ok`
            // is matched above): broadcast the same error response to every
            // pending request id.
            e => {
                let mut resp = GetResponse::default();
                if let Some(err) = extract_region_error(&e) {
                    resp.set_region_error(err);
                } else if let Err(e) = e {
                    resp.set_error(extract_key_error(&e));
                }
                let mut res = batch_commands_response::Response::default();
                res.cmd = Some(batch_commands_response::response::Cmd::Get(resp));
                for req in requests {
                    if tx.send_and_notify((req, res.clone())).is_err() {
                        error!("KvService response batch commands fail");
                    }
                }
            }
        }
        // One timer covers the whole batch, not each individual command.
        timer.observe_duration();
        Ok(())
    })
}
/// MVCC range scan. An empty `end_key` in the request means the scan is
/// unbounded on the right. Errors are folded into the response; the future
/// itself never fails.
fn future_scan<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    mut req: ScanRequest,
) -> impl Future<Item = ScanResponse, Error = Error> {
    let end_key = match req.get_end_key() {
        k if k.is_empty() => None,
        k => Some(Key::from_raw(k)),
    };
    let mut options = Options::default();
    options.key_only = req.get_key_only();
    options.reverse_scan = req.get_reverse();
    let start_key = Key::from_raw(req.get_start_key());
    let limit = req.get_limit() as usize;
    let version = req.get_version();
    storage
        .async_scan(
            req.take_context(),
            start_key,
            end_key,
            limit,
            version,
            options,
        )
        .then(|v| {
            let mut resp = ScanResponse::default();
            match extract_region_error(&v) {
                Some(err) => resp.set_region_error(err),
                None => resp.set_pairs(extract_kv_pairs(v).into()),
            }
            Ok(resp)
        })
}
fn future_prewrite<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: PrewriteRequest,
) -> impl Future<Item = PrewriteResponse, Error = Error> {
let mutations = req
.take_mutations()
.into_iter()
.map(|mut x| match x.get_op() {
Op::Put => Mutation::Put((Key::from_raw(x.get_key()), x.take_value())),
Op::Del => Mutation::Delete(Key::from_raw(x.get_key())),
Op::Lock => Mutation::Lock(Key::from_raw(x.get_key())),
Op::Insert => Mutation::Insert((Key::from_raw(x.get_key()), x.take_value())),
_ => panic!("mismatch Op in prewrite mutations"),
})
.collect();
let mut options = Options::default();
options.lock_ttl = req.get_lock_ttl();
options.skip_constraint_check = req.get_skip_constraint_check();
options.for_update_ts = req.get_for_update_ts();
options.is_pessimistic_lock = req.take_is_pessimistic_lock();
options.txn_size = req.get_txn_size();
options.min_commit_ts = req.get_min_commit_ts();
let (cb, f) = paired_future_callback();
let res = storage.async_prewrite(
req.take_context(),
mutations,
req.take_primary_lock(),
req.get_start_version(),
options,
cb,
);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = PrewriteResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else {
resp.set_errors(extract_key_errors(v).into());
}
resp
})
}
fn future_acquire_pessimistic_lock<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: PessimisticLockRequest,
) -> impl Future<Item = PessimisticLockResponse, Error = Error> {
let keys = req
.take_mutations()
.into_iter()
.map(|x| match x.get_op() {
Op::PessimisticLock => (
Key::from_raw(x.get_key()),
x.get_assertion() == Assertion::NotExist,
),
_ => panic!("mismatch Op in pessimistic lock mutations"),
})
.collect();
let mut options = Options::default();
options.lock_ttl = req.get_lock_ttl();
options.is_first_lock = req.get_is_first_lock();
options.for_update_ts = req.get_for_update_ts();
options.wait_timeout = req.get_wait_timeout();
let (cb, f) = paired_future_callback();
let res = storage.async_acquire_pessimistic_lock(
req.take_context(),
keys,
req.take_primary_lock(),
req.get_start_version(),
options,
cb,
);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = PessimisticLockResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else {
resp.set_errors(extract_key_errors(v).into());
}
resp
})
}
fn future_pessimistic_rollback<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: PessimisticRollbackRequest,
) -> impl Future<Item = PessimisticRollbackResponse, Error = Error> {
let keys = req.get_keys().iter().map(|x| Key::from_raw(x)).collect();
let (cb, f) = paired_future_callback();
let res = storage.async_pessimistic_rollback(
req.take_context(),
keys,
req.get_start_version(),
req.get_for_update_ts(),
cb,
);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = PessimisticRollbackResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else {
resp.set_errors(extract_key_errors(v).into());
}
resp
})
}
fn future_commit<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: CommitRequest,
) -> impl Future<Item = CommitResponse, Error = Error> {
let keys = req.get_keys().iter().map(|x| Key::from_raw(x)).collect();
let (cb, f) = paired_future_callback();
let res = storage.async_commit(
req.take_context(),
keys,
req.get_start_version(),
req.get_commit_version(),
cb,
);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = CommitResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else if let Err(e) = v {
resp.set_error(extract_key_error(&e));
}
resp
})
}
fn future_cleanup<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: CleanupRequest,
) -> impl Future<Item = CleanupResponse, Error = Error> {
let (cb, f) = paired_future_callback();
let res = storage.async_cleanup(
req.take_context(),
Key::from_raw(req.get_key()),
req.get_start_version(),
req.get_current_ts(),
cb,
);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = CleanupResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else if let Err(e) = v {
if let Some(ts) = extract_committed(&e) {
resp.set_commit_version(ts);
} else {
resp.set_error(extract_key_error(&e));
}
}
resp
})
}
/// MVCC multi-key point get; results are returned as key/value pairs with
/// per-key errors embedded by `extract_kv_pairs`.
fn future_batch_get<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    mut req: BatchGetRequest,
) -> impl Future<Item = BatchGetResponse, Error = Error> {
    let keys: Vec<_> = req.get_keys().iter().map(|k| Key::from_raw(k)).collect();
    let version = req.get_version();
    storage
        .async_batch_get(req.take_context(), keys, version)
        .then(|v| {
            let mut resp = BatchGetResponse::default();
            match extract_region_error(&v) {
                Some(err) => resp.set_region_error(err),
                None => resp.set_pairs(extract_kv_pairs(v).into()),
            }
            Ok(resp)
        })
}
fn future_batch_rollback<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: BatchRollbackRequest,
) -> impl Future<Item = BatchRollbackResponse, Error = Error> {
let keys = req.get_keys().iter().map(|x| Key::from_raw(x)).collect();
let (cb, f) = paired_future_callback();
let res = storage.async_rollback(req.take_context(), keys, req.get_start_version(), cb);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = BatchRollbackResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else if let Err(e) = v {
resp.set_error(extract_key_error(&e));
}
resp
})
}
/// Advances the TTL of a live transaction's primary lock and reports the
/// (possibly updated) TTL back to the client.
fn future_txn_heart_beat<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    mut req: TxnHeartBeatRequest,
) -> impl Future<Item = TxnHeartBeatResponse, Error = Error> {
    let primary_key = Key::from_raw(req.get_primary_lock());
    let (cb, f) = paired_future_callback();
    let res = storage.async_txn_heart_beat(
        req.take_context(),
        primary_key,
        req.get_start_version(),
        req.get_advise_lock_ttl(),
        cb,
    );
    AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
        let mut resp = TxnHeartBeatResponse::default();
        if let Some(err) = extract_region_error(&v) {
            resp.set_region_error(err);
        } else {
            match v {
                Ok(txn_status) => {
                    // A successful heart beat implies the lock still exists,
                    // so the storage layer reports `Uncommitted`; any other
                    // status here would be a storage-layer bug.
                    if let TxnStatus::Uncommitted { lock_ttl, .. } = txn_status {
                        resp.set_lock_ttl(lock_ttl);
                    } else {
                        unreachable!();
                    }
                }
                Err(e) => resp.set_error(extract_key_error(&e)),
            }
        }
        resp
    })
}
/// Checks the status of a transaction via its primary key and maps each
/// `TxnStatus` outcome onto the protobuf `Action`/commit-version fields.
fn future_check_txn_status<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    mut req: CheckTxnStatusRequest,
) -> impl Future<Item = CheckTxnStatusResponse, Error = Error> {
    let primary_key = Key::from_raw(req.get_primary_key());
    let (cb, f) = paired_future_callback();
    let res = storage.async_check_txn_status(
        req.take_context(),
        primary_key,
        req.get_lock_ts(),
        req.get_caller_start_ts(),
        req.get_current_ts(),
        req.get_rollback_if_not_exist(),
        cb,
    );
    // Captured for the MinCommitTsPushed comparison in the closure below.
    let caller_start_ts = req.get_caller_start_ts();
    AndThenWith::new(res, f.map_err(Error::from)).map(move |v| {
        let mut resp = CheckTxnStatusResponse::default();
        if let Some(err) = extract_region_error(&v) {
            resp.set_region_error(err);
        } else {
            match v {
                Ok(txn_status) => match txn_status {
                    TxnStatus::Rollbacked => resp.set_action(Action::NoAction),
                    TxnStatus::TtlExpire => resp.set_action(Action::TtlExpireRollback),
                    TxnStatus::LockNotExist => resp.set_action(Action::LockNotExistRollback),
                    TxnStatus::Committed { commit_ts } => resp.set_commit_version(commit_ts),
                    TxnStatus::Uncommitted {
                        lock_ttl,
                        min_commit_ts,
                    } => {
                        resp.set_lock_ttl(lock_ttl);
                        // The lock's min_commit_ts was pushed past the
                        // caller's start ts, so the caller can proceed
                        // without waiting for this transaction.
                        if min_commit_ts > caller_start_ts {
                            resp.set_action(Action::MinCommitTsPushed);
                        }
                    }
                },
                Err(e) => resp.set_error(extract_key_error(&e)),
            }
        }
        resp
    })
}
fn future_scan_lock<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: ScanLockRequest,
) -> impl Future<Item = ScanLockResponse, Error = Error> {
let (cb, f) = paired_future_callback();
let res = storage.async_scan_locks(
req.take_context(),
req.get_max_version(),
req.take_start_key(),
req.get_limit() as usize,
cb,
);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = ScanLockResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else {
match v {
Ok(locks) => resp.set_locks(locks.into()),
Err(e) => resp.set_error(extract_key_error(&e)),
}
}
resp
})
}
/// Resolves locks either for specific keys of one transaction ("lite" path,
/// taken when the request lists keys) or for all locks of the transactions
/// described by `start_version`/`txn_infos` (full path).
fn future_resolve_lock<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    mut req: ResolveLockRequest,
) -> impl Future<Item = ResolveLockResponse, Error = Error> {
    let resolve_keys: Vec<Key> = req
        .get_keys()
        .iter()
        .map(|key| Key::from_raw(key))
        .collect();
    let (cb, f) = paired_future_callback();
    let res = if !resolve_keys.is_empty() {
        // Lite path: resolve only the listed keys of a single transaction.
        // A positive start version is required to identify it.
        let start_ts = req.get_start_version();
        assert!(start_ts > 0);
        let commit_ts = req.get_commit_version();
        storage.async_resolve_lock_lite(req.take_context(), start_ts, commit_ts, resolve_keys, cb)
    } else {
        // Full path: resolve every lock of the given transactions. The
        // start_ts -> commit_ts map is built here, inside the branch that
        // uses it, so the lite path above does not pay for an unused
        // allocation.
        let txn_status = if req.get_start_version() > 0 {
            HashMap::from_iter(iter::once((
                req.get_start_version(),
                req.get_commit_version(),
            )))
        } else {
            HashMap::from_iter(
                req.take_txn_infos()
                    .into_iter()
                    .map(|info| (info.txn, info.status)),
            )
        };
        storage.async_resolve_lock(req.take_context(), txn_status, cb)
    };
    AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
        let mut resp = ResolveLockResponse::default();
        if let Some(err) = extract_region_error(&v) {
            resp.set_region_error(err);
        } else if let Err(e) = v {
            resp.set_error(extract_key_error(&e));
        }
        resp
    })
}
fn future_gc<E: Engine>(
gc_worker: &GCWorker<E>,
mut req: GcRequest,
) -> impl Future<Item = GcResponse, Error = Error> {
let (cb, f) = paired_future_callback();
let res = gc_worker.async_gc(req.take_context(), req.get_safe_point(), cb);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = GcResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else if let Err(e) = v {
resp.set_error(extract_key_error(&e));
}
resp
})
}
fn future_delete_range<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: DeleteRangeRequest,
) -> impl Future<Item = DeleteRangeResponse, Error = Error> {
let (cb, f) = paired_future_callback();
let res = storage.async_delete_range(
req.take_context(),
Key::from_raw(req.get_start_key()),
Key::from_raw(req.get_end_key()),
req.get_notify_only(),
cb,
);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = DeleteRangeResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else if let Err(e) = v {
resp.set_error(format!("{}", e));
}
resp
})
}
/// Raw (non-transactional) point get from the given column family.
fn future_raw_get<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    mut req: RawGetRequest,
) -> impl Future<Item = RawGetResponse, Error = Error> {
    let ctx = req.take_context();
    let cf = req.take_cf();
    let key = req.take_key();
    storage.async_raw_get(ctx, cf, key).then(|v| {
        let mut resp = RawGetResponse::default();
        match extract_region_error(&v) {
            Some(err) => resp.set_region_error(err),
            None => match v {
                Ok(Some(val)) => resp.set_value(val),
                Ok(None) => resp.set_not_found(true),
                Err(e) => resp.set_error(format!("{}", e)),
            },
        }
        Ok(resp)
    })
}
/// Executes a batched raw point-get against the given column family and
/// sends each result back through `tx`, tagged with its request id.
///
/// `requests` (ids) and `commands` are expected to be parallel vectors;
/// a length mismatch is logged but the `zip` then silently pairs up to the
/// shorter of the two.
fn future_raw_batch_get_command<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    tx: Sender<(u64, batch_commands_response::Response)>,
    requests: Vec<u64>,
    cf: String,
    commands: Vec<PointGetCommand>,
) -> impl Future<Item = (), Error = ()> {
    let timer = GRPC_MSG_HISTOGRAM_VEC
        .raw_batch_get_command
        .start_coarse_timer();
    storage
        .async_raw_batch_get_command(cf, commands)
        .then(move |v| {
            match v {
                // Per-command results: translate each into a RawGetResponse.
                Ok(v) => {
                    if requests.len() != v.len() {
                        error!("KvService batch response size mismatch");
                    }
                    for (req, v) in requests.into_iter().zip(v.into_iter()) {
                        let mut resp = RawGetResponse::default();
                        if let Some(err) = extract_region_error(&v) {
                            resp.set_region_error(err);
                        } else {
                            match v {
                                Ok(Some(val)) => resp.set_value(val),
                                Ok(None) => resp.set_not_found(true),
                                Err(e) => resp.set_error(format!("{}", e)),
                            }
                        }
                        let mut res = batch_commands_response::Response::default();
                        res.cmd = Some(batch_commands_response::response::Cmd::RawGet(resp));
                        if tx.send_and_notify((req, res)).is_err() {
                            error!("KvService response batch commands fail");
                        }
                    }
                }
                // The whole batch failed (`e` can only be `Err` here since
                // `Ok` is matched above): broadcast the same error response
                // to every pending request id.
                e => {
                    let mut resp = RawGetResponse::default();
                    if let Some(err) = extract_region_error(&e) {
                        resp.set_region_error(err);
                    } else if let Err(e) = e {
                        resp.set_error(format!("{}", e));
                    }
                    let mut res = batch_commands_response::Response::default();
                    res.cmd = Some(batch_commands_response::response::Cmd::RawGet(resp));
                    for req in requests {
                        if tx.send_and_notify((req, res.clone())).is_err() {
                            error!("KvService response batch commands fail");
                        }
                    }
                }
            }
            // One timer covers the whole batch, not each individual command.
            timer.observe_duration();
            Ok(())
        })
}
/// Raw multi-key point get from one column family.
fn future_raw_batch_get<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    mut req: RawBatchGetRequest,
) -> impl Future<Item = RawBatchGetResponse, Error = Error> {
    let keys = req.take_keys().into();
    let cf = req.take_cf();
    storage
        .async_raw_batch_get(req.take_context(), cf, keys)
        .then(|v| {
            let mut resp = RawBatchGetResponse::default();
            match extract_region_error(&v) {
                Some(err) => resp.set_region_error(err),
                None => resp.set_pairs(extract_kv_pairs(v).into()),
            }
            Ok(resp)
        })
}
fn future_raw_put<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: RawPutRequest,
) -> impl Future<Item = RawPutResponse, Error = Error> {
let (cb, future) = paired_future_callback();
let res = storage.async_raw_put(
req.take_context(),
req.take_cf(),
req.take_key(),
req.take_value(),
cb,
);
AndThenWith::new(res, future.map_err(Error::from)).map(|v| {
let mut resp = RawPutResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else if let Err(e) = v {
resp.set_error(format!("{}", e));
}
resp
})
}
fn future_raw_batch_put<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: RawBatchPutRequest,
) -> impl Future<Item = RawBatchPutResponse, Error = Error> {
let cf = req.take_cf();
let pairs = req
.take_pairs()
.into_iter()
.map(|mut x| (x.take_key(), x.take_value()))
.collect();
let (cb, f) = paired_future_callback();
let res = storage.async_raw_batch_put(req.take_context(), cf, pairs, cb);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = RawBatchPutResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else if let Err(e) = v {
resp.set_error(format!("{}", e));
}
resp
})
}
fn future_raw_delete<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: RawDeleteRequest,
) -> impl Future<Item = RawDeleteResponse, Error = Error> {
let (cb, f) = paired_future_callback();
let res = storage.async_raw_delete(req.take_context(), req.take_cf(), req.take_key(), cb);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = RawDeleteResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else if let Err(e) = v {
resp.set_error(format!("{}", e));
}
resp
})
}
fn future_raw_batch_delete<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: RawBatchDeleteRequest,
) -> impl Future<Item = RawBatchDeleteResponse, Error = Error> {
let cf = req.take_cf();
let keys = req.take_keys().into();
let (cb, f) = paired_future_callback();
let res = storage.async_raw_batch_delete(req.take_context(), cf, keys, cb);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = RawBatchDeleteResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else if let Err(e) = v {
resp.set_error(format!("{}", e));
}
resp
})
}
/// Raw range scan over one column family. An empty `end_key` means the scan
/// is unbounded on the right.
fn future_raw_scan<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    mut req: RawScanRequest,
) -> impl Future<Item = RawScanResponse, Error = Error> {
    let end_key = if req.get_end_key().is_empty() {
        None
    } else {
        Some(req.take_end_key())
    };
    let ctx = req.take_context();
    let cf = req.take_cf();
    let start_key = req.take_start_key();
    let limit = req.get_limit() as usize;
    let key_only = req.get_key_only();
    let reverse = req.get_reverse();
    storage
        .async_raw_scan(ctx, cf, start_key, end_key, limit, key_only, reverse)
        .then(|v| {
            let mut resp = RawScanResponse::default();
            match extract_region_error(&v) {
                Some(err) => resp.set_region_error(err),
                None => resp.set_kvs(extract_kv_pairs(v).into()),
            }
            Ok(resp)
        })
}
/// Raw scan over several ranges of one column family, returning at most
/// `each_limit` pairs per range.
fn future_raw_batch_scan<E: Engine, L: LockMgr>(
    storage: &Storage<E, L>,
    mut req: RawBatchScanRequest,
) -> impl Future<Item = RawBatchScanResponse, Error = Error> {
    let ctx = req.take_context();
    let cf = req.take_cf();
    let ranges = req.take_ranges().into();
    let each_limit = req.get_each_limit() as usize;
    let key_only = req.get_key_only();
    let reverse = req.get_reverse();
    storage
        .async_raw_batch_scan(ctx, cf, ranges, each_limit, key_only, reverse)
        .then(|v| {
            let mut resp = RawBatchScanResponse::default();
            match extract_region_error(&v) {
                Some(err) => resp.set_region_error(err),
                None => resp.set_kvs(extract_kv_pairs(v).into()),
            }
            Ok(resp)
        })
}
fn future_raw_delete_range<E: Engine, L: LockMgr>(
storage: &Storage<E, L>,
mut req: RawDeleteRangeRequest,
) -> impl Future<Item = RawDeleteRangeResponse, Error = Error> {
let (cb, f) = paired_future_callback();
let res = storage.async_raw_delete_range(
req.take_context(),
req.take_cf(),
req.take_start_key(),
req.take_end_key(),
cb,
);
AndThenWith::new(res, f.map_err(Error::from)).map(|v| {
let mut resp = RawDeleteRangeResponse::default();
if let Some(err) = extract_region_error(&v) {
resp.set_region_error(err);
} else if let Err(e) = v {
resp.set_error(format!("{}", e));
}
resp
})
}
/// Hands a unary coprocessor request to the endpoint. Failures are encoded
/// inside the returned `Response` itself, so the error branch of this
/// future can never fire.
fn future_cop<E: Engine>(
    cop: &Endpoint<E>,
    req: Request,
    peer: Option<String>,
) -> impl Future<Item = Response, Error = Error> {
    cop.parse_and_handle_unary_request(req, peer)
        .map_err(|_| unreachable!())
}
/// Pulls a `RegionError` out of a storage result when the failure should be
/// reported through the response's region-error field (so the client
/// retries elsewhere) rather than as a key error. Returns `None` for
/// success and for all other error kinds.
fn extract_region_error<T>(res: &storage::Result<T>) -> Option<RegionError> {
    use crate::storage::Error;
    match *res {
        // TODO: use `Error::cause` instead.
        // A request-level engine error may be wrapped at several depths of
        // the txn/mvcc error hierarchy; unwrap all three shapes.
        Err(Error::Engine(EngineError::Request(ref e)))
        | Err(Error::Txn(TxnError::Engine(EngineError::Request(ref e))))
        | Err(Error::Txn(TxnError::Mvcc(MvccError::Engine(EngineError::Request(ref e))))) => {
            Some(e.to_owned())
        }
        // Back-pressure conditions are surfaced as "server is busy" so the
        // client backs off and retries.
        Err(Error::SchedTooBusy) => {
            let mut err = RegionError::default();
            let mut server_is_busy_err = ServerIsBusy::default();
            server_is_busy_err.set_reason(SCHEDULER_IS_BUSY.to_owned());
            err.set_server_is_busy(server_is_busy_err);
            Some(err)
        }
        Err(Error::GCWorkerTooBusy) => {
            let mut err = RegionError::default();
            let mut server_is_busy_err = ServerIsBusy::default();
            server_is_busy_err.set_reason(GC_WORKER_IS_BUSY.to_owned());
            err.set_server_is_busy(server_is_busy_err);
            Some(err)
        }
        Err(Error::Closed) => {
            // TiKV is closing, return an RegionError to tell the client that this region is unavailable
            // temporarily, the client should retry the request in other TiKVs.
            let mut err = RegionError::default();
            err.set_message("TiKV is Closing".to_string());
            Some(err)
        }
        _ => None,
    }
}
fn extract_committed(err: &storage::Error) -> Option<u64> {
match *err {
storage::Error::Txn(TxnError::Mvcc(MvccError::Committed { commit_ts })) => Some(commit_ts),
_ => None,
}
}
/// Converts a storage-layer error into the `KeyError` protobuf, filling the
/// field that matches the concrete error kind (lock, conflict, deadlock, ...).
fn extract_key_error(err: &storage::Error) -> KeyError {
    let mut key_error = KeyError::default();
    match err {
        // The key is currently locked; report the lock information.
        storage::Error::Txn(TxnError::Mvcc(MvccError::KeyIsLocked(info))) => {
            key_error.set_locked(info.clone());
        }
        // failed in prewrite or pessimistic lock
        storage::Error::Txn(TxnError::Mvcc(MvccError::WriteConflict {
            start_ts,
            conflict_start_ts,
            conflict_commit_ts,
            key,
            primary,
            ..
        })) => {
            let mut write_conflict = WriteConflict::default();
            write_conflict.set_start_ts(*start_ts);
            write_conflict.set_conflict_ts(*conflict_start_ts);
            write_conflict.set_conflict_commit_ts(*conflict_commit_ts);
            write_conflict.set_key(key.to_owned());
            write_conflict.set_primary(primary.to_owned());
            key_error.set_conflict(write_conflict);
            // for compatibility with older versions.
            key_error.set_retryable(format!("{:?}", err));
        }
        storage::Error::Txn(TxnError::Mvcc(MvccError::AlreadyExist { key })) => {
            let mut exist = AlreadyExist::default();
            exist.set_key(key.clone());
            key_error.set_already_exist(exist);
        }
        // failed in commit
        storage::Error::Txn(TxnError::Mvcc(MvccError::TxnLockNotFound { .. })) => {
            warn!("txn conflicts"; "err" => ?err);
            key_error.set_retryable(format!("{:?}", err));
        }
        storage::Error::Txn(TxnError::Mvcc(MvccError::TxnNotFound { start_ts, key })) => {
            let mut txn_not_found = TxnNotFound::default();
            txn_not_found.set_start_ts(*start_ts);
            txn_not_found.set_primary_key(key.to_owned());
            key_error.set_txn_not_found(txn_not_found);
        }
        storage::Error::Txn(TxnError::Mvcc(MvccError::Deadlock {
            lock_ts,
            lock_key,
            deadlock_key_hash,
            ..
        })) => {
            warn!("txn deadlocks"; "err" => ?err);
            let mut deadlock = Deadlock::default();
            deadlock.set_lock_ts(*lock_ts);
            deadlock.set_lock_key(lock_key.to_owned());
            deadlock.set_deadlock_key_hash(*deadlock_key_hash);
            key_error.set_deadlock(deadlock);
        }
        storage::Error::Txn(TxnError::Mvcc(MvccError::CommitTsExpired {
            start_ts,
            commit_ts,
            key,
            min_commit_ts,
        })) => {
            let mut commit_ts_expired = CommitTsExpired::default();
            commit_ts_expired.set_start_ts(*start_ts);
            commit_ts_expired.set_attempted_commit_ts(*commit_ts);
            commit_ts_expired.set_key(key.to_owned());
            commit_ts_expired.set_min_commit_ts(*min_commit_ts);
            key_error.set_commit_ts_expired(commit_ts_expired);
        }
        // Every remaining error kind is reported as a non-retryable abort.
        _ => {
            error!("txn aborts"; "err" => ?err);
            key_error.set_abort(format!("{:?}", err));
        }
    }
    key_error
}
fn extract_kv_pairs(res: storage::Result<Vec<storage::Result<storage::KvPair>>>) -> Vec<KvPair> {
match res {
Ok(res) => res
.into_iter()
.map(|r| match r {
Ok((key, value)) => {
let mut pair = KvPair::default();
pair.set_key(key);
pair.set_value(value);
pair
}
Err(e) => {
let mut pair = KvPair::default();
pair.set_error(extract_key_error(&e));
pair
}
})
.collect(),
Err(e) => {
let mut pair = KvPair::default();
pair.set_error(extract_key_error(&e));
vec![pair]
}
}
}
/// Converts the storage layer's MVCC snapshot of a key into the `MvccInfo`
/// protobuf: the active lock (if any) plus all values and write records.
fn extract_mvcc_info(mvcc: storage::MvccInfo) -> MvccInfo {
    let mut mvcc_info = MvccInfo::default();
    if let Some(lock) = mvcc.lock {
        let mut lock_info = MvccLock::default();
        // Map the internal lock type onto the protobuf `Op` enum.
        let op = match lock.lock_type {
            LockType::Put => Op::Put,
            LockType::Delete => Op::Del,
            LockType::Lock => Op::Lock,
            LockType::Pessimistic => Op::PessimisticLock,
        };
        lock_info.set_type(op);
        lock_info.set_start_ts(lock.ts);
        lock_info.set_primary(lock.primary);
        lock_info.set_short_value(lock.short_value.unwrap_or_default());
        mvcc_info.set_lock(lock_info);
    }
    let vv = extract_2pc_values(mvcc.values);
    let vw = extract_2pc_writes(mvcc.writes);
    mvcc_info.set_writes(vw.into());
    mvcc_info.set_values(vv.into());
    mvcc_info
}
/// Converts `(start_ts, value)` pairs into protobuf `MvccValue`s.
fn extract_2pc_values(res: Vec<(u64, Value)>) -> Vec<MvccValue> {
    let mut values = Vec::with_capacity(res.len());
    for (start_ts, value) in res {
        let mut value_info = MvccValue::default();
        value_info.set_start_ts(start_ts);
        value_info.set_value(value);
        values.push(value_info);
    }
    values
}
/// Converts `(commit_ts, write)` records into protobuf `MvccWrite`s.
fn extract_2pc_writes(res: Vec<(u64, MvccWrite)>) -> Vec<kvrpcpb::MvccWrite> {
    res.into_iter()
        .map(|(commit_ts, write)| {
            let mut write_info = kvrpcpb::MvccWrite::default();
            // Map the internal write type onto the protobuf `Op` enum.
            let op = match write.write_type {
                WriteType::Put => Op::Put,
                WriteType::Delete => Op::Del,
                WriteType::Lock => Op::Lock,
                WriteType::Rollback => Op::Rollback,
            };
            write_info.set_type(op);
            write_info.set_start_ts(write.start_ts);
            write_info.set_commit_ts(commit_ts);
            write_info.set_short_value(write.short_value.unwrap_or_default());
            write_info
        })
        .collect()
}
/// Collects the per-key errors from a batch result as `KeyError`s; a
/// top-level failure yields a single-element vector.
fn extract_key_errors(res: storage::Result<Vec<storage::Result<()>>>) -> Vec<KeyError> {
    match res {
        Ok(results) => results
            .into_iter()
            .filter_map(|r| r.err())
            .map(|e| extract_key_error(&e))
            .collect(),
        Err(e) => vec![extract_key_error(&e)],
    }
}
/// Aliases that give the generated protobuf batch-response types
/// module-style names (e.g. `batch_commands_response::Response`).
mod batch_commands_response {
    pub type Response = kvproto::tikvpb::BatchCommandsResponseResponse;
    pub mod response {
        pub type Cmd = kvproto::tikvpb::BatchCommandsResponse_Response_oneof_cmd;
    }
}
/// Aliases that give the generated protobuf batch-request types
/// module-style names (e.g. `batch_commands_request::Request`).
mod batch_commands_request {
    pub type Request = kvproto::tikvpb::BatchCommandsRequestRequest;
    pub mod request {
        pub type Cmd = kvproto::tikvpb::BatchCommandsRequest_Request_oneof_cmd;
    }
}
#[cfg(test)]
mod tests {
    use std::thread;
    use tokio_sync::oneshot;
    use super::*;
    use crate::storage;
    use crate::storage::mvcc::Error as MvccError;
    use crate::storage::txn::Error as TxnError;
    /// `extract_key_error` must fill both the structured `conflict` field
    /// and the legacy `retryable` string for a write conflict.
    #[test]
    fn test_extract_key_error_write_conflict() {
        let start_ts = 110;
        let conflict_start_ts = 108;
        let conflict_commit_ts = 109;
        let key = b"key".to_vec();
        let primary = b"primary".to_vec();
        let case = storage::Error::from(TxnError::from(MvccError::WriteConflict {
            start_ts,
            conflict_start_ts,
            conflict_commit_ts,
            key: key.clone(),
            primary: primary.clone(),
        }));
        let mut expect = KeyError::default();
        let mut write_conflict = WriteConflict::default();
        write_conflict.set_start_ts(start_ts);
        write_conflict.set_conflict_ts(conflict_start_ts);
        write_conflict.set_conflict_commit_ts(conflict_commit_ts);
        write_conflict.set_key(key);
        write_conflict.set_primary(primary);
        expect.set_conflict(write_conflict);
        expect.set_retryable(format!("{:?}", case));
        let got = extract_key_error(&case);
        assert_eq!(got, expect);
    }
    /// When the source completes after the future has been polled, the
    /// continuation runs on the source's thread (asserted via thread name).
    #[test]
    fn test_poll_future_notify_with_slow_source() {
        let (tx, rx) = oneshot::channel::<usize>();
        let (signal_tx, signal_rx) = oneshot::channel();
        thread::Builder::new()
            .name("source".to_owned())
            .spawn(move || {
                // Wait until the main thread has polled, then complete.
                signal_rx.wait().unwrap();
                tx.send(100).unwrap();
            })
            .unwrap();
        let (tx1, rx1) = oneshot::channel::<usize>();
        poll_future_notify(
            rx.map(move |i| {
                assert_eq!(thread::current().name(), Some("source"));
                tx1.send(i + 100).unwrap();
            })
            .map_err(|_| ()),
        );
        signal_tx.send(()).unwrap();
        assert_eq!(rx1.wait().unwrap(), 200);
    }
    /// When the source has already completed before polling, the
    /// continuation runs on the polling thread, not the source's.
    #[test]
    fn test_poll_future_notify_with_slow_poller() {
        let (tx, rx) = oneshot::channel::<usize>();
        let (signal_tx, signal_rx) = oneshot::channel();
        thread::Builder::new()
            .name("source".to_owned())
            .spawn(move || {
                tx.send(100).unwrap();
                signal_tx.send(()).unwrap();
            })
            .unwrap();
        let (tx1, rx1) = oneshot::channel::<usize>();
        // Only poll after the source has definitely finished sending.
        signal_rx.wait().unwrap();
        poll_future_notify(
            rx.map(move |i| {
                assert_ne!(thread::current().name(), Some("source"));
                tx1.send(i + 100).unwrap();
            })
            .map_err(|_| ()),
        );
        assert_eq!(rx1.wait().unwrap(), 200);
    }
}
| 36.101459 | 115 | 0.542085 |
9cfd6a6e5cd6427df42f0c93f9fa5ea0168695a0 | 17,282 | use crate::gl_render;
use crate::simulator::{Simulator, Particle};
use crate::window;
use gl;
use freetype;
use std::ffi::CString;
impl Simulator {
    /// Uploads the positions of all particles into the given GL vertex
    /// buffer and returns how many vertices (particles) were written.
    ///
    /// Positions are normalized by the window's x/y ranges. The buffer is
    /// reallocated with `BufferData` when the data has grown past
    /// `buf_size` and updated in place with `BufferSubData` otherwise.
    pub fn draw_particles(
        &self,
        buf_id: &gl::types::GLuint,
        vao: &gl::types::GLuint,
        buf_size: &mut usize,
        window_info: &window::WindowData,
    ) -> usize {
        // Flatten each particle position into (x, y, 0.0) triples.
        let mut positions: Vec<f32> = Vec::with_capacity(self.particle_list.borrow().len());
        for p in self.particle_list.borrow().iter() {
            let pos = p.get_pos();
            positions.push(pos.x / window_info.x_range);
            positions.push(pos.y / window_info.y_range);
            positions.push(0.0);
        }
        if positions.len() > *buf_size {
            // Grown past the current buffer: reallocate.
            unsafe {
                window_info.gl.BindVertexArray(*vao);
                window_info.gl.BindBuffer(gl::ARRAY_BUFFER, *buf_id);
                // NOTE(review): glVertexAttribPointer's second argument is
                // the number of components per attribute (must be 1..=4);
                // passing `positions.len()` looks wrong — presumably it
                // should be 3. TODO confirm against the shader layout.
                window_info.gl.VertexAttribPointer(
                    0,
                    positions.len() as gl::types::GLint,
                    gl::FLOAT,
                    gl::FALSE,
                    (3 * std::mem::size_of::<f32>()) as gl::types::GLint,
                    std::ptr::null(),
                );
                window_info.gl.BufferData(
                    gl::ARRAY_BUFFER,
                    (positions.len() * std::mem::size_of::<f32>()) as gl::types::GLsizeiptr,
                    positions.as_ptr() as *const gl::types::GLvoid,
                    gl::DYNAMIC_DRAW,
                );
            }
            *buf_size = positions.len();
        } else {
            // Data fits: overwrite the front of the existing buffer.
            unsafe {
                window_info.gl.BindVertexArray(*vao);
                window_info.gl.BindBuffer(gl::ARRAY_BUFFER, *buf_id);
                // NOTE(review): same questionable size argument as above.
                window_info.gl.VertexAttribPointer(
                    0,
                    positions.len() as gl::types::GLint,
                    gl::FLOAT,
                    gl::FALSE,
                    (3 * std::mem::size_of::<f32>()) as gl::types::GLint,
                    std::ptr::null(),
                );
            }
            unsafe {
                window_info.gl.BindBuffer(gl::ARRAY_BUFFER, *buf_id);
                window_info.gl.BufferSubData(
                    gl::ARRAY_BUFFER,
                    0,
                    (positions.len() * std::mem::size_of::<f32>()) as gl::types::GLsizeiptr,
                    positions.as_ptr() as *const gl::types::GLvoid,
                );
            }
        }
        // NOTE(review): `buf_size` is set to `positions.len()` (floats) in
        // the grow branch above but to `positions.len() / 3` (vertices)
        // here, so the bookkeeping mixes two units — verify which meaning
        // the callers rely on.
        if positions.len() != *buf_size {
            *buf_size = positions.len() / 3;
        }
        return positions.len() / 3;
    }
    /// Lazily builds (and caches in `window_info`) the acceleration-field
    /// overlay — one short line segment per grid point, with a per-vertex
    /// magnitude attribute — then draws it. Setting
    /// `window_info.vectors_require_update` clears the cached program and
    /// buffers so the overlay is rebuilt on the next call.
    pub fn draw_vectors(window_info: &mut window::WindowData) {
        if window_info.vectors_require_update {
            // Invalidate everything; the `id() == 0` check below rebuilds.
            window_info.vector_program = gl_render::Program::blank_program(&window_info.gl);
            window_info.vector_color_storage = Vec::new();
            window_info.vector_coord_storage = Vec::new();
            window_info.vector_vao = 0;
            window_info.vector_vbo = [0, 0, 0];
            window_info.vectors_require_update = false;
        }
        if window_info.vector_program.id() == 0 {
            let vertex_shader = gl_render::Shader::from_vertex_source(
                &window_info.gl,
                &CString::new(include_str!("assets/shaders/vector.vert")).unwrap(),
            )
            .unwrap();
            let fragment_shader = gl_render::Shader::from_frag_source(
                &window_info.gl,
                &CString::new(include_str!("assets/shaders/vector.frag")).unwrap(),
            )
            .unwrap();
            // vectors
            let mut max_mag = 0.0_f32;
            let simulator = window_info.simulator.borrow_mut();
            window_info.vector_program = gl_render::Program::from_shaders(
                &window_info.gl,
                &mut [vertex_shader, fragment_shader],
            )
            .unwrap();
            // Sample the acceleration field on a grid with step 2.
            // NOTE(review): the outer loop runs from -x_range up to
            // y_range — presumably the upper bound should be x_range;
            // confirm.
            for c in (((-window_info.x_range) as isize)..(window_info.y_range as isize)).step_by(2)
            {
                for r in
                    (((-window_info.y_range) as isize)..(window_info.y_range as isize)).step_by(2)
                {
                    let force = Simulator::acceleration_for(
                        &simulator,
                        &Particle::new(c as f32, r as f32, 0.0, 0.0),
                    );
                    let theta = force.y.atan2(force.x);
                    let mag = (force.x.powi(2) + force.y.powi(2)).sqrt() as f32;
                    if mag > max_mag {
                        max_mag = mag;
                    }
                    // starts at a point
                    window_info
                        .vector_coord_storage
                        .push(c as f32 / window_info.x_range);
                    window_info
                        .vector_coord_storage
                        .push(r as f32 / window_info.y_range);
                    window_info.vector_coord_storage.push(0.0);
                    window_info.vector_color_storage.push(mag.abs());
                    // goes at some angle
                    // radius always == 1
                    if mag != 0.0 {
                        window_info.vector_coord_storage.push(
                            (theta.cos() as f32 * 0.8 / window_info.x_range)
                                + (c as f32 / window_info.x_range),
                        );
                        window_info.vector_coord_storage.push(
                            (theta.sin() as f32 * 0.8 / window_info.y_range)
                                + (r as f32 / window_info.y_range),
                        );
                    } else {
                        // Zero magnitude: degenerate segment (both ends at
                        // the grid point).
                        window_info
                            .vector_coord_storage
                            .push(c as f32 / window_info.x_range);
                        window_info
                            .vector_coord_storage
                            .push(r as f32 / window_info.y_range);
                    }
                    window_info.vector_coord_storage.push(0.0);
                    window_info.vector_color_storage.push(mag.abs());
                }
            }
            window_info.vec_range = max_mag;
            // Upload coordinates (attrib 0) and magnitudes (attrib 1).
            unsafe {
                window_info.gl.GenBuffers(3, &mut window_info.vector_vbo[0]);
                window_info
                    .gl
                    .BindBuffer(gl::ARRAY_BUFFER, window_info.vector_vbo[0]);
                window_info.gl.BufferData(
                    gl::ARRAY_BUFFER,
                    (window_info.vector_coord_storage.len() * std::mem::size_of::<f32>())
                        as gl::types::GLsizeiptr,
                    window_info.vector_coord_storage.as_ptr() as *const gl::types::GLvoid,
                    gl::DYNAMIC_DRAW,
                );
                window_info
                    .gl
                    .BindBuffer(gl::ARRAY_BUFFER, window_info.vector_vbo[1]);
                window_info.gl.BufferData(
                    gl::ARRAY_BUFFER,
                    (window_info.vector_color_storage.len() * std::mem::size_of::<f32>())
                        as gl::types::GLsizeiptr,
                    window_info.vector_color_storage.as_ptr() as *const gl::types::GLvoid,
                    gl::STATIC_DRAW,
                );
                window_info
                    .gl
                    .GenVertexArrays(1, &mut window_info.vector_vao);
                window_info.gl.BindVertexArray(window_info.vector_vao);
                window_info
                    .gl
                    .BindBuffer(gl::ARRAY_BUFFER, window_info.vector_vbo[0]);
                window_info.gl.EnableVertexAttribArray(0);
                window_info.gl.VertexAttribPointer(
                    0,
                    3,
                    gl::FLOAT,
                    gl::FALSE,
                    (3 * std::mem::size_of::<f32>()) as gl::types::GLint,
                    std::ptr::null(),
                );
                window_info
                    .gl
                    .BindBuffer(gl::ARRAY_BUFFER, window_info.vector_vbo[1]);
                window_info.gl.EnableVertexAttribArray(1);
                window_info.gl.VertexAttribPointer(
                    1,
                    1,
                    gl::FLOAT,
                    gl::FALSE,
                    std::mem::size_of::<f32>() as gl::types::GLint,
                    std::ptr::null(),
                );
                window_info.gl.BindBuffer(gl::ARRAY_BUFFER, 0);
                window_info.gl.BindVertexArray(0);
            }
        }
        window_info.vector_program.set_used();
        let range_location = unsafe {
            window_info.gl.GetUniformLocation(
                window_info.vector_program.id(),
                (CString::new("in_range").unwrap()).as_ptr(),
            )
        };
        unsafe {
            window_info.gl.LineWidth(2.0);
            // Pass the maximum magnitude so the shader can normalize colors.
            window_info
                .gl
                .Uniform1f(range_location, window_info.vec_range);
            window_info.gl.BindVertexArray(window_info.vector_vao);
            window_info.gl.DrawArrays(
                gl::LINES,
                0,
                (window_info.vector_coord_storage.len() / 3) as i32,
            );
        }
    }
}
/// Lazily builds (and caches in `window_info`) the background grid — one
/// line per integer coordinate plus thicker black axes — then draws it.
pub fn draw_gridlines(window_info: &mut window::WindowData) {
    if window_info.gridline_program.id() == 0 {
        let vertex_shader = gl_render::Shader::from_vertex_source(
            &window_info.gl,
            &CString::new(include_str!("assets/shaders/line.vert")).unwrap(),
        )
        .unwrap();
        let fragment_shader = gl_render::Shader::from_frag_source(
            &window_info.gl,
            &CString::new(include_str!("assets/shaders/line.frag")).unwrap(),
        )
        .unwrap();
        // gridlines
        window_info.gridline_program = gl_render::Program::from_shaders(
            &window_info.gl,
            &mut [vertex_shader, fragment_shader],
        )
        .unwrap();
        // Vertical lines, one per integer x coordinate.
        for v in ((-window_info.x_range) as isize)..(window_info.x_range as isize) {
            window_info
                .gridline_vec
                .push((v as f32) / window_info.x_range);
            window_info.gridline_vec.push(1.0);
            window_info.gridline_vec.push(0.0);
            window_info
                .gridline_vec
                .push(v as f32 / window_info.x_range);
            window_info.gridline_vec.push(-1.0);
            window_info.gridline_vec.push(0.0);
        }
        // Horizontal lines, one per integer y coordinate.
        for v in ((-window_info.y_range) as isize + 1)..(window_info.y_range as isize) {
            window_info.gridline_vec.push(-1.0);
            window_info
                .gridline_vec
                .push(v as f32 / window_info.y_range);
            window_info.gridline_vec.push(0.0);
            window_info.gridline_vec.push(1.0);
            window_info
                .gridline_vec
                .push((v as f32) / window_info.y_range);
            window_info.gridline_vec.push(0.0);
        }
        // The two axes are appended last so they can be drawn separately
        // (different color/width) below.
        window_info.gridline_vec.extend(&[-1.0, 0.0, 0.0]);
        window_info.gridline_vec.extend(&[1.0, 0.0, 0.0]);
        window_info.gridline_vec.extend(&[0.0, -1.0, 0.0]);
        window_info.gridline_vec.extend(&[0.0, 1.0, 0.0]);
        unsafe {
            window_info.gl.GenBuffers(1, &mut window_info.gridline_vbo);
            window_info
                .gl
                .BindBuffer(gl::ARRAY_BUFFER, window_info.gridline_vbo);
            window_info.gl.BufferData(
                gl::ARRAY_BUFFER,
                (window_info.gridline_vec.len() * std::mem::size_of::<f32>())
                    as gl::types::GLsizeiptr,
                window_info.gridline_vec.as_ptr() as *const gl::types::GLvoid,
                gl::STATIC_DRAW,
            );
            window_info
                .gl
                .GenVertexArrays(1, &mut window_info.gridline_vao);
            window_info.gl.BindVertexArray(window_info.gridline_vao);
            window_info.gl.EnableVertexAttribArray(0);
            window_info.gl.VertexAttribPointer(
                0,
                3,
                gl::FLOAT,
                gl::FALSE,
                (3 * std::mem::size_of::<f32>()) as gl::types::GLint,
                std::ptr::null(),
            );
        }
    }
    window_info.gridline_program.set_used();
    let vertex_color_location = unsafe {
        window_info.gl.GetUniformLocation(
            window_info.gridline_program.id(),
            (CString::new("inColor").unwrap()).as_ptr(),
        )
    };
    unsafe {
        // Grid: gray, thin; everything except the last 4 vertices (axes).
        window_info
            .gl
            .Uniform4f(vertex_color_location, 0.5, 0.5, 0.5, 1.0);
        window_info.gl.LineWidth(1.0);
        window_info.gl.BindVertexArray(window_info.gridline_vao)
        ;
        window_info
            .gl
            .BindBuffer(gl::ARRAY_BUFFER, window_info.gridline_vbo);
        window_info.gl.DrawArrays(
            gl::LINES,
            0,
            (window_info.gridline_vec.len() / 3) as i32 - 4,
        );
        // Axes: black, thick; the final 4 vertices.
        window_info
            .gl
            .Uniform4f(vertex_color_location, 0.0, 0.0, 0.0, 1.0);
        window_info.gl.LineWidth(3.0);
        window_info.gl.DrawArrays(
            gl::LINES,
            (window_info.gridline_vec.len() / 3) as i32 - 4,
            4,
        );
    }
}
/// Renders `text` at position `(x, y)` using the FreeType face `font`,
/// rasterizing and caching one GL texture per glyph in
/// `window_info.character_map`.
///
/// NOTE(review): iterates `text` as raw bytes (`as_bytes`), so only
/// single-byte (ASCII) characters render correctly — confirm inputs.
pub fn render_text(
    gl: &gl::Gl,
    text: &str,
    font: &freetype::face::Face,
    program: &gl_render::Program,
    vao: &mut gl::types::GLuint,
    vbo: &mut gl::types::GLuint,
    window_info: &mut window::WindowData,
    mut x: f32,
    y: f32,
    scale: f32,
) {
    let glyphs = (&text).as_bytes();
    // Glyph bitmaps are single-channel and not 4-byte aligned.
    unsafe { gl.PixelStorei(gl::UNPACK_ALIGNMENT, 1) };
    // initialization
    program.set_used();
    unsafe {
        // gl.Uniform3f(gl.GetUniformLocation(program.id(), CString::new("textColor").unwrap().as_ptr()), 0.0, 0.0, 0.0);
        // column-major
        // let projection = vec![0.0025, 0.0, 0.0, -1.0, /* 2 */ 0.0, 0.003333, 0.0, -1.0, /* 3 */ 0.0, 0.0, -1.0, 0.0, /* 4 */ 0.0, 0.0, 0.0, 1.0];
        // row-major
        // NOTE(review): hardcoded orthographic projection; the 0.0025 and
        // 0.003333 scales look like 2/800 and 2/600, i.e. an assumed
        // 800x600 viewport — confirm.
        let projection: Vec<f32> = vec![0.002500, 0.000000, 0.000000, 0.000000, 0.000000, 0.003333, 0.000000, 0.000000, 0.000000, 0.000000, -1.000000, 0.000000, -1.000000, -1.000000, 0.000000, 1.000000];
        gl.UniformMatrix4fv(gl.GetUniformLocation(program.id(), CString::new("projection").unwrap().as_ptr()), 1, gl::FALSE, projection.as_ptr() as *mut f32);
        gl.ActiveTexture(gl::TEXTURE0);
        gl.BindVertexArray(*vao);
    }
    for glyph in glyphs {
        // Rasterize and cache the glyph texture on first use.
        if !window_info.character_map.contains_key(&glyph) {
            font.load_char(*glyph as usize, freetype::face::LoadFlag::RENDER).unwrap();
            let g = font.glyph();
            let mut texture: gl::types::GLuint = 0;
            unsafe {
                gl.GenTextures(1, &mut texture);
                gl.BindTexture(gl::TEXTURE_2D, texture);
                gl.TexImage2D(
                    gl::TEXTURE_2D,
                    0,
                    gl::RED as i32,
                    g.bitmap().width(),
                    g.bitmap().rows(),
                    0,
                    gl::RED,
                    gl::UNSIGNED_BYTE,
                    g.bitmap().buffer().as_ptr() as *const gl::types::GLvoid,
                );
                gl.TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as i32);
                gl.TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_EDGE as i32);
                gl.TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as i32);
                gl.TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as i32);
            }
            window_info.character_map.insert(*glyph, window::Character {
                texture_id: texture,
                size_x: g.bitmap().width(),
                size_y: g.bitmap().rows(),
                bearing_x: g.bitmap_left(),
                bearing_y: g.bitmap_top(),
                advance: g.advance().x.into()
            });
        }
        let c = window_info.character_map.get(&glyph).unwrap();
        // Position the glyph quad using its bearing and size.
        let x_pos = x + (c.bearing_x as f32) * scale;
        let y_pos = y - (c.size_y - c.bearing_y) as f32 * scale;
        let w = c.size_x as f32 * scale;
        let h = c.size_y as f32 * scale;
        // let x_pos = 0.0;
        // let y_pos = 0.0;
        // let w = 50.0;
        // let h = 50.0;
        // Two triangles forming the glyph quad: (x, y, u, v) per vertex.
        let vertices: Vec<f32> = vec![
            x_pos, y_pos + h, 0.0, 0.0,
            x_pos, y_pos, 0.0, 1.0,
            x_pos + w, y_pos, 1.0, 1.0,
            x_pos, y_pos + h, 0.0, 0.0,
            x_pos + w, y_pos, 1.0, 1.0,
            x_pos + w, y_pos + h, 1.0, 0.0
        ];
        unsafe {
            gl.BindTexture(gl::TEXTURE_2D, c.texture_id);
            gl.BindBuffer(gl::ARRAY_BUFFER, *vbo);
            gl.BufferSubData(gl::ARRAY_BUFFER, 0, (vertices.len() * std::mem::size_of::<f32>()) as isize, vertices.as_ptr() as *const gl::types::GLvoid);
            gl.BindBuffer(gl::ARRAY_BUFFER, 0);
            gl.DrawArrays(gl::TRIANGLES, 0, 6);
        }
        // FreeType advances are in 1/64 pixel units.
        x += (c.advance / 64 ) as f32 * scale;
    }
    unsafe {
        gl.BindVertexArray(0);
        gl.BindTexture(gl::TEXTURE_2D, 0);
    }
}
| 39.011287 | 203 | 0.492304 |
9c9d4fde1ec19c1a3dc3613aa6b519da1d9dd4b9 | 1,827 | use log::{error, LevelFilter};
use serde::{Deserialize, Deserializer};
use crate::config::{failure_default, LOG_TARGET_CONFIG};
/// Debugging options.
#[serde(default)]
#[derive(Deserialize, Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Debug {
    /// Log level filter; unknown config values fall back to `Warn`.
    #[serde(default = "default_log_level", deserialize_with = "deserialize_log_level")]
    pub log_level: LevelFilter,
    /// Print events for debugging.
    #[serde(deserialize_with = "failure_default")]
    pub print_events: bool,
    /// Keep the log file after quitting.
    #[serde(deserialize_with = "failure_default")]
    pub persistent_logging: bool,
    /// Should show render timer.
    #[serde(deserialize_with = "failure_default")]
    pub render_timer: bool,
    /// Record ref test.
    #[serde(skip)]
    pub ref_test: bool,
}
impl Default for Debug {
fn default() -> Self {
Self {
log_level: default_log_level(),
print_events: Default::default(),
persistent_logging: Default::default(),
render_timer: Default::default(),
ref_test: Default::default(),
}
}
}
/// Log level used when none is configured: `Warn`.
fn default_log_level() -> LevelFilter {
    LevelFilter::Warn
}
/// Deserializes a log level from a case-insensitive string, falling back to
/// the default level (and logging a config error) for unknown values.
fn deserialize_log_level<'a, D>(deserializer: D) -> Result<LevelFilter, D::Error>
where
    D: Deserializer<'a>,
{
    let raw = failure_default::<D, String>(deserializer)?.to_lowercase();
    let parsed = match raw.as_str() {
        "off" | "none" => LevelFilter::Off,
        "error" => LevelFilter::Error,
        "warn" => LevelFilter::Warn,
        "info" => LevelFilter::Info,
        "debug" => LevelFilter::Debug,
        "trace" => LevelFilter::Trace,
        unknown => {
            error!(
                target: LOG_TARGET_CONFIG,
                "Problem with config: invalid log level {}; using level Warn", unknown
            );
            default_log_level()
        },
    };
    Ok(parsed)
}
| 28.107692 | 87 | 0.610837 |
8fc89b2ee50305fbc0f2464fe004b34966ece3b4 | 12,681 | use super::{
array::SingleArray, Access, BuildError, DimElement, EmptyToNone, Field, ModifiedWriteValues,
Name, ReadAction, RegisterProperties, SvdError, ValidateLevel, WriteConstraint,
};
/// A single register or array of registers. A register is a named, programmable resource that belongs to a [peripheral](crate::Peripheral).
pub type Register = SingleArray<RegisterInfo>;
/// Errors from [`RegisterInfo::validate`]
#[derive(Clone, Debug, PartialEq, Eq, thiserror::Error)]
pub enum Error {
    /// Register had no fields, but specified a `<fields>` tag.
    /// Only reported in strict validation mode.
    #[error("Register have `fields` tag, but it is empty")]
    EmptyFields,
}
/// A register is a named, programmable resource that belongs to a [peripheral](crate::Peripheral).
///
/// Construct with [`RegisterInfo::builder`].
#[cfg_attr(
    feature = "serde",
    derive(serde::Deserialize, serde::Serialize),
    serde(rename_all = "camelCase")
)]
#[derive(Clone, Debug, PartialEq)]
#[non_exhaustive]
pub struct RegisterInfo {
    /// String to identify the register.
    /// Register names are required to be unique within the scope of a peripheral
    pub name: String,
    /// Specifies a register name without the restrictions of an ANSI C identifier.
    #[cfg_attr(
        feature = "serde",
        serde(default, skip_serializing_if = "Option::is_none")
    )]
    pub display_name: Option<String>,
    /// String describing the details of the register
    #[cfg_attr(
        feature = "serde",
        serde(default, skip_serializing_if = "Option::is_none")
    )]
    pub description: Option<String>,
    /// Specifies a group name associated with all alternate register that have the same name
    #[cfg_attr(
        feature = "serde",
        serde(default, skip_serializing_if = "Option::is_none")
    )]
    pub alternate_group: Option<String>,
    /// This tag can reference a register that has been defined above to
    /// current location in the description and that describes the memory location already
    #[cfg_attr(
        feature = "serde",
        serde(default, skip_serializing_if = "Option::is_none")
    )]
    pub alternate_register: Option<String>,
    /// Define the address offset relative to the enclosing element
    pub address_offset: u32,
    /// Specifies register size, access permission and reset value
    #[cfg_attr(feature = "serde", serde(flatten))]
    pub properties: RegisterProperties,
    /// Specifies the write side effects
    #[cfg_attr(
        feature = "serde",
        serde(default, skip_serializing_if = "Option::is_none")
    )]
    pub modified_write_values: Option<ModifiedWriteValues>,
    /// Specifies the subset of allowed write values
    #[cfg_attr(
        feature = "serde",
        serde(default, skip_serializing_if = "Option::is_none")
    )]
    pub write_constraint: Option<WriteConstraint>,
    /// If set, it specifies the side effect following a read operation.
    /// If not set, the register is not modified
    #[cfg_attr(
        feature = "serde",
        serde(default, skip_serializing_if = "Option::is_none")
    )]
    pub read_action: Option<ReadAction>,
    /// `None` indicates that the `<fields>` node is not present
    #[cfg_attr(
        feature = "serde",
        serde(default, skip_serializing_if = "Option::is_none")
    )]
    pub fields: Option<Vec<Field>>,
    /// Specify the register name from which to inherit data.
    /// Elements specified subsequently override inherited values
    #[cfg_attr(
        feature = "serde",
        serde(default, skip_serializing_if = "Option::is_none")
    )]
    pub derived_from: Option<String>,
}
/// Builder for [`RegisterInfo`]
///
/// Fields mirror [`RegisterInfo`]; `None` means "not set yet". `name` and
/// `address_offset` are mandatory before [`RegisterInfoBuilder::build`].
#[derive(Clone, Debug, Default, PartialEq)]
pub struct RegisterInfoBuilder {
    name: Option<String>,
    display_name: Option<String>,
    description: Option<String>,
    alternate_group: Option<String>,
    alternate_register: Option<String>,
    address_offset: Option<u32>,
    properties: RegisterProperties,
    modified_write_values: Option<ModifiedWriteValues>,
    write_constraint: Option<WriteConstraint>,
    read_action: Option<ReadAction>,
    fields: Option<Vec<Field>>,
    derived_from: Option<String>,
}
impl From<RegisterInfo> for RegisterInfoBuilder {
fn from(r: RegisterInfo) -> Self {
Self {
name: Some(r.name),
display_name: r.display_name,
description: r.description,
alternate_group: r.alternate_group,
alternate_register: r.alternate_register,
address_offset: Some(r.address_offset),
properties: r.properties,
modified_write_values: r.modified_write_values,
write_constraint: r.write_constraint,
read_action: r.read_action,
fields: r.fields,
derived_from: r.derived_from,
}
}
}
impl RegisterInfoBuilder {
    /// Set the name of the register.
    pub fn name(mut self, value: String) -> Self {
        self.name = Some(value);
        self
    }
    /// Set the display name of the register.
    pub fn display_name(mut self, value: Option<String>) -> Self {
        self.display_name = value;
        self
    }
    /// Set the description of the register.
    pub fn description(mut self, value: Option<String>) -> Self {
        self.description = value;
        self
    }
    /// Set the alternate group of the register.
    pub fn alternate_group(mut self, value: Option<String>) -> Self {
        self.alternate_group = value;
        self
    }
    /// Set the alternate register of the register.
    pub fn alternate_register(mut self, value: Option<String>) -> Self {
        self.alternate_register = value;
        self
    }
    /// Set the address offset of the register.
    pub fn address_offset(mut self, value: u32) -> Self {
        self.address_offset = Some(value);
        self
    }
    /// Set the properties of the register.
    pub fn properties(mut self, value: RegisterProperties) -> Self {
        self.properties = value;
        self
    }
    /// Set the size of the register.
    /// (Shortcut for the corresponding field of `properties`.)
    pub fn size(mut self, value: Option<u32>) -> Self {
        self.properties.size = value;
        self
    }
    /// Set the access of the register.
    /// (Shortcut for the corresponding field of `properties`.)
    pub fn access(mut self, value: Option<Access>) -> Self {
        self.properties.access = value;
        self
    }
    /// Set the reset value of the register.
    /// (Shortcut for the corresponding field of `properties`.)
    pub fn reset_value(mut self, value: Option<u64>) -> Self {
        self.properties.reset_value = value;
        self
    }
    /// Set the reset mask of the register.
    /// (Shortcut for the corresponding field of `properties`.)
    pub fn reset_mask(mut self, value: Option<u64>) -> Self {
        self.properties.reset_mask = value;
        self
    }
    /// Set the modified write values of the register.
    pub fn modified_write_values(mut self, value: Option<ModifiedWriteValues>) -> Self {
        self.modified_write_values = value;
        self
    }
    /// Set the write constraint of the register.
    pub fn write_constraint(mut self, value: Option<WriteConstraint>) -> Self {
        self.write_constraint = value;
        self
    }
    /// Set the read action of the register.
    pub fn read_action(mut self, value: Option<ReadAction>) -> Self {
        self.read_action = value;
        self
    }
    /// Set the fields of the register.
    pub fn fields(mut self, value: Option<Vec<Field>>) -> Self {
        self.fields = value;
        self
    }
    /// Set the derived_from attribute of the register.
    pub fn derived_from(mut self, value: Option<String>) -> Self {
        self.derived_from = value;
        self
    }
    /// Validate and build a [`RegisterInfo`].
    ///
    /// Fails with [`BuildError::Uninitialized`] if `name` or
    /// `address_offset` was never set; further checks depend on `lvl`.
    pub fn build(self, lvl: ValidateLevel) -> Result<RegisterInfo, SvdError> {
        let mut reg = RegisterInfo {
            name: self
                .name
                .ok_or_else(|| BuildError::Uninitialized("name".to_string()))?,
            display_name: self.display_name,
            description: self.description,
            alternate_group: self.alternate_group,
            alternate_register: self.alternate_register,
            address_offset: self
                .address_offset
                .ok_or_else(|| BuildError::Uninitialized("address_offset".to_string()))?,
            properties: self.properties.build(lvl)?,
            modified_write_values: self.modified_write_values,
            write_constraint: self.write_constraint,
            read_action: self.read_action,
            fields: self.fields,
            derived_from: self.derived_from,
        };
        if !lvl.is_disabled() {
            reg.validate(lvl)?;
        }
        Ok(reg)
    }
}
impl RegisterInfo {
    /// Make a builder for [`RegisterInfo`]
    pub fn builder() -> RegisterInfoBuilder {
        RegisterInfoBuilder::default()
    }
    /// Construct single [`Register`]
    pub const fn single(self) -> Register {
        Register::Single(self)
    }
    /// Construct [`Register`] array
    pub const fn array(self, dim: DimElement) -> Register {
        Register::Array(self, dim)
    }
    /// Modify an existing [`RegisterInfo`] based on a [builder](RegisterInfoBuilder).
    pub fn modify_from(
        &mut self,
        builder: RegisterInfoBuilder,
        lvl: ValidateLevel,
    ) -> Result<(), SvdError> {
        if let Some(name) = builder.name {
            self.name = name;
        }
        if builder.display_name.is_some() {
            self.display_name = builder.display_name.empty_to_none();
        }
        if builder.description.is_some() {
            self.description = builder.description.empty_to_none();
        }
        if builder.alternate_group.is_some() {
            self.alternate_group = builder.alternate_group.empty_to_none();
        }
        if builder.alternate_register.is_some() {
            self.alternate_register = builder.alternate_register.empty_to_none();
        }
        if let Some(address_offset) = builder.address_offset {
            self.address_offset = address_offset;
        }
        if builder.derived_from.is_some() {
            // Deriving from another register: drop locally-defined data so
            // the inherited values take effect.
            self.derived_from = builder.derived_from;
            self.fields = None;
            self.properties = RegisterProperties::default();
            self.modified_write_values = None;
            self.write_constraint = None;
        } else {
            self.properties.modify_from(builder.properties, lvl)?;
            if builder.modified_write_values.is_some() {
                self.modified_write_values = builder.modified_write_values;
            }
            if builder.write_constraint.is_some() {
                self.write_constraint = builder.write_constraint;
            }
            if builder.read_action.is_some() {
                self.read_action = builder.read_action;
            }
            if builder.fields.is_some() {
                self.fields = builder.fields.empty_to_none();
            }
        }
        if !lvl.is_disabled() {
            self.validate(lvl)
        } else {
            Ok(())
        }
    }
    /// Validate the [`RegisterInfo`]
    pub fn validate(&mut self, lvl: ValidateLevel) -> Result<(), SvdError> {
        // Name-format checks only run in strict mode.
        if lvl.is_strict() {
            super::check_dimable_name(&self.name, "name")?;
            if let Some(name) = self.alternate_group.as_ref() {
                super::check_name(name, "alternateGroup")?;
            }
            if let Some(name) = self.alternate_register.as_ref() {
                super::check_dimable_name(name, "alternateRegister")?;
            }
        }
        if let Some(name) = self.derived_from.as_ref() {
            if lvl.is_strict() {
                super::check_derived_name(name, "derivedFrom")?;
            }
        } else if let Some(fields) = self.fields.as_ref() {
            // A present-but-empty `<fields>` node is an error in strict mode.
            if fields.is_empty() && lvl.is_strict() {
                return Err(Error::EmptyFields.into());
            }
        }
        Ok(())
    }
    /// Returns iterator over child fields
    pub fn fields(&self) -> std::slice::Iter<Field> {
        match &self.fields {
            Some(fields) => fields.iter(),
            None => [].iter(),
        }
    }
    /// Returns mutable iterator over child fields
    pub fn fields_mut(&mut self) -> std::slice::IterMut<Field> {
        match &mut self.fields {
            Some(fields) => fields.iter_mut(),
            None => [].iter_mut(),
        }
    }
    /// Get field by name
    pub fn get_field(&self, name: &str) -> Option<&Field> {
        self.fields().find(|f| f.name == name)
    }
    /// Get mutable field by name
    pub fn get_mut_field(&mut self, name: &str) -> Option<&mut Field> {
        self.fields_mut().find(|f| f.name == name)
    }
}
impl Name for RegisterInfo {
fn name(&self) -> &str {
&self.name
}
}
| 34.553134 | 140 | 0.61186 |
2892ff30fa450d4ab577d7b211fc6cdab4e58b78 | 1,483 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
// Write proxy: accumulates bits to be stored by `write()`/`modify()`.
pub struct W {
    bits: u32,
}
impl super::UNUSED2 {
    #[doc = r" Modifies the contents of the register"]
    // Read-modify-write: reads current bits, lets `f` transform them, and
    // writes the result back.
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        let bits = self.register.get();
        let r = R { bits: bits };
        let mut w = W { bits: bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
    #[doc = r" Writes to the register"]
    // Writes starting from the reset value, not the current contents.
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        let mut w = W::reset_value();
        f(&mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        // Plain accessor; no masking or shifting is applied here.
        self.bits
    }
}
impl W {
    #[doc = r" Reset value of the register"]
    #[inline]
    pub fn reset_value() -> W {
        // Reset value for this register is all-zero.
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline]
    // `unsafe`: the caller is responsible for writing a bit pattern that is
    // valid for this register (no per-field range checks are performed).
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
| 22.815385 | 59 | 0.496291 |
91fe0d0aef7416f385d1ef3337d4dd1aae05ae52 | 31,885 | //! Dynamic hierarchy for trees and forests.
pub(crate) mod traverse;
use core::fmt;
use alloc::vec::Vec;
use crate::dynamic::forest::StructureError;
use crate::dynamic::hierarchy::traverse::AncestorsTraverser;
use crate::dynamic::{InsertAs, InternalNodeId};
/// A forest without custom data tied to nodes.
#[derive(Debug, Clone)]
pub(crate) struct Hierarchy<Id> {
    /// Neighbors storage.
    ///
    /// Indexed by the `usize` form of the node ID. Entries are apparently
    /// tombstoned (see `Neighbors::force_make_removed`) rather than removed,
    /// so indices stay stable.
    neighbors: Vec<Neighbors<Id>>,
}
impl<Id: InternalNodeId> Hierarchy<Id> {
/// Creates a new root node.
///
/// # Panics
///
/// Panics if the node ID overflows.
pub(crate) fn create_root(&mut self) -> Id {
let new_id = Id::from_usize(self.neighbors.len())
.expect("[precondition] node ID overflowed presumably due to too many node creations");
self.neighbors.push(Neighbors::new_root(new_id));
new_id
}
/// Returns a reference to the neighbors for the node if the node is alive.
///
/// Returns `None` if the node ID is invalid or the node has already been removed.
#[must_use]
pub(crate) fn neighbors(&self, id: Id) -> Option<&Neighbors<Id>> {
self.neighbors.get(id.to_usize()).filter(|v| v.is_alive())
}
/// Returns a mutable reference to the neighbors for the node if the node is alive.
///
/// Returns `None` if the node ID is invalid or the node has already been removed.
#[must_use]
pub(crate) fn neighbors_mut(&mut self, id: Id) -> Option<&mut Neighbors<Id>> {
self.neighbors
.get_mut(id.to_usize())
.filter(|v| v.is_alive())
}
/// Returns true if the ID is valid inside this forest.
#[must_use]
pub(crate) fn is_valid(&self, id: Id) -> bool {
id.to_usize() < self.neighbors.len()
}
/// Returns true if the node is alive.
#[must_use]
pub(crate) fn is_alive(&self, id: Id) -> bool {
self.neighbors
.get(id.to_usize())
.map_or(false, |v| v.is_alive())
}
/// Connects the given adjacent neighbors and updates fields properly.
///
/// This function connects the given three nodes and update fields to make
/// them consistent.
///
/// ```text
/// parent
/// / \
/// / \
/// prev -> next
/// ```
///
/// More precisely, the fields below will be updated:
///
/// * `parent->first_child`,
/// + Updated if `prev_child` is `None`, i.e. when `next_child` will be
/// the first child or the parent have no child.
/// * `prev_child->parent`,
/// * `prev_child->next_sibling`,
/// * `next_child->parent`, and
/// * `next_child->prev_sibling_cyclic`.
/// + `prev_sibling` is used if available.
/// + If `next_child` will be the first child of the parent, the last
/// child is used.
/// + Otherwise (if both `prev_sibling` and `parent` is `None`),
/// `next_child` is used since this means that `next_child` is the
/// root of a tree.
///
/// In order to update `prev_sibling_cyclic` of the first sibling,
/// **nodes should be connected in order** and the last node should be
/// updated at last.
///
/// # Panics
///
/// * Panics if the `parent` is `None` while both of `prev_child` and
/// `next_child` are `Some`, since nodes cannot have siblings without
/// having a parent.
/// * Panics if `prev_child` and `next_child` are both `Some(_)` and are
/// identical, since a node cannot be adjacent sibling of itself.
    fn connect_triangle(
        &mut self,
        parent: Option<Id>,
        prev_child: Option<Id>,
        next_child: Option<Id>,
    ) {
        // Precondition checks (see the doc comment above for the rules).
        if parent.is_none() && prev_child.is_some() && next_child.is_some() {
            panic!("[precondition] nodes cannot have siblings without having a parent");
        }
        if prev_child
            .zip(next_child)
            .map_or(false, |(prev, next)| prev == next)
        {
            panic!("[precondition] a node cannot be adjacent sibling of itself");
        }
        if let Some(prev_child) = prev_child {
            let prev_child_nbs = self
                .neighbors_mut(prev_child)
                .expect("[precondition] the given `prev_child` node must be alive");
            // Set prev->parent.
            prev_child_nbs.parent = parent;
            // Set prev->next.
            prev_child_nbs.next_sibling = next_child;
        }
        if let Some(next_child) = next_child {
            // Determine what `next_child.prev_sibling_cyclic` should point at.
            let next_child_prev_cyclic = match prev_child {
                // If the real previous child exist, just use it.
                Some(prev_child) => prev_child,
                None => match parent {
                    // If `prev_child` is `None` but the parent is available,
                    // then `next_child` is the first child, and
                    // `prev_sibling_cyclic` should be the last child of the
                    // parent.
                    // If the parent does not have any children, then
                    // `next_child` will be the first child.
                    Some(parent) => self
                        .neighbors(parent)
                        .expect("[precondition] the given `parent` node must be alive")
                        .last_child(self)
                        .unwrap_or(next_child),
                    // `next_child` is a root of the tree.
                    // NOTE: a root points at itself cyclically.
                    None => next_child,
                },
            };
            let next_child_nbs = self
                .neighbors_mut(next_child)
                .expect("[precondition] the given `next_child` node must be alive");
            // Set next->parent.
            next_child_nbs.parent = parent;
            // Set next->prev.
            next_child_nbs.prev_sibling_cyclic = Some(next_child_prev_cyclic);
        }
        // Neighbors of the parent must be modified after `next_child`, since
        // setting `next_child` requires last child of the non-modified parent.
        if let Some(parent) = parent {
            if prev_child.is_none() {
                let parent_nbs = self
                    .neighbors_mut(parent)
                    .expect("[precondition] the given `parent` node must be alive");
                // `next_child` is the first child (if available).
                parent_nbs.first_child = next_child;
            } else if next_child.is_none() {
                // `prev_child` has no next sibling. This means that
                // `prev_child` is the last child of the parent.
                // Update the first child's back-link to the new last child.
                let first_child = self
                    .neighbors(parent)
                    .expect("[precondition] the given `parent` node must be alive")
                    .first_child()
                    .expect("[consistency] `parent` must have a child including `prev_child`");
                if let Some(prev_child) = prev_child {
                    self.neighbors_mut(first_child)
                        .expect("[precondition] the first child of the `parent` must be alive")
                        .prev_sibling_cyclic = Some(prev_child);
                }
            }
        }
    }
/// Detaches the tree from neighbors.
///
/// Tree structure under the given node will be preserved.
/// The detached node will become a root node.
///
/// If you want to detach not subtree but single node, use
/// [`detach_single`][`Self::detach_single`] method.
///
/// ```text
/// Before `detach`:
///
/// root
/// |-- 0
/// |-- 1
/// | |-- 1-0
/// | |-- 1-1
/// | `-- 1-2
/// `-- 2
///
/// After `detach`:
///
/// root
/// |-- 0
/// `-- 2
///
/// 1
/// |-- 1-0
/// |-- 1-1
/// `-- 1-2
/// ```
///
/// # Panics
///
/// Panics if the node is not alive.
pub(crate) fn detach(&mut self, node: Id) {
let nbs = self
.neighbors(node)
.expect("[precondition] the node must be alive");
// If the node has no parent, the tree can be considered already detached.
let parent = match nbs.parent() {
Some(v) => v,
None => return,
};
let prev = nbs.prev_sibling(self);
let next = nbs.next_sibling();
// Connect the siblings before and after the node.
self.connect_triangle(Some(parent), prev, next);
// Reset the neighbors info of the node.
let mut nbs = self
.neighbors_mut(node)
.expect("[precondition] the node must be alive");
nbs.parent = None;
nbs.next_sibling = None;
nbs.prev_sibling_cyclic = Some(node);
}
/// Detaches the node from neighbors and make it orphan root.
///
/// Children are inserted to the place where the detached node was.
///
/// If you want to detach not single node but subtree, use
/// [`detach`][`Self::detach`] method.
///
/// ```text
/// Before `detach_single`:
///
/// root
/// |-- 0
/// |-- 1
/// | |-- 1-0
/// | |-- 1-1
/// | `-- 1-2
/// `-- 2
///
/// After `detach_single`:
///
/// root
/// |-- 0
/// |-- 1-0
/// |-- 1-1
/// |-- 1-2
/// `-- 2
///
/// 1
/// ```
///
/// # Errors
///
/// Returns [`StructureError::SiblingsWithoutParent`] when the node has
/// multiple children but has no parent.
///
/// # Panics
///
/// Panics if the node is not alive.
    pub(crate) fn detach_single(&mut self, node: Id) -> Result<(), StructureError> {
        let nbs = self
            .neighbors(node)
            .expect("[precondition] the node must be alive");
        let (first_child, last_child) = match nbs.first_last_child(self) {
            Some(v) => v,
            None => {
                // No children. Plain `detach` already does the right thing.
                self.detach(node);
                return Ok(());
            }
        };
        let parent = match nbs.parent() {
            Some(v) => v,
            None => {
                // `node` is a root; its children cannot be reattached as
                // siblings anywhere unless there is exactly one of them.
                if first_child != last_child {
                    return Err(StructureError::SiblingsWithoutParent);
                }
                // Single child and no parent.
                // The single child becomes the new root.
                self.detach(first_child);
                // Now the node has no children.
                self.detach(node);
                return Ok(());
            }
        };
        let prev = nbs.prev_sibling(self);
        let next = nbs.next_sibling();
        // Connect the siblings before and after the node.
        self.connect_triangle(Some(parent), prev, next);
        // Insert the children between the prev and next siblings.
        SiblingsRange::new(self, first_child, last_child)
            .transplant(self, parent, prev, next)
            .expect("[consistency] structure being created must be valid");
        // Reset the neighbors info of the node.
        let mut nbs = self
            .neighbors_mut(node)
            .expect("[precondition] the node must be alive");
        nbs.parent = None;
        nbs.next_sibling = None;
        nbs.prev_sibling_cyclic = Some(node);
        debug_assert_eq!(
            nbs.first_child(),
            None,
            "[consistency] the children have been transplanted"
        );
        Ok(())
    }
/// Detaches `node` and inserts the given node to the target position.
///
/// # Panics
///
/// Panics if any of the given nodes (including the anchor of the destination)
/// are not alive.
///
/// # Errors
///
/// * [`StructureError::AncestorDescendantLoop`]
/// + In case `dest` is `FirstChildOf(node)` or `LastChildOf(node)`.
/// * [`StructureError::UnorderableSiblings`]
/// + In case `dest` is `PreviousSiblingOf(node)` or `NextSiblingOf(node)`.
/// * [`StructureError::SiblingsWithoutParent`]
/// + In case `dest` is `PreviousSiblingOf(v)` or `NextSiblingOf(v)`, and
/// `v` does not have a parent.
pub(crate) fn insert(&mut self, node: Id, dest: InsertAs<Id>) -> Result<(), StructureError> {
match dest {
InsertAs::FirstChildOf(parent) => self.prepend_child(node, parent),
InsertAs::LastChildOf(parent) => self.append_child(node, parent),
InsertAs::PreviousSiblingOf(next) => self.insert_before(node, next),
InsertAs::NextSiblingOf(prev) => self.insert_after(node, prev),
}
}
/// Detaches and prepends the given node to children of `self` as the first child.
///
/// # Errors
///
/// Returns [`StructureError::AncestorDescendantLoop`] error when
/// `new_first_child` is `parent` or its ancestor.
///
/// # Panics
///
/// Panics if any of the given nodes are not alive.
fn prepend_child(&mut self, new_first_child: Id, parent: Id) -> Result<(), StructureError> {
let old_first_child = self
.neighbors(parent)
.expect("[precondition] the node must be alive")
.first_child();
SiblingsRange::with_single_toplevel(self, new_first_child).transplant(
self,
parent,
None,
old_first_child,
)
}
/// Detaches and appends the given node to children of `self` as the last child.
///
/// # Errors
///
/// Returns [`StructureError::AncestorDescendantLoop`] error when
/// `new_last_child` is `parent` or its ancestor.
///
/// # Panics
///
/// Panics if any of the given nodes are not alive.
fn append_child(&mut self, new_last_child: Id, parent: Id) -> Result<(), StructureError> {
let old_last_child = self
.neighbors(parent)
.expect("[precondition] the parent must be alive")
.last_child(self);
// `new_last_child` is an independent tree, so transplanting won't fail.
SiblingsRange::with_single_toplevel(self, new_last_child).transplant(
self,
parent,
old_last_child,
None,
)
}
/// Detaches and inserts the given node as the previous sibling of `next_sibling`.
///
/// # Errors
///
/// Returns [`StructureError::UnorderableSiblings`] error when `node` and
/// `next_sibling` are identical.
///
/// # Panics
///
/// Panics if any of the given nodes are not alive.
/// Panics if the `next_sibling` does not have a parent.
fn insert_before(&mut self, node: Id, next_sibling: Id) -> Result<(), StructureError> {
if node == next_sibling {
return Err(StructureError::UnorderableSiblings);
}
let next_nbs = self
.neighbors(next_sibling)
.expect("[precondition] the next sibling must be alive");
let parent = next_nbs
.parent()
.expect("[precondition] the parent must be alive to have siblings");
let prev_sibling = next_nbs.prev_sibling(self);
SiblingsRange::with_single_toplevel(self, node).transplant(
self,
parent,
prev_sibling,
Some(next_sibling),
)
}
/// Detaches and inserts the given node as the next sibling of `prev_sibling`.
///
/// # Errors
///
/// Returns [`StructureError::UnorderableSiblings`] error when `node` and
/// `prev_sibling` are identical.
///
/// # Panics
///
/// Panics if any of the given nodes are not alive.
/// Panics if the `prev_sibling` does not have a parent.
fn insert_after(&mut self, node: Id, prev_sibling: Id) -> Result<(), StructureError> {
if node == prev_sibling {
return Err(StructureError::UnorderableSiblings);
}
let prev_nbs = self
.neighbors(prev_sibling)
.expect("[precondition] the previous sibling must be alive");
let parent = prev_nbs
.parent()
.expect("[precondition] the parent must be alive to have siblings");
let next_sibling = prev_nbs.next_sibling();
SiblingsRange::with_single_toplevel(self, node).transplant(
self,
parent,
Some(prev_sibling),
next_sibling,
)
}
}
impl<Id: InternalNodeId> Default for Hierarchy<Id> {
    /// Creates an empty forest with no nodes.
    #[inline]
    fn default() -> Self {
        Self {
            neighbors: Vec::new(),
        }
    }
}
/// Neighbors.
///
/// Per-node link record; four links are enough to navigate the whole tree.
#[derive(Clone, Copy, PartialEq, Eq)]
pub(crate) struct Neighbors<Id> {
    /// Parent.
    ///
    /// `None` for a root node (and for removed nodes).
    parent: Option<Id>,
    /// Cyclic previous sibling.
    ///
    /// `None` if the node has already been removed.
    /// If the node is alive and is the first sibling, node ID of the last sibling.
    /// Otherwise (i.e. the node is alive but is not the first sibling),
    /// node ID of the previous sibling.
    ///
    /// By making this field cyclic, "last child" field becomes unnecessary.
    ///
    /// See
    /// <http://www.aosabook.org/en/posa/parsing-xml-at-the-speed-of-light.html#data-structures-for-the-document-object-model>.
    prev_sibling_cyclic: Option<Id>,
    /// Next sibling.
    ///
    /// `None` for the last sibling; this is what disambiguates the cyclic
    /// previous-sibling link above.
    next_sibling: Option<Id>,
    /// First child.
    first_child: Option<Id>,
}
impl<Id: InternalNodeId> Neighbors<Id> {
/// Creates a new `Neighbors` that is not connected to anyone.
#[inline]
#[must_use]
fn new_root(id: Id) -> Self {
Self {
parent: None,
prev_sibling_cyclic: Some(id),
next_sibling: None,
first_child: None,
}
}
/// Returns true if the node is alive.
#[inline]
#[must_use]
fn is_alive(&self) -> bool {
self.prev_sibling_cyclic.is_some()
}
/// Returns the node ID of the parent.
///
/// # Panics
///
/// Panics if the `self` node has already been removed.
#[inline]
#[must_use]
pub(crate) fn parent(&self) -> Option<Id> {
if !self.is_alive() {
panic!("[precondition] the node must be alive");
}
self.parent
}
/// Returns the node ID of the next sibling.
///
/// # Panics
///
/// Panics if the `self` node has already been removed.
#[inline]
#[must_use]
pub(crate) fn next_sibling(&self) -> Option<Id> {
if !self.is_alive() {
panic!("[precondition] the node must be alive");
}
self.next_sibling
}
/// Returns the node ID of the previous sibling.
///
/// # Panics
///
/// Panics if the `self` node has already been removed.
#[must_use]
pub(crate) fn prev_sibling(&self, hier: &Hierarchy<Id>) -> Option<Id> {
let prev_sibling_cyclic = match self.prev_sibling_cyclic {
Some(v) => v,
None => panic!("[precondition] the node must be alive"),
};
let prev_cyc_node = hier
.neighbors(prev_sibling_cyclic)
.expect("[consistency] the `prev_sibling_cyclic` node must be alive");
// If `next_sibling` is available, `prev_sibling_cyclic` is a previous node.
// If `next_sibling` is `None`, `prev_sibling_cyclic` is not a previous
// node but the last sibling.
prev_cyc_node.next_sibling.and(Some(prev_sibling_cyclic))
}
/// Returns the node ID of the first child.
///
/// # Panics
///
/// Panics if the `self` node has already been removed.
#[inline]
#[must_use]
pub(crate) fn first_child(&self) -> Option<Id> {
if !self.is_alive() {
panic!("[precondition] the node must be alive");
}
self.first_child
}
/// Returns the node ID of the last child.
///
/// # Panics
///
/// Panics if the `self` node has already been removed.
#[must_use]
pub(crate) fn last_child(&self, hier: &Hierarchy<Id>) -> Option<Id> {
self.first_last_child(hier).map(|(_first, last)| last)
}
/// Returns the node IDs of the first child and the last child.
///
/// # Panics
///
/// Panics if the `self` node has already been removed.
#[inline]
#[must_use]
pub(crate) fn first_last_child(&self, hier: &Hierarchy<Id>) -> Option<(Id, Id)> {
if !self.is_alive() {
panic!("[precondition] the node must be alive");
}
let first_child = self.first_child()?;
let last_child = hier
.neighbors(first_child)
.expect("[consistency] children of a live node must also be alive")
.prev_sibling_cyclic;
match last_child {
Some(last_child) => Some((first_child, last_child)),
None => panic!("[consistency] the last child must be alive"),
}
}
/// Returns true if the node has no neighbors.
///
/// # Panics
///
/// Panics if the `self` node has already been removed.
#[must_use]
pub(crate) fn is_alone(&self) -> bool {
if !self.is_alive() {
panic!("[precondition] the node must be alive");
}
self.parent.is_none() && self.next_sibling.is_none() && self.first_child.is_none()
}
/// Makes the node removed state.
///
/// It is caller's responsibility to make the node alone and keep the
/// hierarchy consistent.
///
/// # Panics
///
/// Panics if the `self` node has already been removed.
/// Panics if the `self` node is not alone.
pub(crate) fn make_removed(&mut self) {
if !self.is_alone() {
panic!("[precondition] the node must be alive and alone");
}
self.force_make_removed();
}
/// Makes the node removed state **even if it can make the arena inconsistent**.
///
/// It is caller's responsibility to make the node alone and/or make the
/// hierarchy finally consistent.
///
/// # Panics
///
/// Panics if the `self` node has already been removed.
pub(crate) fn force_make_removed(&mut self) {
if !self.is_alive() {
panic!("[precondition] the node must be alive");
}
self.parent = None;
self.prev_sibling_cyclic = None;
self.next_sibling = None;
self.first_child = None;
}
}
// For compact printing.
impl<Id: fmt::Debug> fmt::Debug for Neighbors<Id> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        /// A wrapper to print optional node ID in compact form.
        ///
        /// Prints `Some(id)` as just `id` and `None` as `None`, avoiding
        /// the noisy default `Option` debug output.
        #[derive(Clone, Copy)]
        struct OptNodeId<'a, Id>(&'a Option<Id>);
        impl<Id: fmt::Debug> fmt::Debug for OptNodeId<'_, Id> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                match self.0 {
                    Some(id) => id.fmt(f),
                    None => f.write_str("None"),
                }
            }
        }
        f.debug_struct("Neighbors")
            .field("parent", &OptNodeId(&self.parent))
            .field("prev_sibling_cyclic", &OptNodeId(&self.prev_sibling_cyclic))
            .field("next_sibling", &OptNodeId(&self.next_sibling))
            .field("first_child", &OptNodeId(&self.first_child))
            .finish()
    }
}
/// Siblings range.
///
/// An inclusive run of consecutive siblings, identified by its two ends.
/// The two ends may be the same node (a single-node range).
#[derive(Debug)]
struct SiblingsRange<Id> {
    /// First node in the range.
    first: Id,
    /// Last node in the range.
    last: Id,
}
impl<Id: InternalNodeId> SiblingsRange<Id> {
/// Creates a new siblings range.
///
/// # Panics
///
/// * Panics if `first` or `last` is not alive.
/// * Panics if `first` and `last` does not have the same parent node.
// TODO: Ordering:
// This should panic if `first` is a succeeding sibling of `prev`.
// However, this won't be O(1) operation. The hierarchy does not have an
// efficient way to test siblings orders.
// Without testing this, the function should be considered as unsafe.
// For now, it is caller's responsibility to ensure siblings order.
fn new(hier: &Hierarchy<Id>, first: Id, last: Id) -> Self {
if first == last {
return Self::with_single_toplevel(hier, first);
}
let first_parent = hier
.neighbors(first)
.expect("[precondition] `first` node must be alive")
.parent();
let last_parent = hier
.neighbors(last)
.expect("[precondition] `last` node must be alive")
.parent();
if first_parent != last_parent {
panic!("[precondition] `first` and `last` must have the same parent");
}
Self { first, last }
}
/// Creates a new siblings range from a single toplevel node.
///
/// # Panics
///
/// * Panics if the node is not alive.
fn with_single_toplevel(hier: &Hierarchy<Id>, node: Id) -> Self {
if !hier.is_alive(node) {
panic!("[precondition] the node must be alive");
}
Self {
first: node,
last: node,
}
}
/// Inserts the nodes in the range to the given place.
///
/// ```text
/// Before:
///
/// parent
/// / \
/// / \
/// prev_sibling -> next_sibling
///
/// (possible parent)
/// _____________/ __/ \___ \______________
/// / / \ \
/// PREV_OF_FIRST -> self.first --...-> self.last -> NEXT_OF_LAST
///
///
/// After:
///
/// parent
/// _____________/ __/ \___ \______________
/// / / \ \
/// prev_sibling -> self.first --...-> self.last -> next_sibling
///
/// (possible parent)
/// / \
/// / \
/// PREV_OF_FIRST -> NEXT_OF_LAST
/// ```
///
/// # Failures
///
/// * Returns `Err(StructureError::AncestorDescendantLoop)` if the `parent`
/// is a descendant of the node in the range.
///
/// # Panics
///
/// * Panics if the `parent`, `prev_sibling`, or `next_sibling` is not alive.
/// * Panics if the `parent` is not the actual parent of `prev_sibling` and
/// `next_sibling`.
/// * Panics if any node in the range (`self`) is not alive.
    fn transplant(
        self,
        hier: &mut Hierarchy<Id>,
        parent: Id,
        prev_sibling: Option<Id>,
        next_sibling: Option<Id>,
    ) -> Result<(), StructureError> {
        // Detect possible ancestor-descendant loop beforehand.
        if self.transplant_would_make_cyclic_links(hier, parent) {
            return Err(StructureError::AncestorDescendantLoop);
        }
        // Detach the nodes.
        {
            let first_nbs = hier
                .neighbors(self.first)
                .expect("[consistency] nodes in the range must be alive");
            let range_parent = first_nbs.parent();
            let prev_of_range = first_nbs.prev_sibling(hier);
            let next_of_range = hier
                .neighbors(self.last)
                .expect("[consistency] nodes in the range must be alive")
                .next_sibling();
            // Connect the nodes before and after the range.
            hier.connect_triangle(range_parent, prev_of_range, next_of_range);
        }
        // Rewrite parents in the range.
        // Walks the toplevel siblings of the range only; deeper descendants
        // keep their parents unchanged.
        {
            let mut child_opt = Some(self.first);
            while let Some(child) = child_opt {
                assert_ne!(
                    child, parent,
                    "[consistency] possibility of ancestor-descendant loop is already tested"
                );
                let child_nbs = hier
                    .neighbors_mut(child)
                    .expect("[consistency] nodes in the range must be alive");
                child_nbs.parent = Some(parent);
                if child == self.last {
                    break;
                }
                child_opt = child_nbs.next_sibling();
            }
        }
        // Connect the first node in the range to the previous sibling.
        // If they are identical, no need to update neighbors info.
        if prev_sibling != Some(self.first) {
            hier.connect_triangle(Some(parent), prev_sibling, Some(self.first));
        }
        // Connect the last node in the range to the next sibling.
        // If they are identical, no need to update neighbors info.
        if next_sibling != Some(self.last) {
            hier.connect_triangle(Some(parent), Some(self.last), next_sibling);
        }
        Ok(())
    }
    /// Returns true if `transplant()` would create cyclic links.
    ///
    /// This is true exactly when the destination `parent` is a descendant of
    /// some toplevel node of the range (or is the range root itself).
    fn transplant_would_make_cyclic_links(&self, hier: &Hierarchy<Id>, parent: Id) -> bool {
        let first_nbs = hier
            .neighbors(self.first)
            .expect("[consistency] nodes in the range must be alive");
        if self.first == self.last {
            // Single-node range: just check whether `parent` lies under it.
            let range_root = self.first;
            let mut ancestors = AncestorsTraverser::with_start(parent, hier);
            while let Some(ancestor) = ancestors.next(hier) {
                if ancestor == range_root {
                    // `parent` is a descendant node of the range root.
                    return true;
                }
            }
        } else {
            // Every tree in the hierarchy has root node. So, if the toplevel
            // nodes in the range don't have parent, then it means the range
            // consists of single toplevel node. Such case is already handled
            // by `self.first == self.last` branch.
            let range_parent = first_nbs
                .parent()
                .expect("[consistency] range with multiple toplevel nodes must have a parent");
            if range_parent == parent {
                // The range will be transplanted under the same parent again.
                return false;
            }
            let mut ancestors = AncestorsTraverser::with_start(parent, hier);
            // Initially `prev_ancestor` is `parent`.
            let mut prev_ancestor = ancestors
                .next(hier)
                .expect("[validity] start node itself must be iterated");
            while let Some(ancestor) = ancestors.next(hier) {
                if ancestor != range_parent {
                    continue;
                }
                // root:range_parent
                //  |-- 0
                //  |   `-- 0-0:parent
                //  |-- 1
                //  |-- 2
                //  `-- 3
                //
                // Consider transplanting [1,2] under 0-0. This should be valid.
                //
                // root
                //  |-- 0
                //  |   `-- 0-0
                //  |       |-- 1
                //  |       `-- 2
                //  `-- 3
                //
                // `range_parent` is an ancestor of `parent`, but `parent` is
                // not the descendant of the range [1, 2].
                // Detect such case here.
                // In this example, `ancestor` now refers `root` and
                // `prev_ancestor` refers `0`.
                //
                // Scan the toplevel siblings of the range: a loop arises only
                // if one of them is `prev_ancestor` (the child of
                // `range_parent` on the path down to `parent`).
                let mut current = Some(self.first);
                while let Some(toplevel) = current {
                    if toplevel == prev_ancestor {
                        // `parent` is a descendant of `toplevel`.
                        return true;
                    }
                    if toplevel == self.last {
                        break;
                    }
                    current = hier
                        .neighbors(toplevel)
                        .expect("[consistency] nodes in the range must be alive")
                        .next_sibling();
                }
                prev_ancestor = ancestor;
            }
        }
        false
    }
}
| 34.101604 | 127 | 0.535863 |
e8a06be1907f5eaed171b9869197dcbf5a684aa1 | 1,205 | use crate::types::{BlockHeader, Transaction};
#[derive(Debug)]
pub struct Block {
    // Block metadata (see `types::BlockHeader`).
    pub header: BlockHeader,
    // Transactions carried by this block, in insertion order.
    pub transactions: Vec<Transaction>,
}
impl Block {
    /// Assembles a block from a header and a list of transactions.
    pub fn new(h: BlockHeader, t: Vec<Transaction>) -> Block {
        let header = h;
        let transactions = t;
        Block {
            header,
            transactions,
        }
    }
}
impl std::default::Default for Block {
    /// An empty block: default header and no transactions.
    fn default() -> Block {
        Block {
            header: Default::default(),
            transactions: vec![],
        }
    }
}
// Incrementally assembles a `Block`; finish with `build()`.
pub struct BlockBuilder {
    // The block under construction.
    blk: Block,
}
impl BlockBuilder {
    /// Creates a builder wrapping a default (empty) block.
    pub fn new() -> Self {
        BlockBuilder {
            blk: Block::default(),
        }
    }
    /// Sets the block header, replacing any previously set header.
    pub fn header(&mut self, header: BlockHeader) -> &mut Self {
        self.blk.header = header;
        self
    }
    /// Appends a single transaction to the block.
    pub fn transaction(&mut self, tx: Transaction) -> &mut Self {
        self.blk.transactions.push(tx);
        self
    }
    /// Moves all transactions out of `txs` into the block.
    ///
    /// Note: the input vector is drained (left empty) by `Vec::append`.
    pub fn transactions(&mut self, txs: &mut Vec<Transaction>) -> &mut Self {
        self.blk.transactions.append(txs);
        self
    }
    /// Consumes the builder and yields the assembled block.
    pub fn build(self) -> Block {
        // No need to destructure and reconstruct; the builder already
        // holds the finished block.
        self.blk
    }
}

// A type with an argument-less `new` should also implement `Default`
// (clippy::new_without_default); delegate to keep a single source of truth.
impl Default for BlockBuilder {
    fn default() -> Self {
        Self::new()
    }
}
| 21.517857 | 77 | 0.540249 |
3ab482b1ca52ffb7550421ccbcd058996d6cd261 | 12,007 | use std::time::Duration;
use futures::future::FutureExt;
use once_cell::sync::OnceCell;
use rmqtt::{broker::{
default::DefaultShared,
Entry,
session::{ClientInfo, Session, SessionOfflineInfo},
Shared, SubRelations, SubRelationsMap, types::{From, Id, NodeId, Publish, Reason, SessionStatus,
Subscribe, SubscribeReturn, To, Tx, Unsubscribe},
}, grpc::{Message, MessageReply, MessageType}, MqttError, Result, Runtime};
use super::{ClusterRouter, GrpcClients, MessageSender, NodeGrpcClient};
use super::message::Message as RaftMessage;
use super::message::MessageReply as RaftMessageReply;
// Wraps a local session `Entry`, coordinating locks across the cluster via Raft.
pub struct ClusterLockEntry {
    // The underlying local entry that actually holds the session.
    inner: Box<dyn Entry>,
    // Shared cluster state (router, gRPC clients, message type).
    cluster_shared: &'static ClusterShared,
    // Node that previously owned this client's session, if any
    // (learned from the handshake lock reply); used by `kick`.
    prev_node_id: Option<NodeId>,
}
impl ClusterLockEntry {
    // Wraps a local entry with cluster-awareness; `prev_node_id` records the
    // node that held the session before (None when locally owned/unknown).
    #[inline]
    pub fn new(inner: Box<dyn Entry>, cluster_shared: &'static ClusterShared, prev_node_id: Option<NodeId>) -> Self {
        Self { inner, cluster_shared, prev_node_id }
    }
}
#[async_trait]
impl Entry for ClusterLockEntry {
    #[inline]
    async fn try_lock(&self) -> Result<Box<dyn Entry>> {
        // Announce the handshake lock to the Raft state machine first; the
        // reply tells us which node (if any) currently owns the session.
        let msg = RaftMessage::HandshakeTryLock { id: self.id() }
            .encode()?;
        let raft_mailbox = self.cluster_shared.router.raft_mailbox().await;
        let reply = raft_mailbox.send(msg).await.map_err(anyhow::Error::new)?;
        let mut prev_node_id = None;
        // An empty reply means nothing to report; otherwise decode it.
        if !reply.is_empty() {
            match RaftMessageReply::decode(&reply)? {
                RaftMessageReply::Error(e) => {
                    return Err(MqttError::Msg(e));
                }
                RaftMessageReply::HandshakeTryLock(prev_id) => {
                    prev_node_id = prev_id.map(|id| id.node_id);
                    log::debug!(
                        "{:?} ClusterLockEntry try_lock prev_node_id: {:?}",
                        self.client().await.map(|c| c.id.clone()), prev_node_id
                    );
                }
            }
        }
        // Only after the cluster-level lock succeeds do we take the local one.
        Ok(Box::new(ClusterLockEntry::new(self.inner.try_lock().await?, self.cluster_shared, prev_node_id)))
    }
    #[inline]
    fn id(&self) -> Id {
        // Identity is delegated to the wrapped local entry.
        self.inner.id()
    }
    #[inline]
    async fn set(&mut self, session: Session, tx: Tx, conn: ClientInfo) -> Result<()> {
        // Publish the connect event through Raft so every node learns which
        // node now owns this client's session.
        let msg = RaftMessage::Connected { id: session.id.clone() }
            .encode()?;
        let raft_mailbox = self.cluster_shared.router.raft_mailbox().await;
        let reply = raft_mailbox.send(msg).await.map_err(anyhow::Error::new)?;
        if !reply.is_empty() {
            let reply = RaftMessageReply::decode(&reply)?;
            match reply {
                RaftMessageReply::Error(e) => {
                    return Err(MqttError::Msg(e));
                }
                // Any other reply variant is unexpected for a Connected message.
                _ => {
                    log::error!("unreachable!(), {:?}", reply);
                    unreachable!()
                }
            }
        }
        // Store the session locally only after the cluster accepted the event.
        self.inner.set(session, tx, conn).await
    }
    #[inline]
    async fn remove(&mut self) -> Result<Option<(Session, Tx, ClientInfo)>> {
        // Purely local removal; cluster state is handled elsewhere.
        self.inner.remove().await
    }
    #[inline]
    async fn kick(&mut self, clear_subscriptions: bool) -> Result<Option<SessionOfflineInfo>> {
        log::debug!(
            "{:?} ClusterLockEntry kick ..., clear_subscriptions: {}",
            self.client().await.map(|c| c.id.clone()),
            clear_subscriptions
        );
        let id = self.id();
        log::debug!("{:?} kick, prev_node_id: {:?}", id, self.prev_node_id);
        // With no recorded previous owner, assume the session is local.
        let prev_node_id = self.prev_node_id.unwrap_or(id.node_id);
        if prev_node_id == id.node_id {
            //kicked from local
            self.inner.kick(clear_subscriptions).await
        } else {
            //kicked from other node
            if let Some(client) = self.cluster_shared.grpc_client(prev_node_id) {
                let mut msg_sender = MessageSender {
                    client,
                    msg_type: self.cluster_shared.message_type,
                    // NOTE(review): the remote kick always sends `true` for
                    // clear_subscriptions, ignoring the parameter — confirm
                    // this is intended.
                    msg: Message::Kick(id.clone(), true), //clear_subscriptions
                    max_retries: 0,
                    retry_interval: Duration::from_millis(500),
                };
                // Failures talking to the remote node are logged and treated
                // as "no offline session info", not as hard errors.
                match msg_sender.send().await {
                    Ok(reply) => {
                        if let MessageReply::Kick(Some(kicked)) = reply {
                            log::debug!("{:?} kicked: {:?}", id, kicked);
                            Ok(Some(kicked))
                        } else {
                            log::info!(
                                "{:?} Message::Kick from other node, prev_node_id: {:?}, reply: {:?}",
                                id,
                                prev_node_id,
                                reply
                            );
                            Ok(None)
                        }
                    }
                    Err(e) => {
                        log::error!(
                            "{:?} Message::Kick from other node, prev_node_id: {:?}, error: {:?}",
                            id,
                            prev_node_id,
                            e
                        );
                        Ok(None)
                    }
                }
            } else {
                log::error!(
                    "{:?} kick error, grpc_client is not exist, prev_node_id: {:?}",
                    id,
                    prev_node_id,
                );
                Ok(None)
            }
        }
    }
    // The remaining `Entry` methods delegate straight to the local entry;
    // cluster-level coordination is not needed for them.
    #[inline]
    async fn is_connected(&self) -> bool {
        self.inner.is_connected().await
    }
    #[inline]
    async fn session(&self) -> Option<Session> {
        self.inner.session().await
    }
    #[inline]
    async fn client(&self) -> Option<ClientInfo> {
        self.inner.client().await
    }
    #[inline]
    fn tx(&self) -> Option<Tx> {
        self.inner.tx()
    }
    #[inline]
    async fn subscribe(&self, subscribe: Subscribe) -> Result<SubscribeReturn> {
        self.inner.subscribe(subscribe).await
    }
    #[inline]
    async fn unsubscribe(&self, unsubscribe: &Unsubscribe) -> Result<()> {
        self.inner.unsubscribe(unsubscribe).await
    }
    #[inline]
    async fn publish(&self, from: From, p: Publish) -> Result<(), (From, Publish, Reason)> {
        self.inner.publish(from, p).await
    }
}
// Cluster-aware implementation of the `Shared` trait, layered on the default one.
pub struct ClusterShared {
    // Local (single-node) shared state that actually stores sessions.
    inner: &'static DefaultShared,
    // Raft-backed router used for cluster-wide routing decisions.
    router: &'static ClusterRouter,
    // gRPC clients to peer nodes, keyed by node ID.
    _grpc_clients: GrpcClients,
    // Message type tag attached to inter-node gRPC messages.
    pub message_type: MessageType,
}
impl ClusterShared {
    // Lazily creates the process-wide singleton on first call; subsequent
    // calls return the same instance (later arguments are ignored).
    #[inline]
    pub(crate) fn get_or_init(
        router: &'static ClusterRouter,
        grpc_clients: GrpcClients,
        message_type: MessageType,
    ) -> &'static ClusterShared {
        static INSTANCE: OnceCell<ClusterShared> = OnceCell::new();
        INSTANCE.get_or_init(|| Self { inner: DefaultShared::instance(), router, _grpc_clients: grpc_clients, message_type })
    }
    // Exposes the wrapped local `Shared` implementation.
    #[inline]
    pub(crate) fn inner(&self) -> Box<dyn Shared> {
        Box::new(self.inner)
    }
    // Looks up the gRPC client for a peer node; clones the handle.
    #[inline]
    pub(crate) fn grpc_client(&self, node_id: u64) -> Option<NodeGrpcClient> {
        self._grpc_clients.get(&node_id).map(|c| c.value().clone())
    }
}
#[async_trait]
impl Shared for &'static ClusterShared {
    #[inline]
    fn entry(&self, id: Id) -> Box<dyn Entry> {
        // Wrap the local entry so lock/set/kick go through the cluster.
        Box::new(ClusterLockEntry::new(self.inner.entry(id), self, None))
    }
    #[inline]
    fn id(&self, client_id: &str) -> Option<Id> {
        // The Raft router is the authority on client-to-node mapping.
        self.router.id(client_id)
    }
#[inline]
async fn forwards(&self, from: From, publish: Publish) -> Result<(), Vec<(To, From, Publish, Reason)>> {
log::debug!("[forwards] from: {:?}, publish: {:?}", from, publish);
let topic = publish.topic();
let mut relations_map =
match Runtime::instance().extends.router().await.matches(publish.topic()).await {
Ok(relations_map) => relations_map,
Err(e) => {
log::warn!("forwards, from:{:?}, topic:{:?}, error: {:?}", from, topic, e);
SubRelationsMap::default()
}
};
let mut errs = Vec::new();
let this_node_id = Runtime::instance().node.id();
if let Some(relations) = relations_map.remove(&this_node_id) {
//forwards to local
if let Err(e) = self.forwards_to(from.clone(), &publish, relations).await {
errs.extend(e);
}
}
if !relations_map.is_empty() {
log::debug!("forwards to other nodes, relations_map:{:?}", relations_map);
//forwards to other nodes
let mut fut_senders = Vec::new();
for (node_id, relations) in relations_map {
if let Some(client) = self.grpc_client(node_id) {
let from = from.clone();
let publish = publish.clone();
let message_type = self.message_type;
let fut_sender = async move {
let mut msg_sender = MessageSender {
client,
msg_type: message_type,
msg: Message::ForwardsTo(from, publish, relations),
max_retries: 1,
retry_interval: Duration::from_millis(500),
};
(node_id, msg_sender.send().await)
};
fut_senders.push(fut_sender.boxed());
} else {
log::error!(
"forwards error, grpc_client is not exist, node_id: {}, relations: {:?}",
node_id,
relations
);
}
}
tokio::spawn(async move {
let replys = futures::future::join_all(fut_senders).await;
for (node_id, reply) in replys {
if let Err(e) = reply {
log::error!(
"forwards Message::ForwardsTo to other node, from: {:?}, to: {:?}, error: {:?}",
from,
node_id,
e
);
}
}
});
}
if errs.is_empty() {
Ok(())
} else {
Err(errs)
}
}
#[inline]
async fn forwards_to(
&self,
from: From,
publish: &Publish,
relations: SubRelations,
) -> Result<(), Vec<(To, From, Publish, Reason)>> {
self.inner.forwards_to(from, publish, relations).await
}
#[inline]
async fn forwards_and_get_shareds(
&self,
from: From,
publish: Publish,
) -> Result<SubRelationsMap, Vec<(To, From, Publish, Reason)>> {
self.inner.forwards_and_get_shareds(from, publish).await
}
#[inline]
async fn clients(&self) -> usize {
self.inner.clients().await
}
#[inline]
async fn sessions(&self) -> usize {
self.inner.sessions().await
}
#[inline]
async fn all_clients(&self) -> usize {
self.router.all_onlines()
}
#[inline]
async fn all_sessions(&self) -> usize {
self.router.all_statuses()
}
#[inline]
fn iter(&self) -> Box<dyn Iterator<Item=Box<dyn Entry>> + Sync + Send> {
self.inner.iter()
}
#[inline]
fn random_session(&self) -> Option<(Session, ClientInfo)> {
self.inner.random_session()
}
#[inline]
async fn session_status(&self, client_id: &str) -> Option<SessionStatus> {
let try_lock_timeout = self.router.try_lock_timeout;
self.router.status(client_id).map(|s| {
SessionStatus {
handshaking: s.handshaking(try_lock_timeout),
id: s.id,
online: s.online,
}
})
}
}
| 33.260388 | 125 | 0.500291 |
dbbc6b76b58de3b84e53a921f51fdd22e2723534 | 677 | fn main() {
println!("Sort numbers ascending");
let mut numbers = [4, 65, 2, -31, 0, 99, 2, 83, 782, 1];
println!("Before: {:?}", numbers);
bubble_sort(&mut numbers);
println!("After: {:?}\n", numbers);
println!("Sort strings alphabetically");
let mut strings = ["beach", "hotel", "airplane", "car", "house", "art"];
println!("Before: {:?}", strings);
bubble_sort(&mut strings);
println!("After: {:?}\n", strings);
}
/// Sorts the slice in place with bubble sort.
///
/// After pass `p`, the largest `p + 1` elements occupy their final positions at
/// the tail of the slice, so each pass scans one element fewer.
pub fn bubble_sort<T: Ord>(arr: &mut [T]) {
    let len = arr.len();
    for pass in 0..len {
        // Compare each adjacent pair, bubbling the largest remaining element
        // up to index `len - 1 - pass`.
        for idx in 1..len - pass {
            if arr[idx - 1] > arr[idx] {
                arr.swap(idx - 1, idx);
            }
        }
    }
}
71c07f2cb50a91d6e3e2c0e27007d54c4df785f6 | 2,536 | // Copyright (c) The Dijets Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::experiments::Experiment;
use anyhow::Result;
use cli::client_proxy::ClientProxy;
use dijets_types::{
transaction::{ChangeSet, Transaction, TransactionPayload, WriteSetPayload},
write_set::WriteSet,
};
pub(crate) struct GetWriteSetVersion();
impl Experiment for GetWriteSetVersion {
    /// Stable identifier for this experiment.
    fn name(&self) -> &'static str {
        "get_writeset_version"
    }
    /// Commits an empty direct WriteSet (no writes, no events) via the local
    /// dijets_root account, so there is a writeset transaction to find.
    fn setup_states(&self, client: &mut ClientProxy) -> Result<()> {
        let writeset = TransactionPayload::WriteSet(WriteSetPayload::Direct(ChangeSet::new(
            WriteSet::default(),
            vec![],
        )));
        client.association_transaction_with_local_dijets_root_account(writeset, true)
    }
    fn description(&self) -> &'static str {
        "This tasks asks you to get the version number of the last committed writeset. There should be multiple ways to get this value:\n\
        1. Look for the transactions sent by the dijets_root address.\n\
        2. (Recommended) Each writeset transaction will emit a AdminTransactionEvent and modify the DijetsWriteSetManager resource under dijets_root account."
    }
    fn hint(&self) -> &'static str {
        "`transaction-replay` binary should have `annotate-account` mode. With this command you will be able to print out the DijetsWriteSetManager resource under dijets_root. This resource will contain an EventKey which you can use to query the history of committed WriteSet. Use cli tool to fetch events in that event stream and it should tell you which version(transaction) created this event."
    }
    /// Parses `input` as a transaction version, fetches that transaction, and
    /// accepts iff it is a user transaction carrying a WriteSet payload.
    fn check(&self, client: &mut ClientProxy, input: &str) -> Result<bool> {
        if let Ok(seq) = input.parse::<u64>() {
            // NOTE(review): `.pop().unwrap()` panics if no transaction exists at
            // this version — confirm whether an out-of-range answer should fail
            // gracefully instead.
            let txn: Transaction = bcs::from_bytes(
                client
                    .client
                    .get_txn_by_range(seq, 1, false)?
                    .pop()
                    .unwrap()
                    .bytes
                    .inner(),
            )?;
            match txn {
                Transaction::UserTransaction(user_txn) => match user_txn.payload() {
                    TransactionPayload::WriteSet(_) => Ok(true),
                    _ => Ok(false),
                },
                _ => Ok(false),
            }
        } else {
            println!("Expects integer as an input");
            Ok(false)
        }
    }
    /// Nothing to undo: the setup writeset is empty.
    fn reset_states(&self, _client: &mut ClientProxy) -> Result<()> {
        Ok(())
    }
}
| 40.903226 | 397 | 0.609621 |
cc31546da9ed98bebc5d5af29f70f9e6647b6272 | 4,074 | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! This module implements a checker for verifying that a script (or script function when serving
//! as the entry point for script execution) has a valid signature, which entails
//! - All signer arguments are occur before non-signer arguments
//! - All types non-signer arguments have a type that is valid for constants
//! - Has an empty return type
use crate::binary_views::BinaryIndexedView;
use alloc::string::ToString;
use move_binary_format::{
access::ModuleAccess,
errors::{Location, PartialVMError, PartialVMResult, VMResult},
file_format::{
CompiledModule, CompiledScript, SignatureIndex, SignatureToken, TableIndex, Visibility,
},
file_format_common::VERSION_1,
IndexKind,
};
use move_core_types::{identifier::IdentStr, vm_status::StatusCode};
/// Checks the extra requirements on the signature of a script's entry point.
///
/// A script declares no return signature, so only the parameter list is
/// validated; failures are located at `Location::Script`.
pub fn verify_script(script: &CompiledScript) -> VMResult<()> {
    verify_main_signature_impl(
        &BinaryIndexedView::Script(script),
        script.as_inner().parameters,
        None,
    )
    .map_err(|e| e.finish(Location::Script))
}
/// This function checks the extra requirements on the signature of the script visible function
/// when it serves as an entry point for script execution
pub fn verify_module_script_function(module: &CompiledModule, name: &IdentStr) -> VMResult<()> {
    // Locate the named function definition; the index is kept for error reporting.
    let fdef_opt = module.function_defs().iter().enumerate().find(|(_, fdef)| {
        module.identifier_at(module.function_handle_at(fdef.function).name) == name
    });
    let (idx, fdef) = fdef_opt.ok_or_else(|| {
        PartialVMError::new(StatusCode::VERIFICATION_ERROR)
            .with_message("function not found in verify_module_script_function".to_string())
            .finish(Location::Module(module.self_id()))
    })?;
    // Only `script`-visible functions may serve as script-execution entry points.
    match fdef.visibility {
        Visibility::Script => (),
        Visibility::Private | Visibility::Friend | Visibility::Public => {
            return Err(PartialVMError::new(
                StatusCode::EXECUTE_SCRIPT_FUNCTION_CALLED_ON_NON_SCRIPT_VISIBLE,
            )
            .at_index(IndexKind::FunctionDefinition, idx as TableIndex)
            .finish(Location::Module(module.self_id())))
        }
    }
    let resolver = &BinaryIndexedView::Module(module);
    let fhandle = module.function_handle_at(fdef.function);
    let parameters = fhandle.parameters;
    // Unlike a plain script, a module function declares a return signature;
    // the shared checker verifies that it is empty.
    let return_type_opt = Some(fhandle.return_);
    verify_main_signature_impl(resolver, parameters, return_type_opt).map_err(|e| {
        e.at_index(IndexKind::FunctionDefinition, idx as TableIndex)
            .finish(Location::Module(module.self_id()))
    })
}
/// Shared signature check for script entry points.
///
/// All leading `signer` arguments (filled in by the Move VM from the
/// transaction sender) are skipped; every remaining argument must have a type
/// valid for a constant, and the return signature, when present, must be empty.
fn verify_main_signature_impl(
    resolver: &BinaryIndexedView,
    parameters: SignatureIndex,
    return_type_opt: Option<SignatureIndex>,
) -> PartialVMResult<()> {
    use SignatureToken as S;
    let arguments = &resolver.signature_at(parameters).0;
    // In version-1 binaries signers are passed by reference; later versions
    // pass them by value.
    let is_leading_signer = |token: &&S| -> bool {
        if resolver.version() <= VERSION_1 {
            matches!(token, S::Reference(inner) if matches!(&**inner, S::Signer))
        } else {
            matches!(token, S::Signer)
        }
    };
    let all_args_have_valid_type = arguments
        .iter()
        .skip_while(is_leading_signer)
        .all(|typ| typ.is_valid_for_constant());
    let has_valid_return_type =
        return_type_opt.map_or(true, |idx| resolver.signature_at(idx).0.is_empty());
    if all_args_have_valid_type && has_valid_return_type {
        Ok(())
    } else {
        Err(PartialVMError::new(
            StatusCode::INVALID_MAIN_FUNCTION_SIGNATURE,
        ))
    }
}
| 42 | 98 | 0.685322 |
cc9992ed7ab8c3e438a06f8c6b10fbc7f3f63fa0 | 15,821 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Operation shape for `AssociateLicense`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`associate_license`](crate::client::Client::associate_license).
///
/// See [`crate::client::fluent_builders::AssociateLicense`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct AssociateLicense {
    _private: (),
}
impl AssociateLicense {
    /// Creates a new builder-style object to manufacture [`AssociateLicenseInput`](crate::input::AssociateLicenseInput)
    pub fn builder() -> crate::input::associate_license_input::Builder {
        crate::input::associate_license_input::Builder::default()
    }
    /// Creates a new `AssociateLicense` operation.
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl aws_smithy_http::response::ParseStrictResponse for AssociateLicense {
    type Output = std::result::Result<
        crate::output::AssociateLicenseOutput,
        crate::error::AssociateLicenseError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        // Any 2xx status is parsed as success; the explicit `!= 202` guard is
        // presumably redundant with `is_success()` for a 2xx code, but kept as generated.
        if !response.status().is_success() && response.status().as_u16() != 202 {
            crate::operation_deser::parse_associate_license_error(response)
        } else {
            crate::operation_deser::parse_associate_license_response(response)
        }
    }
}
/// Operation shape for `CreateWorkspace`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`create_workspace`](crate::client::Client::create_workspace).
///
/// See [`crate::client::fluent_builders::CreateWorkspace`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct CreateWorkspace {
    _private: (),
}
impl CreateWorkspace {
    /// Creates a new builder-style object to manufacture [`CreateWorkspaceInput`](crate::input::CreateWorkspaceInput)
    pub fn builder() -> crate::input::create_workspace_input::Builder {
        crate::input::create_workspace_input::Builder::default()
    }
    /// Creates a new `CreateWorkspace` operation.
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl aws_smithy_http::response::ParseStrictResponse for CreateWorkspace {
    type Output = std::result::Result<
        crate::output::CreateWorkspaceOutput,
        crate::error::CreateWorkspaceError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        // Any 2xx status is parsed as success; the explicit `!= 202` guard is
        // presumably redundant with `is_success()` for a 2xx code, but kept as generated.
        if !response.status().is_success() && response.status().as_u16() != 202 {
            crate::operation_deser::parse_create_workspace_error(response)
        } else {
            crate::operation_deser::parse_create_workspace_response(response)
        }
    }
}
/// Operation shape for `DeleteWorkspace`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`delete_workspace`](crate::client::Client::delete_workspace).
///
/// See [`crate::client::fluent_builders::DeleteWorkspace`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteWorkspace {
    _private: (),
}
impl DeleteWorkspace {
    /// Creates a new builder-style object to manufacture [`DeleteWorkspaceInput`](crate::input::DeleteWorkspaceInput)
    pub fn builder() -> crate::input::delete_workspace_input::Builder {
        crate::input::delete_workspace_input::Builder::default()
    }
    /// Creates a new `DeleteWorkspace` operation.
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl aws_smithy_http::response::ParseStrictResponse for DeleteWorkspace {
    type Output = std::result::Result<
        crate::output::DeleteWorkspaceOutput,
        crate::error::DeleteWorkspaceError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        // Any 2xx status is parsed as success; the explicit `!= 202` guard is
        // presumably redundant with `is_success()` for a 2xx code, but kept as generated.
        if !response.status().is_success() && response.status().as_u16() != 202 {
            crate::operation_deser::parse_delete_workspace_error(response)
        } else {
            crate::operation_deser::parse_delete_workspace_response(response)
        }
    }
}
/// Operation shape for `DescribeWorkspace`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`describe_workspace`](crate::client::Client::describe_workspace).
///
/// See [`crate::client::fluent_builders::DescribeWorkspace`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeWorkspace {
    _private: (),
}
impl DescribeWorkspace {
    /// Creates a new builder-style object to manufacture [`DescribeWorkspaceInput`](crate::input::DescribeWorkspaceInput)
    pub fn builder() -> crate::input::describe_workspace_input::Builder {
        crate::input::describe_workspace_input::Builder::default()
    }
    /// Creates a new `DescribeWorkspace` operation.
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl aws_smithy_http::response::ParseStrictResponse for DescribeWorkspace {
    type Output = std::result::Result<
        crate::output::DescribeWorkspaceOutput,
        crate::error::DescribeWorkspaceError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        // Any 2xx status is parsed as success; the explicit `!= 200` guard is
        // presumably redundant with `is_success()` for a 2xx code, but kept as generated.
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_describe_workspace_error(response)
        } else {
            crate::operation_deser::parse_describe_workspace_response(response)
        }
    }
}
/// Operation shape for `DescribeWorkspaceAuthentication`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`describe_workspace_authentication`](crate::client::Client::describe_workspace_authentication).
///
/// See [`crate::client::fluent_builders::DescribeWorkspaceAuthentication`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeWorkspaceAuthentication {
    _private: (),
}
impl DescribeWorkspaceAuthentication {
    /// Creates a new builder-style object to manufacture [`DescribeWorkspaceAuthenticationInput`](crate::input::DescribeWorkspaceAuthenticationInput)
    pub fn builder() -> crate::input::describe_workspace_authentication_input::Builder {
        crate::input::describe_workspace_authentication_input::Builder::default()
    }
    /// Creates a new `DescribeWorkspaceAuthentication` operation.
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl aws_smithy_http::response::ParseStrictResponse for DescribeWorkspaceAuthentication {
    type Output = std::result::Result<
        crate::output::DescribeWorkspaceAuthenticationOutput,
        crate::error::DescribeWorkspaceAuthenticationError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        // Any 2xx status is parsed as success; the explicit `!= 200` guard is
        // presumably redundant with `is_success()` for a 2xx code, but kept as generated.
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_describe_workspace_authentication_error(response)
        } else {
            crate::operation_deser::parse_describe_workspace_authentication_response(response)
        }
    }
}
/// Operation shape for `DisassociateLicense`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`disassociate_license`](crate::client::Client::disassociate_license).
///
/// See [`crate::client::fluent_builders::DisassociateLicense`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DisassociateLicense {
    _private: (),
}
impl DisassociateLicense {
    /// Creates a new builder-style object to manufacture [`DisassociateLicenseInput`](crate::input::DisassociateLicenseInput)
    pub fn builder() -> crate::input::disassociate_license_input::Builder {
        crate::input::disassociate_license_input::Builder::default()
    }
    /// Creates a new `DisassociateLicense` operation.
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl aws_smithy_http::response::ParseStrictResponse for DisassociateLicense {
    type Output = std::result::Result<
        crate::output::DisassociateLicenseOutput,
        crate::error::DisassociateLicenseError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        // Any 2xx status is parsed as success; the explicit `!= 202` guard is
        // presumably redundant with `is_success()` for a 2xx code, but kept as generated.
        if !response.status().is_success() && response.status().as_u16() != 202 {
            crate::operation_deser::parse_disassociate_license_error(response)
        } else {
            crate::operation_deser::parse_disassociate_license_response(response)
        }
    }
}
/// Operation shape for `ListPermissions`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`list_permissions`](crate::client::Client::list_permissions).
///
/// See [`crate::client::fluent_builders::ListPermissions`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListPermissions {
    _private: (),
}
impl ListPermissions {
    /// Creates a new builder-style object to manufacture [`ListPermissionsInput`](crate::input::ListPermissionsInput)
    pub fn builder() -> crate::input::list_permissions_input::Builder {
        crate::input::list_permissions_input::Builder::default()
    }
    /// Creates a new `ListPermissions` operation.
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl aws_smithy_http::response::ParseStrictResponse for ListPermissions {
    type Output = std::result::Result<
        crate::output::ListPermissionsOutput,
        crate::error::ListPermissionsError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        // Any 2xx status is parsed as success; the explicit `!= 200` guard is
        // presumably redundant with `is_success()` for a 2xx code, but kept as generated.
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_list_permissions_error(response)
        } else {
            crate::operation_deser::parse_list_permissions_response(response)
        }
    }
}
/// Operation shape for `ListWorkspaces`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`list_workspaces`](crate::client::Client::list_workspaces).
///
/// See [`crate::client::fluent_builders::ListWorkspaces`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListWorkspaces {
    _private: (),
}
impl ListWorkspaces {
    /// Creates a new builder-style object to manufacture [`ListWorkspacesInput`](crate::input::ListWorkspacesInput)
    pub fn builder() -> crate::input::list_workspaces_input::Builder {
        crate::input::list_workspaces_input::Builder::default()
    }
    /// Creates a new `ListWorkspaces` operation.
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl aws_smithy_http::response::ParseStrictResponse for ListWorkspaces {
    type Output =
        std::result::Result<crate::output::ListWorkspacesOutput, crate::error::ListWorkspacesError>;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        // Any 2xx status is parsed as success; the explicit `!= 200` guard is
        // presumably redundant with `is_success()` for a 2xx code, but kept as generated.
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_list_workspaces_error(response)
        } else {
            crate::operation_deser::parse_list_workspaces_response(response)
        }
    }
}
/// Operation shape for `UpdatePermissions`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`update_permissions`](crate::client::Client::update_permissions).
///
/// See [`crate::client::fluent_builders::UpdatePermissions`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdatePermissions {
    _private: (),
}
impl UpdatePermissions {
    /// Creates a new builder-style object to manufacture [`UpdatePermissionsInput`](crate::input::UpdatePermissionsInput)
    pub fn builder() -> crate::input::update_permissions_input::Builder {
        crate::input::update_permissions_input::Builder::default()
    }
    /// Creates a new `UpdatePermissions` operation.
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl aws_smithy_http::response::ParseStrictResponse for UpdatePermissions {
    type Output = std::result::Result<
        crate::output::UpdatePermissionsOutput,
        crate::error::UpdatePermissionsError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        // Any 2xx status is parsed as success; the explicit `!= 200` guard is
        // presumably redundant with `is_success()` for a 2xx code, but kept as generated.
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_update_permissions_error(response)
        } else {
            crate::operation_deser::parse_update_permissions_response(response)
        }
    }
}
/// Operation shape for `UpdateWorkspace`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`update_workspace`](crate::client::Client::update_workspace).
///
/// See [`crate::client::fluent_builders::UpdateWorkspace`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdateWorkspace {
    _private: (),
}
impl UpdateWorkspace {
    /// Creates a new builder-style object to manufacture [`UpdateWorkspaceInput`](crate::input::UpdateWorkspaceInput)
    pub fn builder() -> crate::input::update_workspace_input::Builder {
        crate::input::update_workspace_input::Builder::default()
    }
    /// Creates a new `UpdateWorkspace` operation.
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl aws_smithy_http::response::ParseStrictResponse for UpdateWorkspace {
    type Output = std::result::Result<
        crate::output::UpdateWorkspaceOutput,
        crate::error::UpdateWorkspaceError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        // Any 2xx status is parsed as success; the explicit `!= 202` guard is
        // presumably redundant with `is_success()` for a 2xx code, but kept as generated.
        if !response.status().is_success() && response.status().as_u16() != 202 {
            crate::operation_deser::parse_update_workspace_error(response)
        } else {
            crate::operation_deser::parse_update_workspace_response(response)
        }
    }
}
/// Operation shape for `UpdateWorkspaceAuthentication`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`update_workspace_authentication`](crate::client::Client::update_workspace_authentication).
///
/// See [`crate::client::fluent_builders::UpdateWorkspaceAuthentication`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdateWorkspaceAuthentication {
    _private: (),
}
impl UpdateWorkspaceAuthentication {
    /// Creates a new builder-style object to manufacture [`UpdateWorkspaceAuthenticationInput`](crate::input::UpdateWorkspaceAuthenticationInput)
    pub fn builder() -> crate::input::update_workspace_authentication_input::Builder {
        crate::input::update_workspace_authentication_input::Builder::default()
    }
    /// Creates a new `UpdateWorkspaceAuthentication` operation.
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl aws_smithy_http::response::ParseStrictResponse for UpdateWorkspaceAuthentication {
    type Output = std::result::Result<
        crate::output::UpdateWorkspaceAuthenticationOutput,
        crate::error::UpdateWorkspaceAuthenticationError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        // Any 2xx status is parsed as success; the explicit `!= 200` guard is
        // presumably redundant with `is_success()` for a 2xx code, but kept as generated.
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_update_workspace_authentication_error(response)
        } else {
            crate::operation_deser::parse_update_workspace_authentication_response(response)
        }
    }
}
| 42.41555 | 150 | 0.693951 |
23eb2ded242e1e72822990594818d16502cfa719 | 2,683 | // Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use channel::{aptos_channel, message_queues::QueueStyle};
use futures::{executor::block_on, stream::StreamExt};
use std::{
io::{Cursor, Write},
sync::atomic::{AtomicBool, AtomicUsize, Ordering},
thread,
time::Duration,
};
/// A small benchmark/stress test that sends `num_msgs` for each `num_keys`. The
/// default arguments simulate many transient keys that just push a single message
/// and then never more. Without garbage collecting empty per-key-queues, the
/// program will eventually OOM.
/// Workload parameters for the stress test.
#[derive(Debug)]
pub struct Args {
    // Number of distinct keys to push through (each key gets its own queue).
    num_keys: usize,
    // Messages pushed per key.
    num_msgs: usize,
    // Maximum per-key queue size passed to `aptos_channel::new`.
    max_queue_size: usize,
}
/// Runs the stress test: a sender thread pushes `num_msgs` messages for each of
/// `num_keys` keys while the current thread drains the channel; a logger thread
/// prints progress once per second.
///
/// NOTE(review): the statics below persist for the whole process, so the
/// counters are not reset if `run` is called more than once.
pub fn run(args: Args) {
    // Simulates an AccountAddress/PeerId
    const KEY_SIZE_BYTES: usize = 16;
    // Simulates a (PeerManagerRequest, Option<Arc<_>>)
    const MSG_SIZE_BYTES: usize = 96;
    static NUM_PUSH: AtomicUsize = AtomicUsize::new(0);
    static NUM_POP: AtomicUsize = AtomicUsize::new(0);
    static IS_DONE: AtomicBool = AtomicBool::new(false);
    let (sender, mut receiver) = aptos_channel::new::<[u8; KEY_SIZE_BYTES], [u8; MSG_SIZE_BYTES]>(
        QueueStyle::FIFO,
        args.max_queue_size,
        None,
    );
    let sender_thread = thread::spawn(move || {
        for idx in 0..args.num_keys {
            // Key/message payloads are just their index, little-endian encoded.
            let mut key = [0u8; KEY_SIZE_BYTES];
            let mut cursor = Cursor::new(&mut key[..]);
            cursor.write_all(&idx.to_le_bytes()).unwrap();
            for msg_idx in 0..args.num_msgs {
                let mut msg = [0u8; MSG_SIZE_BYTES];
                let mut cursor = Cursor::new(&mut msg[..]);
                cursor.write_all(&msg_idx.to_le_bytes()).unwrap();
                sender.push(key, msg).unwrap();
            }
            // Counts fully-pushed *keys*, not individual messages.
            NUM_PUSH.fetch_add(1, Ordering::Relaxed);
        }
    });
    let logger_thread = thread::spawn(move || {
        while !IS_DONE.load(Ordering::Relaxed) {
            println!(
                "NUM_PUSH: {}, NUM_POP: {}",
                NUM_PUSH.load(Ordering::Relaxed),
                NUM_POP.load(Ordering::Relaxed),
            );
            thread::sleep(Duration::from_secs(1));
        }
    });
    // just drain messages
    // (assumes `next()` yields `None` once the sender side is dropped, which is
    // what lets `block_on` return — confirm against aptos_channel semantics)
    let receiver_task = async move {
        while receiver.next().await.is_some() {
            NUM_POP.fetch_add(1, Ordering::Relaxed);
        }
    };
    block_on(receiver_task);
    sender_thread.join().unwrap();
    IS_DONE.store(true, Ordering::Relaxed);
    logger_thread.join().unwrap();
}
/// Smoke test: many transient keys, a single message each.
#[test]
fn test_many_keys_stress_test() {
    run(Args { num_keys: 100, num_msgs: 1, max_queue_size: 10 });
}
| 28.542553 | 98 | 0.601565 |
096f6376355d449f9305c04fc5e142bb74fcf938 | 818 | use std::collections::HashMap;
static VALID_NUCLEOTIDES: &'static str = "ACGT";
/// Validates that `c` is one of the four DNA nucleotides.
///
/// Returns `Ok(c)` when valid and `Err(c)` otherwise, so the offending
/// character travels with the error.
fn valid(c: char) -> Result<char, char> {
    if !VALID_NUCLEOTIDES.contains(c) {
        return Err(c);
    }
    Ok(c)
}
/// Counts occurrences of `nucleotide` in `input`.
///
/// Errors with the offending character if either the needle or any input
/// character is not a valid nucleotide (first invalid input short-circuits).
pub fn count(nucleotide: char, input: &str) -> Result<usize, char> {
    // Reject an invalid needle up front.
    let target = valid(nucleotide)?;
    input.chars().try_fold(0, |tally, c| {
        Ok(if valid(c)? == target { tally + 1 } else { tally })
    })
}
/// Tallies every nucleotide in `input`.
///
/// The result always contains all four valid nucleotides (zero-initialized),
/// and the first invalid character aborts the whole count with `Err`.
pub fn nucleotide_counts(input: &str) -> Result<HashMap<char, usize>, char> {
    // Seed every valid nucleotide with a zero count so absent ones still appear.
    let mut counts: HashMap<char, usize> =
        VALID_NUCLEOTIDES.chars().map(|c| (c, 0)).collect();
    for c in input.chars() {
        match counts.get_mut(&c) {
            Some(tally) => *tally += 1,
            // A character outside ACGT invalidates the whole strand.
            None => return Err(c),
        }
    }
    Ok(counts)
}
| 23.371429 | 92 | 0.546455 |
f5be404cf4db29db56ca6347fcc63d9419ecf120 | 2,909 | // Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use crate::types::{error::Error, TreasuryOutput};
use bee_common::packable::{Packable, Read, Write};
use bee_message::{
constants::IOTA_SUPPLY,
milestone::MilestoneIndex,
output::Output,
payload::{receipt::ReceiptPayload, Payload},
};
/// A `ReceiptPayload` paired with the index of the milestone that included it.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Receipt {
    // The wrapped receipt payload.
    inner: ReceiptPayload,
    // Index of the milestone in which this receipt was included.
    included_in: MilestoneIndex,
}
impl Receipt {
pub fn new(inner: ReceiptPayload, included_in: MilestoneIndex) -> Self {
Self { inner, included_in }
}
pub fn inner(&self) -> &ReceiptPayload {
&self.inner
}
pub fn included_in(&self) -> &MilestoneIndex {
&self.included_in
}
pub fn validate(&self, consumed_treasury_output: &TreasuryOutput) -> Result<(), Error> {
let mut migrated_amount: u64 = 0;
let transaction = match self.inner().transaction() {
Payload::TreasuryTransaction(transaction) => transaction,
payload => return Err(Error::UnsupportedPayloadKind(payload.kind())),
};
for funds in self.inner().funds() {
migrated_amount = migrated_amount
.checked_add(funds.output().amount())
.ok_or_else(|| Error::InvalidMigratedFundsAmount(migrated_amount + funds.output().amount()))?;
}
if migrated_amount > IOTA_SUPPLY {
return Err(Error::InvalidMigratedFundsAmount(migrated_amount));
}
let created_treasury_output = match transaction.output() {
Output::Treasury(output) => output,
output => return Err(Error::UnsupportedOutputKind(output.kind())),
};
let created_amount = consumed_treasury_output
.inner()
.amount()
.checked_sub(migrated_amount)
.ok_or_else(|| {
Error::InvalidMigratedFundsAmount(consumed_treasury_output.inner().amount() - migrated_amount)
})?;
if created_amount != created_treasury_output.amount() {
return Err(Error::TreasuryAmountMismatch(
created_amount,
created_treasury_output.amount(),
));
}
Ok(())
}
}
impl Packable for Receipt {
    type Error = Error;

    /// Number of bytes `pack` will write.
    fn packed_len(&self) -> usize {
        // Must mirror `pack`, which writes `inner` once followed by `included_in`.
        // (The previous version counted `self.inner.packed_len()` twice.)
        self.inner.packed_len() + self.included_in.packed_len()
    }

    /// Serializes the payload followed by the including milestone index.
    fn pack<W: Write>(&self, writer: &mut W) -> Result<(), Self::Error> {
        self.inner.pack(writer)?;
        self.included_in.pack(writer)?;
        Ok(())
    }

    /// Deserializes in the same order `pack` writes.
    fn unpack_inner<R: Read + ?Sized, const CHECK: bool>(reader: &mut R) -> Result<Self, Self::Error> {
        let inner = ReceiptPayload::unpack_inner::<R, CHECK>(reader)?;
        let included_in = MilestoneIndex::unpack_inner::<R, CHECK>(reader)?;
        Ok(Self::new(inner, included_in))
    }
}
| 30.621053 | 110 | 0.611894 |
690e55b31b74716afcb7a8b10788171797815512 | 1,366 | use {
minfac::ServiceCollection,
std::sync::Arc,
libloading::{Library, Symbol},
std::env::consts::{DLL_PREFIX, DLL_SUFFIX}
};
type ServiceRegistrar = unsafe extern "C" fn(&mut minfac::ServiceCollection);
///
/// # Expected output
///
/// plugin: Register Service
/// plugin: I duplicate 2
/// Runtime: service.call(2) = 4
/// Runtime: Get 42 multiplied by 3: 126
///
/// Loads a plugin dynamic library, lets it register services into the host's
/// `ServiceCollection`, then resolves and exercises those services.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut collection = ServiceCollection::new();
    // Host-registered service: a plain i64 provider.
    collection.register(|| 42);
    // Lib must be referenced outside of unsafe block, because it's dropped otherwise, sporadically resulting in a segfault
    // SAFETY(review): sound only if the plugin actually exports a `register`
    // symbol matching `ServiceRegistrar`'s ABI — confirm against the plugin crate.
    let _lib = unsafe {
        let lib = Library::new(format!("target/debug/{}plugin{}", DLL_PREFIX, DLL_SUFFIX))?;
        let func: Symbol<ServiceRegistrar> = lib.get(b"register")?;
        func(&mut collection);
        lib
    };
    let provider = collection
        .build()
        .expect("Expected all dependencies to resolve");
    let service = provider
        .get::<Arc<dyn interface::Service>>()
        .expect("Expected plugin to register a &dyn Service");
    println!("Runtime: service.call(2) = {}", service.call(2));
    let number = provider
        .get::<i64>()
        .expect("Expected plugin to register i64");
    println!("Runtime: Get 42 multiplied by 3: {}", number);
    Ok(())
}
| 29.06383 | 123 | 0.625183 |
de7876d58ffac3f27ad1865d98286bcf4a9c4e99 | 17,116 | use std::{
fmt::Debug,
io::Write,
sync::{Arc, RwLock},
};
use swc_common::{comments::SingleThreadedComments, FileName, SourceMap};
use swc_ecma_parser;
use testing::DebugUsingDisplay;
use self::swc_ecma_parser::{EsConfig, Parser, StringInput, Syntax};
use super::*;
use crate::{config::Config, text_writer::omit_trailing_semi};
/// Bundles everything needed to run the emitter over a parsed module in tests.
struct Builder {
    // Codegen configuration under test (e.g. whether to minify).
    cfg: Config,
    // Source map shared between parser and emitter.
    cm: Lrc<SourceMap>,
    // Comments collected during parsing, re-emitted by the code generator.
    comments: SingleThreadedComments,
    // ECMAScript version the emitter targets.
    target: EsVersion,
}
impl Builder {
    /// Runs `op` with an `Emitter` that writes into `s`.
    ///
    /// NOTE(review): the `src` parameter is unused here (callers pass the
    /// source through `text` for symmetry) — confirm whether it can be dropped.
    pub fn with<'a, F, Ret>(self, src: &str, s: &'a mut Vec<u8>, op: F) -> Ret
    where
        F: for<'aa> FnOnce(&mut Emitter<'aa, Box<(dyn WriteJs + 'aa)>>) -> Ret,
        Ret: 'static,
    {
        let writer =
            text_writer::JsWriter::with_target(self.cm.clone(), "\n", s, None, self.target);
        // When minifying, also drop trailing semicolons, as the real pipeline does.
        let writer: Box<dyn WriteJs> = if self.cfg.minify {
            Box::new(omit_trailing_semi(writer))
        } else {
            Box::new(writer)
        };
        {
            let mut e = Emitter {
                cfg: self.cfg,
                cm: self.cm.clone(),
                wr: writer,
                comments: Some(&self.comments),
            };
            op(&mut e)
        }
    }
    /// Convenience wrapper over [`Builder::with`] that collects the emitted
    /// bytes and returns them as a `String` (panics on non-UTF-8 output).
    pub fn text<F>(self, src: &str, op: F) -> String
    where
        F: for<'aa> FnOnce(&mut Emitter<'aa, Box<(dyn WriteJs + 'aa)>>),
    {
        let mut buf = vec![];
        self.with(src, &mut buf, op);
        String::from_utf8(buf).unwrap()
    }
}
/// Parses `from` as a module with the given syntax and re-emits it using the
/// provided emitter config and target ES version, returning the printed text.
///
/// Parse errors are emitted as diagnostics and abort the test via `run_test`.
fn parse_then_emit(from: &str, cfg: Config, syntax: Syntax, target: EsVersion) -> String {
    ::testing::run_test(false, |cm, handler| {
        let src = cm.new_source_file(FileName::Real("custom.js".into()), from.to_string());
        // Echo the input and its span so failing tests are easier to debug.
        println!(
            "--------------------\nSource: \n{}\nPos: {:?} ~ {:?}\n",
            from, src.start_pos, src.end_pos
        );
        let comments = Default::default();
        let res = {
            let mut parser = Parser::new(syntax, StringInput::from(&*src), Some(&comments));
            let res = parser
                .parse_module()
                .map_err(|e| e.into_diagnostic(handler).emit());
            // Surface recoverable parse errors as diagnostics too.
            for err in parser.take_errors() {
                err.into_diagnostic(handler).emit()
            }
            res?
        };
        let out = Builder {
            cfg,
            cm,
            comments,
            target,
        }
        .text(from, |e| e.emit_module(&res).unwrap());
        Ok(out)
    })
    .unwrap()
}
/// Asserts that `from`, minified (with class static blocks enabled) for the
/// latest ES target, prints exactly as `to`.
/// `#[track_caller]` makes assertion failures point at the calling test.
#[track_caller]
pub(crate) fn assert_min(from: &str, to: &str) {
    let out = parse_then_emit(
        from,
        Config { minify: true },
        Syntax::Es(EsConfig {
            static_blocks: true,
            ..Default::default()
        }),
        EsVersion::latest(),
    );
    assert_eq!(DebugUsingDisplay(out.trim()), DebugUsingDisplay(to),);
}
/// Asserts that `from`, minified for the given `target` ES version, prints
/// exactly as `to`.
///
/// `#[track_caller]` added for consistency with `assert_min`: assertion
/// failures now report the calling test's location instead of this helper.
#[track_caller]
pub(crate) fn assert_min_target(from: &str, to: &str, target: EsVersion) {
    let out = parse_then_emit(from, Config { minify: true }, Syntax::default(), target);
    assert_eq!(DebugUsingDisplay(out.trim()), DebugUsingDisplay(to),);
}
/// Clone of the regular `assert_min` function but with TypeScript syntax.
///
/// `#[track_caller]` added for consistency with `assert_min`, so failures
/// point at the calling test.
#[track_caller]
pub(crate) fn assert_min_typescript(from: &str, to: &str) {
    let out = parse_then_emit(
        from,
        Config { minify: true },
        Syntax::Typescript(Default::default()),
        EsVersion::latest(),
    );
    assert_eq!(DebugUsingDisplay(out.trim()), DebugUsingDisplay(to),);
}
/// Asserts that `from`, pretty-printed (non-minified) for the latest ES
/// target, prints exactly as `to`.
///
/// `#[track_caller]` added for consistency with `assert_min`; the printlns
/// provide extra context on failure.
#[track_caller]
pub(crate) fn assert_pretty(from: &str, to: &str) {
    let out = parse_then_emit(
        from,
        Config { minify: false },
        Syntax::default(),
        EsVersion::latest(),
    );
    println!("Expected: {:?}", to);
    println!("Actual:   {:?}", out);
    assert_eq!(DebugUsingDisplay(out.trim()), DebugUsingDisplay(to),);
}
/// Core round-trip assertion: parses `from` with default config/syntax and the
/// latest ES target and compares the (trimmed) emitted text with `expected`.
#[track_caller]
fn test_from_to(from: &str, expected: &str) {
    let out = parse_then_emit(
        from,
        Default::default(),
        Syntax::default(),
        EsVersion::latest(),
    );
    // dbg! output aids diagnosis when the assertion below fails.
    dbg!(&out);
    dbg!(&expected);
    assert_eq!(
        DebugUsingDisplay(out.trim()),
        DebugUsingDisplay(expected.trim()),
    );
}
/// Asserts that `from` is emitted byte-for-byte unchanged (modulo trimming).
fn test_identical(from: &str) {
    test_from_to(from, from)
}
/// Like `test_from_to`, but with caller-supplied emitter config and syntax.
fn test_from_to_custom_config(from: &str, to: &str, cfg: Config, syntax: Syntax) {
    let out = parse_then_emit(from, cfg, syntax, EsVersion::latest());
    assert_eq!(DebugUsingDisplay(out.trim()), DebugUsingDisplay(to.trim()),);
}
// --- Statement and comment emission round-trips ---
#[test]
fn empty_stmt() {
    test_from_to(";", ";");
}
#[test]
fn comment_1() {
    test_from_to(
        "// foo
a",
        "// foo
a;",
    );
}
#[test]
fn comment_2() {
    // A trailing line comment pushes the inserted semicolon onto its own line.
    test_from_to("a // foo", "a // foo\n;\n");
}
#[test]
fn comment_3() {
    test_from_to(
        "// foo
// bar
a
// foo
b // bar",
        "// foo\n// bar\na;\n// foo\nb // bar\n;\n",
    );
}
#[test]
fn comment_4() {
    test_from_to("/** foo */ a", "/** foo */ a;");
}
#[test]
fn comment_5() {
    test_from_to(
        "// foo
// bar
a",
        "// foo
// bar
a;",
    );
}
#[test]
fn no_octal_escape() {
    // `\x00` followed by a digit must not be re-emitted in a form that could
    // be parsed as an octal escape.
    test_from_to(
        r#"'\x00a';
'\x000';
'\x001';
'\x009'"#,
        r#"'\x00a';
'\x000';
'\x001';
'\x009';"#,
    );
}
// --- Named / namespace export emission, pretty and minified variants ---
#[test]
fn empty_named_export() {
    test_from_to("export { }", "export { };");
}
#[test]
fn empty_named_export_min() {
    test_from_to_custom_config(
        "export { }",
        "export{}",
        Config { minify: true },
        Default::default(),
    );
}
#[test]
fn empty_named_export_from() {
    test_from_to("export { } from 'foo';", "export { } from 'foo';");
}
#[test]
fn empty_named_export_from_min() {
    test_from_to_custom_config(
        "export { } from 'foo';",
        "export{}from\"foo\"",
        Config { minify: true },
        Default::default(),
    );
}
#[test]
fn named_export_from() {
    test_from_to("export { bar } from 'foo';", "export { bar } from 'foo';");
}
#[test]
fn named_export_from_min() {
    test_from_to_custom_config(
        "export { bar } from 'foo';",
        "export{bar}from\"foo\"",
        Config { minify: true },
        Default::default(),
    );
}
#[test]
fn export_namespace_from() {
    test_from_to_custom_config(
        "export * as Foo from 'foo';",
        "export * as Foo from 'foo';",
        Default::default(),
        Syntax::Es(EsConfig {
            ..EsConfig::default()
        }),
    );
}
#[test]
fn export_namespace_from_min() {
    // Minified form keeps only the space that is syntactically required
    // before the identifier (`export*as Foo`).
    test_from_to_custom_config(
        "export * as Foo from 'foo';",
        "export*as Foo from\"foo\"",
        Config { minify: true },
        Syntax::Es(EsConfig {
            ..EsConfig::default()
        }),
    );
}
#[test]
fn named_and_namespace_export_from() {
    test_from_to_custom_config(
        "export * as Foo, { bar } from 'foo';",
        "export * as Foo, { bar } from 'foo';",
        Default::default(),
        Syntax::Es(EsConfig {
            ..EsConfig::default()
        }),
    );
}
#[test]
fn named_and_namespace_export_from_min() {
    test_from_to_custom_config(
        "export * as Foo, { bar } from 'foo';",
        "export*as Foo,{bar}from\"foo\"",
        Config { minify: true },
        Syntax::Es(EsConfig {
            ..EsConfig::default()
        }),
    );
}
// --- Template-literal and string-escape regression tests ---
#[test]
fn issue_450() {
    test_from_to(
        r#"console.log(`
\`\`\`html
<h1>It works!</h1>
\`\`\`
`);"#,
        "console.log(`\n\\`\\`\\`html\n<h1>It works!</h1>\n\\`\\`\\`\n`);",
    );
}
#[test]
fn issue_546() {
    test_from_to(
        "import availabilities, * as availabilityFunctions from 'reducers/availabilities';",
        "import availabilities, * as availabilityFunctions from 'reducers/availabilities';",
    );
}
#[test]
fn issue_637() {
    // A backslash line-continuation inside a template must round-trip.
    test_from_to(
        r"`\
`;", r"`\
`;",
    );
}
#[test]
fn issue_639() {
    test_from_to(r"`\x1b[33m Yellow \x1b[0m`;", r"`\x1b[33m Yellow \x1b[0m`;");
}
#[test]
fn issue_910() {
    // Original quote style (single vs double) must be preserved.
    test_from_to(
        "console.log(\"Hello World\");",
        "console.log(\"Hello World\");",
    );
    test_from_to("console.log('Hello World');", "console.log('Hello World');");
    test_from_to(
        "console.log(\"Hello\\\" World\");",
        "console.log(\"Hello\\\" World\");",
    );
    test_from_to(
        "console.log('Hello\\' World');",
        "console.log('Hello\\' World');",
    );
}
#[test]
fn tpl_1() {
    test_from_to(
        "`id '${id}' must be a non-empty string`;",
        "`id '${id}' must be a non-empty string`;",
    )
}
#[test]
fn tpl_2() {
    test_from_to(
        "`${Module.wrapper[0]}${script}${Module.wrapper[1]}`",
        "`${Module.wrapper[0]}${script}${Module.wrapper[1]}`;",
    );
}
#[test]
fn tpl_escape_1() {
    test_from_to(
        "`${parent.path}\x00${request}`",
        "`${parent.path}\x00${request}`;",
    )
}
#[test]
fn tpl_escape_2() {
    test_from_to("`${arg}\0`", "`${arg}\0`;");
}
#[test]
fn tpl_escape_3() {
    test_from_to(
        r#"`${resolvedDevice.toLowerCase()}\\`"#,
        r#"`${resolvedDevice.toLowerCase()}\\`;"#,
    );
}
#[test]
fn tpl_escape_4() {
    test_from_to(
        r#"`\\\\${firstPart}\\${path.slice(last)}`"#,
        r#"`\\\\${firstPart}\\${path.slice(last)}`;"#,
    );
}
#[test]
fn tpl_escape_5() {
    test_from_to(
        r#"const data = text.encode(`${arg}\0`);"#,
        r#"const data = text.encode(`${arg}\0`);"#,
    );
}
#[test]
fn tpl_escape_6() {
    // Uses TypeScript syntax because of class property initializers.
    let from = r#"export class MultipartReader {
newLine = encoder.encode("\r\n");
newLineDashBoundary = encoder.encode(`\r\n--${this.boundary}`);
dashBoundaryDash = encoder.encode(`--${this.boundary}--`);
}"#;
    let to = r#"export class MultipartReader {
newLine = encoder.encode("\r\n");
newLineDashBoundary = encoder.encode(`\r\n--${this.boundary}`);
dashBoundaryDash = encoder.encode(`--${this.boundary}--`);
}"#;
    let out = parse_then_emit(
        from,
        Default::default(),
        Syntax::Typescript(Default::default()),
        EsVersion::latest(),
    );
    assert_eq!(DebugUsingDisplay(out.trim()), DebugUsingDisplay(to.trim()),);
}
// --- Escape regressions from issue #915, JSX attribute emission ---
#[test]
fn issue_915_1() {
    test_identical(r#"relResolveCacheIdentifier = `${parent.path}\x00${request}`;"#);
}
#[test]
fn issue_915_2() {
    // NOTE(review): byte-identical to issue_915_1 — presumably mirrors the
    // upstream issue's repro list; confirm before deduplicating.
    test_identical(r#"relResolveCacheIdentifier = `${parent.path}\x00${request}`;"#);
}
#[test]
fn issue_915_3() {
    test_identical(r#"encoder.encode("\\r\\n");"#);
}
#[test]
fn issue_915_4() {
    test_identical(r#"`\\r\\n--${this.boundary}`;"#);
}
#[test]
fn jsx_1() {
    // String attributes, `<` inside attribute values, and bare boolean
    // attributes must all round-trip.
    test_from_to_custom_config(
        "<Foo title=\"name\" desc=\"<empty>\" bool it>foo</Foo>;",
        "<Foo title=\"name\" desc=\"<empty>\" bool it>foo</Foo>;",
        Default::default(),
        Syntax::Es(EsConfig {
            jsx: true,
            ..Default::default()
        }),
    );
}
#[test]
fn deno_8162() {
    test_from_to(
        r#""\x00\r\n\x85\u2028\u2029";"#,
        r#""\x00\r\n\x85\u2028\u2029";"#,
    );
}
// --- Larger integration snippets and Deno-reported regressions ---
#[test]
fn integration_01() {
    test_from_to(
        r#"
        `Unexpected ${unexpectedKeys.length > 1 ? 'keys' : 'key'} ` +
        `"${unexpectedKeys.join('", "')}" found in ${argumentName}. ` +
        `Expected to find one of the known reducer keys instead: ` +
        `"${reducerKeys.join('", "')}". Unexpected keys will be ignored.`
        "#,
        "
`Unexpected ${unexpectedKeys.length > 1 ? 'keys' : 'key'} ` + `\"${unexpectedKeys.join('\", \
\"')}\" found in ${argumentName}. ` + `Expected to find one of the known reducer keys \
instead: ` + `\"${reducerKeys.join('\", \"')}\". Unexpected keys will be ignored.`;
    ",
    );
}
#[test]
fn integration_01_reduced_01() {
    test_from_to(
        r#"
        `Unexpected ${unexpectedKeys.length > 1 ? 'keys' : 'key'} ` +
        `"${unexpectedKeys.join('", "')}" found in ${argumentName}. `
        "#,
        "
`Unexpected ${unexpectedKeys.length > 1 ? 'keys' : 'key'} ` + `\"${unexpectedKeys.join('\", \
\"')}\" found in ${argumentName}. `;",
    );
}
#[test]
fn deno_8541_1() {
    test_from_to(
        "React.createElement('span', null, '\\u{b7}');",
        "React.createElement('span', null, '\\u{b7}');",
    );
}
#[test]
fn deno_8925() {
    // Non-ASCII (mathematical script) identifiers must survive pretty printing.
    assert_pretty("const 𝒫 = 2;", "const 𝒫 = 2;");
}
#[test]
#[ignore = "Tested by a bundler test"]
fn deno_9620() {
    assert_pretty(
        "const content = `--------------------------366796e1c748a2fb\r
Content-Disposition: form-data; name=\"payload\"\r
Content-Type: text/plain\r
\r
CONTENT\r
--------------------------366796e1c748a2fb--`",
        "`const content = `--------------------------366796e1c748a2fb\\r\\nContent-Disposition: \
         form-data; name=\"payload\"\\r\\nContent-Type: \
         text/plain\\r\\n\\r\\nCONTENT\\r\\n--------------------------366796e1c748a2fb--`;",
    );
}
/// Unit-tests `get_quoted_utf16` string quoting/escaping across ES targets.
///
/// The original had an `es2020_nonascii` helper whose body was byte-identical
/// to `es2020`; it has been removed as a duplicate and its call sites now use
/// `es2020` directly (behavior unchanged).
#[test]
fn test_get_quoted_utf16() {
    // Asserts the quoted form when targeting Es2020 (`\u{...}` available).
    fn es2020(src: &str, expected: &str) {
        assert_eq!(super::get_quoted_utf16(src, EsVersion::Es2020), expected)
    }
    // Asserts the quoted form when targeting Es5: no `\u{...}` syntax, so
    // astral characters must be emitted as UTF-16 surrogate pairs.
    fn es5(src: &str, expected: &str) {
        assert_eq!(super::get_quoted_utf16(src, EsVersion::Es5), expected)
    }
    es2020("abcde", "\"abcde\"");
    es2020(
        "\x00\r\n\u{85}\u{2028}\u{2029};",
        "\"\\0\\r\\n\\x85\\u2028\\u2029;\"",
    );
    es2020("\n", "\"\\n\"");
    es2020("\t", "\"\\t\"");
    es2020("'string'", "\"'string'\"");
    es2020("\u{0}", "\"\\0\"");
    es2020("\u{1}", "\"\\x01\"");
    es2020("\u{1000}", "\"\\u1000\"");
    es2020("\u{ff}", "\"\\xff\"");
    es2020("\u{10ffff}", "\"\\u{10FFFF}\"");
    es2020("😀", "\"\\u{1F600}\"");
    es5("ퟻ", "\"\\uD7FB\"");
    es2020("\u{FEFF}abc", "\"\\uFEFFabc\"");
    es2020("\u{10ffff}", "\"\\u{10FFFF}\"");
    es5("\u{FEFF}abc", "\"\\uFEFFabc\"");
    es5("\u{10ffff}", "\"\\uDBFF\\uDFFF\"");
    es5("\u{FFFF}", "\"\\uFFFF\"");
    es5("😀", "\"\\uD83D\\uDE00\"");
    es5("ퟻ", "\"\\uD7FB\"");
}
#[test]
fn deno_8541_2() {
    test_from_to(
        "React.createElement('span', null, '\\u00b7');",
        "React.createElement('span', null, '\\u00b7');",
    );
}
#[test]
fn issue_1452_1() {
    // Single-parameter async arrows must not gain parentheses when minified.
    assert_min("async foo => 0", "async foo=>0");
}
#[test]
fn issue_1619_1() {
    // `\x00` shortens to `\0`; `\x31` is the printable digit '1'.
    assert_min_target(
        "\"\\x00\" + \"\\x31\"",
        "\"\\0\"+\"1\"",
        EsVersion::latest(),
    );
}
#[test]
fn issue_1619_2() {
    // NOTE(review): byte-identical to issue_1619_1 — confirm whether a
    // different target/fixture was intended here before deduplicating.
    assert_min_target(
        "\"\\x00\" + \"\\x31\"",
        "\"\\0\"+\"1\"",
        EsVersion::latest(),
    );
}
#[test]
fn issue_1619_3() {
    // For Es3, `\x00` before a digit must stay `\x00` (a `\0` would read as
    // an octal escape).
    assert_eq!(get_quoted_utf16("\x00\x31", EsVersion::Es3), "\"\\x001\"");
}
/// Pretty-prints `src` with the latest ES target and compares the trimmed
/// output against `expected`.
fn check_latest(src: &str, expected: &str) {
    let actual = parse_then_emit(
        src,
        Config { minify: false },
        Default::default(),
        EsVersion::latest(),
    );
    assert_eq!(expected, actual.trim());
}
#[test]
fn invalid_unicode_in_ident() {
    // A lone surrogate escape in identifier position must round-trip as-is.
    check_latest("\\ud83d;", "\\ud83d;");
}
#[test]
fn test_escape_with_source_str() {
    // Lone surrogates inside string literals are preserved verbatim.
    check_latest("'\\ud83d'", "'\\ud83d';");
    check_latest(
        "'\\ud83d\\ud83d\\ud83d\\ud83d\\ud83d'",
        "'\\ud83d\\ud83d\\ud83d\\ud83d\\ud83d';",
    );
}
/// Shared, clonable in-memory sink: all clones append to the same buffer,
/// letting tests hand a writer to the emitter while keeping a handle to
/// inspect the output.
#[derive(Debug, Clone)]
struct Buf(Arc<RwLock<Vec<u8>>>);
impl Write for Buf {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        self.0.write().unwrap().write(data)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.0.write().unwrap().flush()
    }
}
#[test]
fn issue_2213() {
    // The space in `- -b` is load-bearing: `--b` would parse as a decrement.
    assert_min("a - -b * c", "a- -b*c")
}
#[test]
fn issue3617() {
    // Convert characters to es5 compatibility code
    let from = r"// a string of all valid unicode whitespaces
module.exports = '\u0009\u000A\u000B\u000C\u000D\u0020\u00A0\u1680\u2000\u2001\u2002' +
'\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000\u2028\u2029\uFEFF' + '\u{a0}';";
    let expected = r#"// a string of all valid unicode whitespaces
module.exports = "\t\n\v\f\r \xa0\u1680\u2000\u2001\u2002" + "\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000\u2028\u2029\uFEFF" + "\xa0";"#;
    let out = parse_then_emit(from, Default::default(), Syntax::default(), EsVersion::Es5);
    dbg!(&out);
    dbg!(&expected);
    assert_eq!(
        DebugUsingDisplay(out.trim()),
        DebugUsingDisplay(expected.trim()),
    );
}
#[test]
fn issue3617_1() {
    // Print characters as is for ECMA target > 5
    let from = r"// a string of all valid unicode whitespaces
module.exports = '\u0009\u000A\u000B\u000C\u000D\u0020\u00A0\u1680\u2000\u2001\u2002' +
'\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000\u2028\u2029\uFEFF' + '\u{a0}';";
    let expected = r#"// a string of all valid unicode whitespaces
module.exports = '\u0009\u000A\u000B\u000C\u000D\u0020\u00A0\u1680\u2000\u2001\u2002' + '\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000\u2028\u2029\uFEFF' + '\u{a0}';"#;
    let out = parse_then_emit(
        from,
        Default::default(),
        Syntax::default(),
        EsVersion::Es2022,
    );
    dbg!(&out);
    dbg!(&expected);
    assert_eq!(
        DebugUsingDisplay(out.trim()),
        DebugUsingDisplay(expected.trim()),
    );
}
67bd20746c106dc27f92c317c2b6939a0a0bfb30 | 3,825 | use std::any::Any;
use rotor::{Machine, Response, EventSet, PollOpt, Evented};
use rotor::{Scope, GenericScope, Void};
use rotor::mio::{TryAccept};
use {StreamSocket, Accept};
/// Trait which must be implemented for a state machine to accept connection
///
/// This basically provides alternative constructor for the state machine.
pub trait Accepted: Machine {
    /// Seed value cloned and handed to every accepted connection.
    type Seed: Clone;
    /// Concrete stream socket type produced by the acceptor.
    type Socket: StreamSocket;
    /// The constructor of the state machine from the accepted connection
    fn accepted(sock: Self::Socket, seed: <Self as Accepted>::Seed,
        scope: &mut Scope<Self::Context>)
        -> Response<Self, Void>;
}
impl<M, A> Accept<M, A>
    where A: TryAccept<Output=M::Socket> + Evented + Any,
          M: Accepted,
{
    /// Creates the accepting state machine, registering the listening socket
    /// for edge-triggered readability on the event loop.
    pub fn new<S: GenericScope>(sock: A,
        seed: <M as Accepted>::Seed, scope: &mut S)
        -> Response<Self, Void>
    {
        match scope.register(&sock, EventSet::readable(), PollOpt::edge()) {
            Ok(()) => {}
            // Registration failure aborts machine creation with the I/O error.
            Err(e) => return Response::error(Box::new(e)),
        }
        Response::ok(Accept::Server(sock, seed))
    }
}
impl<M, A> Accept<M, A>
    where A: TryAccept<Output=M::Socket> + Evented + Any,
          M: Accepted,
{
    /// Accepts at most one pending connection from the listening socket.
    ///
    /// This logic was previously duplicated verbatim in `ready` and
    /// `spawned`; factoring it out keeps the two paths in sync. On a
    /// successful accept a child machine is spawned with the socket and a
    /// clone of the seed; `Ok(None)` (spurious wakeup) and accept errors
    /// both leave the server machine running unchanged.
    fn accept_one(a: A, s: <M as Accepted>::Seed)
        -> Response<Self, (A::Output, <M as Accepted>::Seed)>
    {
        match a.accept() {
            Ok(Some(sock)) => {
                let seed = (sock, s.clone());
                Response::spawn(Accept::Server(a, s), seed)
            }
            Ok(None) => {
                Response::ok(Accept::Server(a, s))
            }
            Err(_) => {
                // TODO(tailhook) maybe log the error
                Response::ok(Accept::Server(a, s))
            }
        }
    }
}
impl<M, A> Machine for Accept<M, A>
    where A: TryAccept<Output=M::Socket> + Evented + Any,
          M: Accepted,
{
    type Context = M::Context;
    type Seed = (A::Output, <M as Accepted>::Seed);
    /// Child constructor: wraps the accepted socket into the user machine.
    fn create((sock, seed): Self::Seed, scope: &mut Scope<Self::Context>)
        -> Response<Self, Void>
    {
        M::accepted(sock, seed, scope).wrap(Accept::Connection)
    }
    /// Readiness on the listener accepts one connection; readiness on a
    /// connection is delegated to the wrapped machine.
    fn ready(self, events: EventSet, scope: &mut Scope<Self::Context>)
        -> Response<Self, Self::Seed>
    {
        match self {
            Accept::Server(a, s) => Self::accept_one(a, s),
            Accept::Connection(m) => {
                m.ready(events, scope)
                    .map(Accept::Connection, |_| unreachable!())
            }
        }
    }
    /// After spawning a child, immediately try to accept another pending
    /// connection (edge-triggered sockets only notify once per burst).
    fn spawned(self, _scope: &mut Scope<Self::Context>)
        -> Response<Self, Self::Seed>
    {
        match self {
            Accept::Server(a, s) => Self::accept_one(a, s),
            Accept::Connection(_) => {
                // Only the server variant ever spawns.
                unreachable!();
            }
        }
    }
    /// Timeouts are only meaningful for wrapped connection machines.
    fn timeout(self, scope: &mut Scope<Self::Context>)
        -> Response<Self, Self::Seed>
    {
        match self {
            Accept::Server(..) => unreachable!(),
            Accept::Connection(m) => {
                m.timeout(scope).map(Accept::Connection, |_| unreachable!())
            }
        }
    }
    /// Wakeups are ignored by the server and forwarded to connections.
    fn wakeup(self, scope: &mut Scope<Self::Context>)
        -> Response<Self, Self::Seed>
    {
        match self {
            me @ Accept::Server(..) => Response::ok(me),
            Accept::Connection(m) => {
                m.wakeup(scope).map(Accept::Connection, |_| unreachable!())
            }
        }
    }
}
| 30.846774 | 76 | 0.469542 |
0a3bc44d8092f919bfcc79bb9cc8e5cbf2354639 | 1,821 | #![feature(test)]
extern crate test;
use rand::{thread_rng, Rng};
use solana_core::broadcast_stage::broadcast_metrics::TransmitShredsStats;
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
use panoptis_ledger::shred::Shred;
use solana_sdk::pubkey;
use solana_sdk::timing::timestamp;
use std::{
collections::HashMap,
net::UdpSocket,
sync::{atomic::AtomicU64, Arc},
};
use test::Bencher;
/// Benchmarks `broadcast_shreds`: builds a local leader node plus NUM_PEERS
/// random contacts with random stakes, then measures broadcasting NUM_SHREDS
/// empty data shreds to the stake-weighted peer set.
#[bench]
fn broadcast_shreds_bench(bencher: &mut Bencher) {
    solana_logger::setup();
    let leader_pubkey = pubkey::new_rand();
    let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
    let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info);
    let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    const NUM_SHREDS: usize = 32;
    let shreds = vec![Shred::new_empty_data_shred(); NUM_SHREDS];
    let mut stakes = HashMap::new();
    const NUM_PEERS: usize = 200;
    // Populate the gossip table with random peers carrying random stakes.
    for _ in 0..NUM_PEERS {
        let id = pubkey::new_rand();
        let contact_info = ContactInfo::new_localhost(&id, timestamp());
        cluster_info.insert_info(contact_info);
        stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64);
    }
    let cluster_info = Arc::new(cluster_info);
    let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(&stakes));
    let shreds = Arc::new(shreds);
    let last_datapoint = Arc::new(AtomicU64::new(0));
    // Only the broadcast itself is inside the measured loop.
    bencher.iter(move || {
        let shreds = shreds.clone();
        broadcast_shreds(
            &socket,
            &shreds,
            &peers_and_stakes,
            &peers,
            &last_datapoint,
            &mut TransmitShredsStats::default(),
        )
        .unwrap();
    });
}
| 33.109091 | 86 | 0.668314 |
e8d3ffb3ef5e4b8202312a71f910a4855450f5b9 | 3,008 | //! Test command for checking the IR verifier.
//!
//! The `test verifier` test command looks for annotations on instructions like this:
//!
//! ```clif
//! jump ebb3 ; error: jump to non-existent EBB
//! ```
//!
//! This annotation means that the verifier is expected to given an error for the jump instruction
//! containing the substring "jump to non-existent EBB".
use crate::match_directive::match_directive;
use crate::subtest::{Context, SubTest, SubtestResult};
use cranelift_codegen::ir::Function;
use cranelift_codegen::verify_function;
use cranelift_reader::TestCommand;
use std::borrow::{Borrow, Cow};
use std::fmt::Write;
/// Marker type implementing the `verifier` subtest.
struct TestVerifier;

/// Builds the `verifier` subtest from a parsed `test verifier` command.
///
/// The command accepts no options; any option is reported as an error.
pub fn subtest(parsed: &TestCommand) -> SubtestResult<Box<dyn SubTest>> {
    assert_eq!(parsed.command, "verifier");
    if parsed.options.is_empty() {
        Ok(Box::new(TestVerifier))
    } else {
        Err(format!("No options allowed on {}", parsed))
    }
}
impl SubTest for TestVerifier {
    fn name(&self) -> &'static str {
        "verifier"
    }
    fn needs_verifier(&self) -> bool {
        // Running the verifier before this test would defeat its purpose.
        false
    }
    /// Verifies `func` and matches the produced errors against the
    /// `; error: <substring>` annotations collected from the source comments.
    fn run(&self, func: Cow<Function>, context: &Context) -> SubtestResult<()> {
        let func = func.borrow();
        // Scan source annotations for "error:" directives.
        // Each entry pairs the annotated entity with the expected substring.
        let mut expected = Vec::new();
        for comment in &context.details.comments {
            if let Some(tail) = match_directive(comment.text, "error:") {
                expected.push((comment.entity, tail));
            }
        }
        match verify_function(func, context.flags_or_isa()) {
            // No errors produced and none expected: pass.
            Ok(()) if expected.is_empty() => Ok(()),
            // Clean verification but the test expected errors: fail.
            Ok(()) => Err(format!("passed, but expected errors: {:?}", expected)),
            // Errors produced but none expected: fail with the full report.
            Err(ref errors) if expected.is_empty() => {
                Err(format!("expected no error, but got:\n{}", errors))
            }
            Err(errors) => {
                let mut errors = errors.0;
                let mut msg = String::new();
                // For each expected error, find a suitable match.
                // Matched errors are removed so each one satisfies at most
                // one expectation; leftovers are reported below.
                for expect in expected {
                    let pos = errors
                        .iter()
                        .position(|err| err.location == expect.0 && err.message.contains(expect.1));
                    match pos {
                        None => {
                            writeln!(msg, "  expected error {}: {}", expect.0, expect.1).unwrap();
                        }
                        Some(pos) => {
                            errors.swap_remove(pos);
                        }
                    }
                }
                // Report remaining errors.
                for err in errors {
                    writeln!(msg, "unexpected error {}", err).unwrap();
                }
                if msg.is_empty() {
                    Ok(())
                } else {
                    Err(msg)
                }
            }
        }
    }
}
| 31.663158 | 100 | 0.51363 |
08bad94e0127d22dde145f85a71197bc03298699 | 7,162 | // Copyright 2020 Nym Technologies SA
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::config::persistence::key_pathfinder::ClientKeyPathfinder;
use crypto::asymmetric::{encryption, identity};
use gateway_requests::registration::handshake::SharedKeys;
use log::*;
use nymsphinx::acknowledgements::AckKey;
use rand::{CryptoRng, RngCore};
use std::io;
use std::sync::Arc;
// Note: to support key rotation in the future, all keys will require adding an extra smart pointer,
// most likely an AtomicCell, or if it doesn't work as I think it does, a Mutex. Although I think
// AtomicCell includes a Mutex implicitly if the underlying type does not work atomically.
// And I guess there will need to be some mechanism for a grace period when you can still
// use the old key after new one was issued.
// Remember that Arc<T> has Deref implementation for T
pub struct KeyManager {
    /// identity key associated with the client instance.
    identity_keypair: Arc<identity::KeyPair>,
    /// encryption key associated with the client instance.
    encryption_keypair: Arc<encryption::KeyPair>,
    /// shared key derived with the gateway during "registration handshake";
    /// `None` until `insert_gateway_shared_key` is called or keys are loaded.
    gateway_shared_key: Option<Arc<SharedKeys>>,
    /// key used for producing and processing acknowledgement packets.
    ack_key: Arc<AckKey>,
}
// The expected flow of a KeyManager "lifetime" is as follows:
/*
1. ::new() is called during client-init
2. after gateway registration is completed [in init] ::insert_gateway_shared_key() is called
3. ::store_keys() is called before init finishes execution.
4. ::load_keys() is called at the beginning of each subsequent client-run
5. [not implemented] ::rotate_keys() is called periodically during client-run I presume?
*/
impl KeyManager {
    // this is actually **NOT** dead code
    // I have absolutely no idea why the compiler insists it's unused. The call happens during client::init::execute
    #[allow(dead_code)]
    /// Creates new instance of a [`KeyManager`] with freshly generated
    /// identity, encryption and ack keys; the gateway shared key starts
    /// out as `None` until the registration handshake completes.
    pub fn new<R>(rng: &mut R) -> Self
    where
        R: RngCore + CryptoRng,
    {
        KeyManager {
            identity_keypair: Arc::new(identity::KeyPair::new(rng)),
            encryption_keypair: Arc::new(encryption::KeyPair::new(rng)),
            gateway_shared_key: None,
            ack_key: Arc::new(AckKey::new(rng)),
        }
    }
    // this is actually **NOT** dead code
    // I have absolutely no idea why the compiler insists it's unused. The call happens during client::init::execute
    #[allow(dead_code)]
    /// After shared key with the gateway is derived, puts its ownership to this instance of a [`KeyManager`].
    pub fn insert_gateway_shared_key(&mut self, gateway_shared_key: SharedKeys) {
        self.gateway_shared_key = Some(Arc::new(gateway_shared_key))
    }
    /// Loads previously stored keys from the disk.
    pub fn load_keys(client_pathfinder: &ClientKeyPathfinder) -> io::Result<Self> {
        let identity_keypair: identity::KeyPair =
            pemstore::load_keypair(&pemstore::KeyPairPath::new(
                client_pathfinder.private_identity_key().to_owned(),
                client_pathfinder.public_identity_key().to_owned(),
            ))?;
        let encryption_keypair: encryption::KeyPair =
            pemstore::load_keypair(&pemstore::KeyPairPath::new(
                client_pathfinder.private_encryption_key().to_owned(),
                client_pathfinder.public_encryption_key().to_owned(),
            ))?;
        let gateway_shared_key: SharedKeys =
            pemstore::load_key(&client_pathfinder.gateway_shared_key().to_owned())?;
        // The ack key is persisted by `store_keys`, so it is loaded from disk
        // here like the other keys rather than regenerated. (The previous
        // comment claiming it "is generated now" was stale and incorrect.)
        let ack_key: AckKey = pemstore::load_key(&client_pathfinder.ack_key().to_owned())?;
        Ok(KeyManager {
            identity_keypair: Arc::new(identity_keypair),
            encryption_keypair: Arc::new(encryption_keypair),
            gateway_shared_key: Some(Arc::new(gateway_shared_key)),
            ack_key: Arc::new(ack_key),
        })
    }
    // this is actually **NOT** dead code
    // I have absolutely no idea why the compiler insists it's unused. The call happens during client::init::execute
    #[allow(dead_code)]
    /// Stores all available keys on the disk.
    // While perhaps there is no much point in storing the `AckKey` on the disk,
    // it is done so for the consistency sake so that you wouldn't require an rng instance
    // during `load_keys` to generate the said key.
    pub fn store_keys(&self, client_pathfinder: &ClientKeyPathfinder) -> io::Result<()> {
        pemstore::store_keypair(
            self.identity_keypair.as_ref(),
            &pemstore::KeyPairPath::new(
                client_pathfinder.private_identity_key().to_owned(),
                client_pathfinder.public_identity_key().to_owned(),
            ),
        )?;
        pemstore::store_keypair(
            self.encryption_keypair.as_ref(),
            &pemstore::KeyPairPath::new(
                client_pathfinder.private_encryption_key().to_owned(),
                client_pathfinder.public_encryption_key().to_owned(),
            ),
        )?;
        pemstore::store_key(self.ack_key.as_ref(), &client_pathfinder.ack_key())?;
        match self.gateway_shared_key.as_ref() {
            // Best-effort: absence of the gateway key is logged, not fatal.
            None => warn!("No gateway shared key available to store!"),
            Some(gate_key) => {
                pemstore::store_key(gate_key.as_ref(), &client_pathfinder.gateway_shared_key())?
            }
        }
        Ok(())
    }
    /// Gets an atomically reference counted pointer to [`identity::KeyPair`].
    pub fn identity_keypair(&self) -> Arc<identity::KeyPair> {
        Arc::clone(&self.identity_keypair)
    }
    /// Gets an atomically reference counted pointer to [`encryption::KeyPair`].
    pub fn encryption_keypair(&self) -> Arc<encryption::KeyPair> {
        Arc::clone(&self.encryption_keypair)
    }
    /// Gets an atomically reference counted pointer to [`SharedKeys`].
    ///
    /// # Panics
    /// Panics if the gateway shared key has not been derived or loaded yet.
    // since this function is not fully public, it is not expected to be used externally and
    // hence it's up to us to ensure it's called in correct context
    pub fn gateway_shared_key(&self) -> Arc<SharedKeys> {
        // `as_ref().expect(..)` already yields `&Arc<SharedKeys>`; the extra
        // leading `&` in the original was a needless double borrow (clippy).
        Arc::clone(
            self.gateway_shared_key
                .as_ref()
                .expect("tried to unwrap empty gateway key!"),
        )
    }
    /// Gets an atomically reference counted pointer to [`AckKey`].
    pub fn ack_key(&self) -> Arc<AckKey> {
        Arc::clone(&self.ack_key)
    }
}
| 42.378698 | 116 | 0.666434 |
e29adcc6e56a3226fc28b172cca4d7739c1d604f | 2,660 | //! Generic Packs
use crate::basic::com::{Range, Span, Store, MultiStore};
use crate::model::ast::*;
/// A Pack of Generic Parameters, introducing types and constants.
///
/// Used in declarations of types, extensions, implementations, and functions.
pub type GenericParameterPack = GenericPack<Identifier>;
/// A Pack of Generic Variables.
///
/// Used in instantiations of types, extensions, implementations, and functions
/// as well as within expressions.
pub type GenericVariablePack = GenericPack<GenericVariable>;
/// A Generic Pack.
///
/// Used to represent any appearance of `[...]`, with the appropriate `T`.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct GenericPack<T> {
    /// Elements of the pack.
    pub elements: Id<[T]>,
    /// Offsets of the commas separating the elements, an absent comma is placed
    /// at the offset of the last character of the element it would have followed.
    pub commas: Id<[u32]>,
    /// Offset of the opening bracket.
    pub open: u32,
    /// Offset of the closing bracket, an absent bracket is placed at the offset
    /// of the last character of the field it would have followed.
    pub close: u32,
}
impl<T> GenericPack<T> {
    /// Copies this pack's element and comma slices out of `source` and into
    /// `target`, returning a pack whose ids point into the target store.
    /// Bracket offsets are carried over unchanged.
    pub fn replicate<Source, Target>(
        &self,
        source: &Source,
        target: &mut Target
    )
        -> Self
    where
        Source: MultiStore<T> + MultiStore<u32>,
        Target: MultiStore<T> + MultiStore<u32>,
    {
        let moved_elements = target.push_slice(source.get_slice(self.elements));
        let moved_commas = target.push_slice(source.get_slice(self.commas));
        GenericPack {
            elements: moved_elements,
            commas: moved_commas,
            open: self.open,
            close: self.close,
        }
    }
}
impl<T> Span for GenericPack<T> {
    /// Returns the range spanned by the pack.
    fn span(&self) -> Range {
        // Half-open over [open, close]: +1 makes the closing bracket inclusive.
        Range::half_open(self.open, self.close + 1)
    }
}
/// A Generic Variable
///
/// Used to represent either a type or value.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub enum GenericVariable {
    /// A literal value variable.
    Literal(Literal, Range),
    /// A type variable.
    Type(TypeId),
    /// A value variable.
    Value(VariableIdentifier),
}
impl GenericVariable {
/// Returns the range of the pattern.
pub fn range<S>(&self, store: &S) -> Range
where
S: Store<GenericVariablePack> + Store<Type>,
{
use GenericVariable::*;
match *self {
Literal(_, range) => range,
Type(typ) => store.get_range(typ),
Value(name) => name.span(),
}
}
}
| 30.227273 | 82 | 0.637218 |
c14922c7262b13b42f45a62ea60ab11ce00ac220 | 1,824 | use crate::{
fib::{Fiber, FiberRoot, FiberState},
thr::prelude::*,
};
use core::{
ops::{Generator, GeneratorState},
pin::Pin,
};
/// Fiber for [`Generator`].
///
/// Can be created with [`fib::new`](crate::fib::new).
// Thin newtype: all fiber behavior is delegated to the wrapped generator.
pub struct FiberGen<G>(G)
where
    G: Generator;
impl<G> Fiber for FiberGen<G>
where
    G: Generator,
{
    type Input = ();
    type Return = G::Return;
    type Yield = G::Yield;
    #[inline]
    fn resume(self: Pin<&mut Self>, (): ()) -> FiberState<G::Yield, G::Return> {
        // SAFETY: structural pin projection to field 0; the generator is never
        // moved out of the pinned `FiberGen`, so its pinning guarantee holds.
        let gen = unsafe { self.map_unchecked_mut(|x| &mut x.0) };
        // `GeneratorState` maps onto `FiberState` via the `From` impl below.
        gen.resume(()).into()
    }
}
impl<G> FiberRoot for FiberGen<G>
where
    G: Generator<Yield = (), Return = ()>,
    G: Send + 'static,
{
    /// Drives the fiber one step: a yield means it wants to run again
    /// (`true`); completion retires it (`false`).
    #[inline]
    fn advance(self: Pin<&mut Self>) -> bool {
        matches!(self.resume(()), FiberState::Yielded(()))
    }
}
impl<Y, R> From<GeneratorState<Y, R>> for FiberState<Y, R> {
    /// Translates generator progress into the equivalent fiber state,
    /// variant for variant.
    #[inline]
    fn from(state: GeneratorState<Y, R>) -> Self {
        match state {
            GeneratorState::Yielded(value) => Self::Yielded(value),
            GeneratorState::Complete(value) => Self::Complete(value),
        }
    }
}
/// Creates a fiber from the generator `gen`.
///
/// This type of fiber yields on each generator `yield`.
#[inline]
pub fn new<G>(gen: G) -> FiberGen<G>
where
    G: Generator,
{
    FiberGen(gen)
}
/// Extends [`ThrToken`](crate::thr::ThrToken) types with `add` method.
pub trait ThrFiberGen: ThrToken {
    /// Adds a fiber for the generator `gen` to the fiber chain.
    #[inline]
    fn add<G>(self, gen: G)
    where
        G: Generator<Yield = (), Return = ()>,
        G: Send + 'static,
    {
        self.add_fib(new(gen))
    }
}
// Blanket impl: every thread token gets `add` for free.
impl<T: ThrToken> ThrFiberGen for T {}
| 22.518519 | 80 | 0.564693 |
28abeb0ced789a72f5d11eef9bba38c9fb24d5ee | 961 | use mongodb::{bson::doc, error::Error, Database};
use serenity::model::id::UserId;
use super::Player;
impl Player {
    /// Find a Player from a user_id. If no Player exists for the user, one will be created.
    ///
    /// Rewritten to match directly on the optional lookup instead of mutating
    /// an `Option` and then `expect`-ing it, removing a reachable-looking
    /// panic path while keeping behavior identical.
    pub async fn from_user_id(db: &Database, user_id: UserId) -> Result<Self, Error> {
        match Player::from_user_id_optional(db, user_id).await? {
            Some(player) => Ok(player),
            // No record yet: create, persist and return a fresh player.
            None => Player::new(user_id).insert(db).await,
        }
    }
    /// Tries to find a Player that has a matching user id
    pub async fn from_user_id_optional(
        db: &Database,
        user_id: UserId,
    ) -> Result<Option<Self>, Error> {
        let filter = doc! { "user_id": user_id.as_u64() };
        // `find_one` yields the raw BSON document (if any), which is then
        // deserialized via `from_doc`.
        match Player::collection(db).find_one(filter, None).await? {
            Some(doc) => Ok(Some(Self::from_doc(db, doc).await?)),
            None => Ok(None),
        }
    }
}
| 27.457143 | 89 | 0.673257 |
5d60cf20cafc22ca98b2fff2cdd0e4e89b7550b1 | 5,912 | #[doc = "Reader of register FLOWCTL"]
// svd2rust-style generated accessors for the FLOWCTL register.
#[doc = "Reader of register FLOWCTL"]
pub type R = crate::R<u32, super::FLOWCTL>;
#[doc = "Writer for register FLOWCTL"]
pub type W = crate::W<u32, super::FLOWCTL>;
#[doc = "Register FLOWCTL `reset()`'s with value 0"]
impl crate::ResetValue for super::FLOWCTL {
    type Type = u32;
    // All flow-control bits default to cleared.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `FCBBPA`"]
pub type FCBBPA_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FCBBPA`"]
pub struct FCBBPA_W<'a> {
w: &'a mut W,
}
impl<'a> FCBBPA_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Reader of field `TFE`"]
pub type TFE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TFE`"]
pub struct TFE_W<'a> {
w: &'a mut W,
}
impl<'a> TFE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Reader of field `RFE`"]
pub type RFE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `RFE`"]
pub struct RFE_W<'a> {
w: &'a mut W,
}
impl<'a> RFE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Reader of field `UP`"]
pub type UP_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `UP`"]
pub struct UP_W<'a> {
w: &'a mut W,
}
impl<'a> UP_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "Reader of field `DZQP`"]
pub type DZQP_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DZQP`"]
pub struct DZQP_W<'a> {
w: &'a mut W,
}
impl<'a> DZQP_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
self.w
}
}
#[doc = "Reader of field `PT`"]
pub type PT_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `PT`"]
pub struct PT_W<'a> {
w: &'a mut W,
}
impl<'a> PT_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // Field occupies bits 16..=31: mask the value to 16 bits and splice
        // it into the register word without disturbing the other fields.
        self.w.bits = (self.w.bits & !(0xffff << 16)) | (((value as u32) & 0xffff) << 16);
        self.w
    }
}
impl R {
    // svd2rust-generated readers: each accessor shifts the raw register
    // value down to the field's bit offset and masks to the field width.
    #[doc = "Bit 0 - Flow Control Busy or Back-pressure Activate"]
    #[inline(always)]
    pub fn fcbbpa(&self) -> FCBBPA_R {
        FCBBPA_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - Transmit Flow Control Enable"]
    #[inline(always)]
    pub fn tfe(&self) -> TFE_R {
        TFE_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - Receive Flow Control Enable"]
    #[inline(always)]
    pub fn rfe(&self) -> RFE_R {
        RFE_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - Unicast Pause Frame Detect"]
    #[inline(always)]
    pub fn up(&self) -> UP_R {
        UP_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 7 - Disable Zero-Quanta Pause"]
    #[inline(always)]
    pub fn dzqp(&self) -> DZQP_R {
        DZQP_R::new(((self.bits >> 7) & 0x01) != 0)
    }
    #[doc = "Bits 16:31 - Pause Time"]
    #[inline(always)]
    pub fn pt(&self) -> PT_R {
        PT_R::new(((self.bits >> 16) & 0xffff) as u16)
    }
}
impl W {
#[doc = "Bit 0 - Flow Control Busy or Back-pressure Activate"]
#[inline(always)]
pub fn fcbbpa(&mut self) -> FCBBPA_W {
FCBBPA_W { w: self }
}
#[doc = "Bit 1 - Transmit Flow Control Enable"]
#[inline(always)]
pub fn tfe(&mut self) -> TFE_W {
TFE_W { w: self }
}
#[doc = "Bit 2 - Receive Flow Control Enable"]
#[inline(always)]
pub fn rfe(&mut self) -> RFE_W {
RFE_W { w: self }
}
#[doc = "Bit 3 - Unicast Pause Frame Detect"]
#[inline(always)]
pub fn up(&mut self) -> UP_W {
UP_W { w: self }
}
#[doc = "Bit 7 - Disable Zero-Quanta Pause"]
#[inline(always)]
pub fn dzqp(&mut self) -> DZQP_W {
DZQP_W { w: self }
}
#[doc = "Bits 16:31 - Pause Time"]
#[inline(always)]
pub fn pt(&mut self) -> PT_W {
PT_W { w: self }
}
}
| 28.018957 | 90 | 0.52588 |
28bd3d5af446a0bc11d1064b8f275213980d9a0e | 1,054 | // Copyright 2020-2021 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use std::error::Error;
/// A cryptographically secure random number generator.
pub trait SecureRng {
    /// Fills the buffer with secure random data. `buf` is the output buffer.
    ///
    /// # Errors
    ///
    /// Returns an error if the underlying entropy source fails.
    fn random(&mut self, buf: &mut [u8]) -> Result<(), Box<dyn Error + 'static>>;
}
/// A deterministic random number generator extension: output is fully
/// determined by the most recent seed.
pub trait DeterministicRng: SecureRng {
    /// Reseeds the random number generator with a seed.
    ///
    /// # Errors
    ///
    /// Returns an error if the implementation rejects the seed.
    fn reseed(&mut self, seed: &[u8]) -> Result<(), Box<dyn Error + 'static>>;
}
/// A secret key generation algorithm.
pub trait SecretKeyGen {
    /// Generates a new secret key into `buf`, drawing randomness from `rng`.
    ///
    /// On success returns a `usize` — presumably the number of key bytes
    /// written into `buf`; confirm against implementors.
    fn new_secret_key(&self, buf: &mut [u8], rng: &mut dyn SecureRng) -> Result<usize, Box<dyn Error + 'static>>;
}
/// A public key generation algorithm.
pub trait PublicKeyGen {
    /// Derives the public key for `secret_key` into `buf`.
    ///
    /// On success returns a `usize` — presumably the number of key bytes
    /// written into `buf`; confirm against implementors.
    fn get_pub_key(&self, buf: &mut [u8], secret_key: &[u8]) -> Result<usize, Box<dyn Error + 'static>>;
}
| 36.344828 | 113 | 0.679317 |
71b18047b5c8472de238d33dc8aa0ee27e4e3cea | 34,620 | //! Type agnostic columnar data structure.
pub use crate::prelude::ChunkCompare;
use crate::prelude::*;
use arrow::array::ArrayRef;
use polars_arrow::prelude::QuantileInterpolOptions;
pub(crate) mod arithmetic;
mod comparison;
mod from;
pub mod implementations;
mod into;
pub(crate) mod iterator;
pub mod ops;
mod series_trait;
#[cfg(feature = "private")]
pub mod unstable;
use crate::chunked_array::ops::rolling_window::RollingOptions;
#[cfg(feature = "rank")]
use crate::prelude::unique::rank::rank;
#[cfg(feature = "groupby_list")]
use crate::utils::Wrap;
use crate::utils::{split_ca, split_series};
use crate::{series::arithmetic::coerce_lhs_rhs, POOL};
#[cfg(feature = "groupby_list")]
use ahash::RandomState;
pub use from::*;
use num::NumCast;
use rayon::prelude::*;
pub use series_trait::*;
use std::borrow::Cow;
#[cfg(feature = "groupby_list")]
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use std::sync::Arc;
/// # Series
/// The columnar data type for a DataFrame.
///
/// Most of the available functions are defined in the [SeriesTrait trait](crate::series::SeriesTrait).
///
/// The `Series` struct consists
/// of typed [ChunkedArray](../chunked_array/struct.ChunkedArray.html)'s. To quickly cast
/// a `Series` to a `ChunkedArray` you can call the method with the name of the type:
///
/// ```
/// # use polars_core::prelude::*;
/// let s: Series = [1, 2, 3].iter().collect();
/// // Quickly obtain the ChunkedArray wrapped by the Series.
/// let chunked_array = s.i32().unwrap();
/// ```
///
/// ## Arithmetic
///
/// You can do standard arithmetic on series.
/// ```
/// # use polars_core::prelude::*;
/// let s = Series::new("a", [1 , 2, 3]);
/// let out_add = &s + &s;
/// let out_sub = &s - &s;
/// let out_div = &s / &s;
/// let out_mul = &s * &s;
/// ```
///
/// Or with series and numbers.
///
/// ```
/// # use polars_core::prelude::*;
/// let s: Series = (1..3).collect();
/// let out_add_one = &s + 1;
/// let out_multiply = &s * 10;
///
/// // Could not overload left hand side operator.
/// let out_divide = 1.div(&s);
/// let out_add = 1.add(&s);
/// let out_subtract = 1.sub(&s);
/// let out_multiply = 1.mul(&s);
/// ```
///
/// ## Comparison
/// You can obtain boolean mask by comparing series.
///
/// ```
/// # use polars_core::prelude::*;
/// let s = Series::new("dollars", &[1, 2, 3]);
/// let mask = s.equal(1);
/// let valid = [true, false, false].iter();
/// assert!(mask
/// .into_iter()
/// .map(|opt_bool| opt_bool.unwrap()) // option, because series can be null
/// .zip(valid)
/// .all(|(a, b)| a == *b))
/// ```
///
/// See all the comparison operators in the [CmpOps trait](../chunked_array/comparison/trait.CmpOps.html)
///
/// ## Iterators
/// The Series variants contain differently typed [ChunkedArray's](../chunked_array/struct.ChunkedArray.html).
/// These structs can be turned into iterators, making it possible to use any function/ closure you want
/// on a Series.
///
/// These iterators return an `Option<T>` because the values of a series may be null.
///
/// ```
/// use polars_core::prelude::*;
/// let pi = 3.14;
/// let s = Series::new("angle", [2f32 * pi, pi, 1.5 * pi].as_ref());
/// let s_cos: Series = s.f32()
/// .expect("series was not an f32 dtype")
/// .into_iter()
/// .map(|opt_angle| opt_angle.map(|angle| angle.cos()))
/// .collect();
/// ```
///
/// ## Creation
/// Series can be create from different data structures. Below we'll show a few ways we can create
/// a Series object.
///
/// ```
/// # use polars_core::prelude::*;
/// // Series van be created from Vec's, slices and arrays
/// Series::new("boolean series", &[true, false, true]);
/// Series::new("int series", &[1, 2, 3]);
/// // And can be nullable
/// Series::new("got nulls", &[Some(1), None, Some(2)]);
///
/// // Series can also be collected from iterators
/// let from_iter: Series = (0..10)
/// .into_iter()
/// .collect();
///
/// ```
#[derive(Clone)]
#[must_use]
pub struct Series(pub Arc<dyn SeriesTrait>);
#[cfg(feature = "groupby_list")]
impl PartialEq for Wrap<Series> {
    fn eq(&self, other: &Self) -> bool {
        // Equality that also considers missing values (matching nulls
        // compare equal), per `series_equal_missing`.
        self.0.series_equal_missing(other)
    }
}
#[cfg(feature = "groupby_list")]
impl Eq for Wrap<Series> {}
#[cfg(feature = "groupby_list")]
impl Hash for Wrap<Series> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Fixed seeds keep the hash deterministic across runs/processes.
        let rs = RandomState::with_seeds(0, 0, 0, 0);
        // `vec_hash` presumably yields one hash per element; summing them
        // combines the hashes order-insensitively — TODO confirm this is
        // the intended collision behavior.
        let h = UInt64Chunked::new_from_aligned_vec("", self.0.vec_hash(rs)).sum();
        h.hash(state)
    }
}
impl Series {
    /// Returns a mutable reference to the inner trait object, applying
    /// copy-on-write semantics.
    pub(crate) fn get_inner_mut(&mut self) -> &mut dyn SeriesTrait {
        // If any other Arc (strong or weak) points at the inner value,
        // clone it first so the mutation stays private to this Series.
        if Arc::weak_count(&self.0) + Arc::strong_count(&self.0) != 1 {
            self.0 = self.0.clone_inner();
        }
        // Exclusive ownership is guaranteed by the branch above, so
        // `get_mut` cannot fail here.
        Arc::get_mut(&mut self.0).expect("implementation error")
    }
/// Rename series.
pub fn rename(&mut self, name: &str) -> &mut Series {
self.get_inner_mut().rename(name);
self
}
/// Shrink the capacity of this array to fit it's length.
pub fn shrink_to_fit(&mut self) {
self.get_inner_mut().shrink_to_fit()
}
/// Append arrow array of same datatype.
pub fn append_array(&mut self, other: ArrayRef) -> Result<&mut Self> {
self.get_inner_mut().append_array(other)?;
Ok(self)
}
/// Append a Series of the same type in place.
pub fn append(&mut self, other: &Series) -> Result<&mut Self> {
self.get_inner_mut().append(other)?;
Ok(self)
}
pub fn sort(&self, reverse: bool) -> Self {
self.sort_with(SortOptions {
descending: reverse,
..Default::default()
})
}
/// Only implemented for numeric types
pub fn as_single_ptr(&mut self) -> Result<usize> {
self.get_inner_mut().as_single_ptr()
}
/// Cast `[Series]` to another `[DataType]`
pub fn cast(&self, dtype: &DataType) -> Result<Self> {
self.0.cast(dtype)
}
/// Compute the sum of all values in this Series.
/// Returns `None` if the array is empty or only contains null values.
///
/// If the [`DataType`] is one of `{Int8, UInt8, Int16, UInt16}` the `Series` is
/// first cast to `Int64` to prevent overflow issues.
///
/// ```
/// # use polars_core::prelude::*;
/// let s = Series::new("days", &[1, 2, 3]);
/// assert_eq!(s.sum(), Some(6));
/// ```
pub fn sum<T>(&self) -> Option<T>
where
T: NumCast,
{
self.sum_as_series()
.cast(&DataType::Float64)
.ok()
.and_then(|s| s.f64().unwrap().get(0).and_then(T::from))
}
/// Returns the minimum value in the array, according to the natural order.
/// Returns an option because the array is nullable.
/// ```
/// # use polars_core::prelude::*;
/// let s = Series::new("days", [1, 2, 3].as_ref());
/// assert_eq!(s.min(), Some(1));
/// ```
pub fn min<T>(&self) -> Option<T>
where
T: NumCast,
{
self.min_as_series()
.cast(&DataType::Float64)
.ok()
.and_then(|s| s.f64().unwrap().get(0).and_then(T::from))
}
/// Returns the maximum value in the array, according to the natural order.
/// Returns an option because the array is nullable.
/// ```
/// # use polars_core::prelude::*;
/// let s = Series::new("days", [1, 2, 3].as_ref());
/// assert_eq!(s.max(), Some(3));
/// ```
pub fn max<T>(&self) -> Option<T>
where
T: NumCast,
{
self.max_as_series()
.cast(&DataType::Float64)
.ok()
.and_then(|s| s.f64().unwrap().get(0).and_then(T::from))
}
/// Explode a list or utf8 Series. This expands every item to a new row..
pub fn explode(&self) -> Result<Series> {
match self.dtype() {
DataType::List(_) => self.list().unwrap().explode(),
DataType::Utf8 => self.utf8().unwrap().explode(),
_ => Err(PolarsError::InvalidOperation(
format!(
"explode not supported for Series with dtype {:?}",
self.dtype()
)
.into(),
)),
}
}
/// Check if float value is NaN (note this is different than missing/ null)
pub fn is_nan(&self) -> Result<BooleanChunked> {
match self.dtype() {
DataType::Float32 => Ok(self.f32().unwrap().is_nan()),
DataType::Float64 => Ok(self.f64().unwrap().is_nan()),
_ => Ok(BooleanChunked::full(self.name(), false, self.len())),
}
}
/// Check if float value is NaN (note this is different than missing/ null)
pub fn is_not_nan(&self) -> Result<BooleanChunked> {
match self.dtype() {
DataType::Float32 => Ok(self.f32().unwrap().is_not_nan()),
DataType::Float64 => Ok(self.f64().unwrap().is_not_nan()),
_ => Ok(BooleanChunked::full(self.name(), true, self.len())),
}
}
/// Check if float value is finite
pub fn is_finite(&self) -> Result<BooleanChunked> {
match self.dtype() {
DataType::Float32 => Ok(self.f32().unwrap().is_finite()),
DataType::Float64 => Ok(self.f64().unwrap().is_finite()),
_ => Err(PolarsError::InvalidOperation(
format!(
"is_nan not supported for series with dtype {:?}",
self.dtype()
)
.into(),
)),
}
}
/// Check if float value is finite
pub fn is_infinite(&self) -> Result<BooleanChunked> {
match self.dtype() {
DataType::Float32 => Ok(self.f32().unwrap().is_infinite()),
DataType::Float64 => Ok(self.f64().unwrap().is_infinite()),
_ => Err(PolarsError::InvalidOperation(
format!(
"is_nan not supported for series with dtype {:?}",
self.dtype()
)
.into(),
)),
}
}
/// Create a new ChunkedArray with values from self where the mask evaluates `true` and values
/// from `other` where the mask evaluates `false`
#[cfg(feature = "zip_with")]
#[cfg_attr(docsrs, doc(cfg(feature = "zip_with")))]
pub fn zip_with(&self, mask: &BooleanChunked, other: &Series) -> Result<Series> {
let (lhs, rhs) = coerce_lhs_rhs(self, other)?;
lhs.zip_with_same_type(mask, rhs.as_ref())
}
/// Cast a datelike Series to their physical representation.
/// Primitives remain unchanged
///
/// * Date -> Int32
/// * Datetime-> Int64
/// * Time -> Int64
/// * Categorical -> UInt32
///
pub fn to_physical_repr(&self) -> Cow<Series> {
use DataType::*;
match self.dtype() {
Date => Cow::Owned(self.cast(&DataType::Int32).unwrap()),
Datetime(_, _) | Duration(_) | Time => Cow::Owned(self.cast(&DataType::Int64).unwrap()),
Categorical => Cow::Owned(self.cast(&DataType::UInt32).unwrap()),
_ => Cow::Borrowed(self),
}
}
/// Take by index if ChunkedArray contains a single chunk.
///
/// # Safety
/// This doesn't check any bounds. Null validity is checked.
pub unsafe fn take_unchecked_threaded(
&self,
idx: &UInt32Chunked,
rechunk: bool,
) -> Result<Series> {
let n_threads = POOL.current_num_threads();
let idx = split_ca(idx, n_threads)?;
let series: Result<Vec<_>> =
POOL.install(|| idx.par_iter().map(|idx| self.take_unchecked(idx)).collect());
let s = series?
.into_iter()
.reduce(|mut s, s1| {
s.append(&s1).unwrap();
s
})
.unwrap();
if rechunk {
Ok(s.rechunk())
} else {
Ok(s)
}
}
/// Take by index. This operation is clone.
///
/// # Safety
///
/// Out of bounds access doesn't Error but will return a Null value
pub fn take_threaded(&self, idx: &UInt32Chunked, rechunk: bool) -> Result<Series> {
let n_threads = POOL.current_num_threads();
let idx = split_ca(idx, n_threads).unwrap();
let series = POOL.install(|| {
idx.par_iter()
.map(|idx| self.take(idx))
.collect::<Result<Vec<_>>>()
})?;
let s = series
.into_iter()
.reduce(|mut s, s1| {
s.append(&s1).unwrap();
s
})
.unwrap();
if rechunk {
Ok(s.rechunk())
} else {
Ok(s)
}
}
/// Filter by boolean mask. This operation clones data.
pub fn filter_threaded(&self, filter: &BooleanChunked, rechunk: bool) -> Result<Series> {
// this would fail if there is a broadcasting filter.
// because we cannot split that filter over threads
// besides they are a no-op, so we do the standard filter.
if filter.len() == 1 {
return self.filter(filter);
}
let n_threads = POOL.current_num_threads();
let filters = split_ca(filter, n_threads).unwrap();
let series = split_series(self, n_threads).unwrap();
let series: Result<Vec<_>> = POOL.install(|| {
filters
.par_iter()
.zip(series)
.map(|(filter, s)| s.filter(filter))
.collect()
});
let s = series?
.into_iter()
.reduce(|mut s, s1| {
s.append(&s1).unwrap();
s
})
.unwrap();
if rechunk {
Ok(s.rechunk())
} else {
Ok(s)
}
}
#[cfg(feature = "dot_product")]
#[cfg_attr(docsrs, doc(cfg(feature = "dot_product")))]
pub fn dot(&self, other: &Series) -> Option<f64> {
(self * other).sum::<f64>()
}
#[cfg(feature = "row_hash")]
#[cfg_attr(docsrs, doc(cfg(feature = "row_hash")))]
/// Get a hash of this Series
pub fn hash(&self, build_hasher: ahash::RandomState) -> UInt64Chunked {
UInt64Chunked::new_from_aligned_vec(self.name(), self.0.vec_hash(build_hasher))
}
/// Get the sum of the Series as a new Series of length 1.
///
/// If the [`DataType`] is one of `{Int8, UInt8, Int16, UInt16}` the `Series` is
/// first cast to `Int64` to prevent overflow issues.
pub fn sum_as_series(&self) -> Series {
use DataType::*;
match self.dtype() {
Int8 | UInt8 | Int16 | UInt16 => self.cast(&Int64).unwrap().sum_as_series(),
_ => self._sum_as_series(),
}
}
/// Get an array with the cumulative max computed at every element
#[cfg_attr(docsrs, doc(cfg(feature = "cum_agg")))]
pub fn cummax(&self, _reverse: bool) -> Series {
#[cfg(feature = "cum_agg")]
{
self._cummax(_reverse)
}
#[cfg(not(feature = "cum_agg"))]
{
panic!("activate 'cum_agg' feature")
}
}
/// Get an array with the cumulative min computed at every element
#[cfg_attr(docsrs, doc(cfg(feature = "cum_agg")))]
pub fn cummin(&self, _reverse: bool) -> Series {
#[cfg(feature = "cum_agg")]
{
self._cummin(_reverse)
}
#[cfg(not(feature = "cum_agg"))]
{
panic!("activate 'cum_agg' feature")
}
}
/// Get an array with the cumulative sum computed at every element
///
/// If the [`DataType`] is one of `{Int8, UInt8, Int16, UInt16}` the `Series` is
/// first cast to `Int64` to prevent overflow issues.
#[cfg_attr(docsrs, doc(cfg(feature = "cum_agg")))]
#[allow(unused_variables)]
pub fn cumsum(&self, reverse: bool) -> Series {
#[cfg(feature = "cum_agg")]
{
use DataType::*;
match self.dtype() {
Boolean => self.cast(&DataType::UInt32).unwrap().cumsum(reverse),
Int8 | UInt8 | Int16 | UInt16 => {
let s = self.cast(&Int64).unwrap();
s.cumsum(reverse)
}
Int32 => {
let ca = self.i32().unwrap();
ca.cumsum(reverse).into_series()
}
UInt32 => {
let ca = self.u32().unwrap();
ca.cumsum(reverse).into_series()
}
UInt64 => {
let ca = self.u64().unwrap();
ca.cumsum(reverse).into_series()
}
Int64 => {
let ca = self.i64().unwrap();
ca.cumsum(reverse).into_series()
}
Float32 => {
let ca = self.f32().unwrap();
ca.cumsum(reverse).into_series()
}
Float64 => {
let ca = self.f64().unwrap();
ca.cumsum(reverse).into_series()
}
dt => panic!("cumsum not supported for dtype: {:?}", dt),
}
}
#[cfg(not(feature = "cum_agg"))]
{
panic!("activate 'cum_agg' feature")
}
}
/// Get an array with the cumulative product computed at every element
///
/// If the [`DataType`] is one of `{Int8, UInt8, Int16, UInt16, Int32, UInt32}` the `Series` is
/// first cast to `Int64` to prevent overflow issues.
#[cfg_attr(docsrs, doc(cfg(feature = "cum_agg")))]
#[allow(unused_variables)]
pub fn cumprod(&self, reverse: bool) -> Series {
#[cfg(feature = "cum_agg")]
{
use DataType::*;
match self.dtype() {
Boolean => self.cast(&DataType::Int64).unwrap().cumprod(reverse),
Int8 | UInt8 | Int16 | UInt16 | Int32 | UInt32 => {
let s = self.cast(&Int64).unwrap();
s.cumprod(reverse)
}
Int64 => {
let ca = self.i64().unwrap();
ca.cumprod(reverse).into_series()
}
UInt64 => {
let ca = self.u64().unwrap();
ca.cumprod(reverse).into_series()
}
Float32 => {
let ca = self.f32().unwrap();
ca.cumprod(reverse).into_series()
}
Float64 => {
let ca = self.f64().unwrap();
ca.cumprod(reverse).into_series()
}
dt => panic!("cumprod not supported for dtype: {:?}", dt),
}
}
#[cfg(not(feature = "cum_agg"))]
{
panic!("activate 'cum_agg' feature")
}
}
/// Get the product of an array.
///
/// If the [`DataType`] is one of `{Int8, UInt8, Int16, UInt16}` the `Series` is
/// first cast to `Int64` to prevent overflow issues.
#[cfg_attr(docsrs, doc(cfg(feature = "product")))]
pub fn product(&self) -> Series {
#[cfg(feature = "product")]
{
use DataType::*;
match self.dtype() {
Boolean => self.cast(&DataType::Int64).unwrap().product(),
Int8 | UInt8 | Int16 | UInt16 => {
let s = self.cast(&Int64).unwrap();
s.product()
}
Int64 => {
let ca = self.i64().unwrap();
ca.prod_as_series()
}
Float32 => {
let ca = self.f32().unwrap();
ca.prod_as_series()
}
Float64 => {
let ca = self.f64().unwrap();
ca.prod_as_series()
}
dt => panic!("cumprod not supported for dtype: {:?}", dt),
}
}
#[cfg(not(feature = "product"))]
{
panic!("activate 'product' feature")
}
}
/// Apply a rolling variance to a Series. See:
#[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))]
pub fn rolling_var(&self, _options: RollingOptions) -> Result<Series> {
#[cfg(feature = "rolling_window")]
{
self._rolling_var(_options)
}
#[cfg(not(feature = "rolling_window"))]
{
panic!("activate 'rolling_window' feature")
}
}
/// Apply a rolling std to a Series. See:
#[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))]
pub fn rolling_std(&self, _options: RollingOptions) -> Result<Series> {
#[cfg(feature = "rolling_window")]
{
self._rolling_std(_options)
}
#[cfg(not(feature = "rolling_window"))]
{
panic!("activate 'rolling_window' feature")
}
}
/// Apply a rolling mean to a Series. See:
/// [ChunkedArray::rolling_mean]
#[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))]
pub fn rolling_mean(&self, _options: RollingOptions) -> Result<Series> {
#[cfg(feature = "rolling_window")]
{
self._rolling_mean(_options)
}
#[cfg(not(feature = "rolling_window"))]
{
panic!("activate 'rolling_window' feature")
}
}
/// Apply a rolling sum to a Series. See:
/// [ChunkedArray::rolling_sum]
#[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))]
pub fn rolling_sum(&self, _options: RollingOptions) -> Result<Series> {
#[cfg(feature = "rolling_window")]
{
self._rolling_sum(_options)
}
#[cfg(not(feature = "rolling_window"))]
{
panic!("activate 'rolling_window' feature")
}
}
/// Apply a rolling median to a Series. See:
/// [ChunkedArray::rolling_median](crate::prelude::ChunkWindow::rolling_median).
#[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))]
pub fn rolling_median(&self, _options: RollingOptions) -> Result<Series> {
#[cfg(feature = "rolling_window")]
{
self._rolling_median(_options)
}
#[cfg(not(feature = "rolling_window"))]
{
panic!("activate 'rolling_window' feature")
}
}
/// Apply a rolling quantile to a Series. See:
/// [ChunkedArray::rolling_quantile](crate::prelude::ChunkWindow::rolling_quantile).
#[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))]
pub fn rolling_quantile(
&self,
_quantile: f64,
_interpolation: QuantileInterpolOptions,
_options: RollingOptions,
) -> Result<Series> {
#[cfg(feature = "rolling_window")]
{
self._rolling_quantile(_quantile, _interpolation, _options)
}
#[cfg(not(feature = "rolling_window"))]
{
panic!("activate 'rolling_window' feature")
}
}
/// Apply a rolling min to a Series. See:
/// [ChunkedArray::rolling_min]
#[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))]
pub fn rolling_min(&self, _options: RollingOptions) -> Result<Series> {
#[cfg(feature = "rolling_window")]
{
self._rolling_min(_options)
}
#[cfg(not(feature = "rolling_window"))]
{
panic!("activate 'rolling_window' feature")
}
}
/// Apply a rolling max to a Series. See:
/// [ChunkedArray::rolling_max]
#[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))]
pub fn rolling_max(&self, _options: RollingOptions) -> Result<Series> {
#[cfg(feature = "rolling_window")]
{
self._rolling_max(_options)
}
#[cfg(not(feature = "rolling_window"))]
{
panic!("activate 'rolling_window' feature")
}
}
#[cfg(feature = "rank")]
#[cfg_attr(docsrs, doc(cfg(feature = "rank")))]
pub fn rank(&self, options: RankOptions) -> Series {
rank(self, options.method, options.descending)
}
/// Cast throws an error if conversion had overflows
pub fn strict_cast(&self, data_type: &DataType) -> Result<Series> {
let s = self.cast(data_type)?;
if self.null_count() != s.null_count() {
Err(PolarsError::ComputeError(
format!(
"strict conversion of cast from {:?} to {:?} failed. consider non-strict cast.\n
If you were trying to cast Utf8 to Date,Time,Datetime, consider using `strptime`",
self.dtype(),
data_type
)
.into(),
))
} else {
Ok(s)
}
}
#[cfg(feature = "dtype-time")]
pub(crate) fn into_time(self) -> Series {
match self.dtype() {
DataType::Int64 => self.i64().unwrap().clone().into_time().into_series(),
DataType::Time => self
.time()
.unwrap()
.as_ref()
.clone()
.into_time()
.into_series(),
dt => panic!("date not implemented for {:?}", dt),
}
}
pub(crate) fn into_date(self) -> Series {
match self.dtype() {
#[cfg(feature = "dtype-date")]
DataType::Int32 => self.i32().unwrap().clone().into_date().into_series(),
#[cfg(feature = "dtype-date")]
DataType::Date => self
.date()
.unwrap()
.as_ref()
.clone()
.into_date()
.into_series(),
dt => panic!("date not implemented for {:?}", dt),
}
}
pub(crate) fn into_datetime(self, timeunit: TimeUnit, tz: Option<TimeZone>) -> Series {
match self.dtype() {
#[cfg(feature = "dtype-datetime")]
DataType::Int64 => self
.i64()
.unwrap()
.clone()
.into_datetime(timeunit, tz)
.into_series(),
#[cfg(feature = "dtype-datetime")]
DataType::Datetime(_, _) => self
.datetime()
.unwrap()
.as_ref()
.clone()
.into_datetime(timeunit, tz)
.into_series(),
dt => panic!("into_datetime not implemented for {:?}", dt),
}
}
pub(crate) fn into_duration(self, timeunit: TimeUnit) -> Series {
match self.dtype() {
#[cfg(feature = "dtype-duration")]
DataType::Int64 => self
.i64()
.unwrap()
.clone()
.into_duration(timeunit)
.into_series(),
#[cfg(feature = "dtype-duration")]
DataType::Duration(_) => self
.duration()
.unwrap()
.as_ref()
.clone()
.into_duration(timeunit)
.into_series(),
dt => panic!("into_duration not implemented for {:?}", dt),
}
}
/// Check if the underlying data is a logical type.
pub fn is_logical(&self) -> bool {
use DataType::*;
matches!(
self.dtype(),
Date | Duration(_) | Datetime(_, _) | Time | Categorical
)
}
/// Check if underlying physical data is numeric.
///
/// Date types and Categoricals are also considered numeric.
pub fn is_numeric_physical(&self) -> bool {
// allow because it cannot be replaced when object feature is activated
#[allow(clippy::match_like_matches_macro)]
match self.dtype() {
DataType::Utf8 | DataType::List(_) | DataType::Boolean | DataType::Null => false,
#[cfg(feature = "object")]
DataType::Object(_) => false,
_ => true,
}
}
/// Check if underlying data is numeric
pub fn is_numeric(&self) -> bool {
// allow because it cannot be replaced when object feature is activated
#[allow(clippy::match_like_matches_macro)]
match self.dtype() {
DataType::Utf8
| DataType::List(_)
| DataType::Categorical
| DataType::Date
| DataType::Datetime(_, _)
| DataType::Duration(_)
| DataType::Boolean
| DataType::Null => false,
#[cfg(feature = "object")]
DataType::Object(_) => false,
_ => true,
}
}
#[cfg(feature = "abs")]
#[cfg_attr(docsrs, doc(cfg(feature = "abs")))]
/// convert numerical values to their absolute value
pub fn abs(&self) -> Result<Series> {
let a = self.to_physical_repr();
use DataType::*;
let out = match a.dtype() {
#[cfg(feature = "dtype-i8")]
Int8 => a.i8().unwrap().abs().into_series(),
#[cfg(feature = "dtype-i16")]
Int16 => a.i16().unwrap().abs().into_series(),
Int32 => a.i32().unwrap().abs().into_series(),
Int64 => a.i64().unwrap().abs().into_series(),
UInt8 | UInt16 | UInt32 | UInt64 => self.clone(),
Float32 => a.f32().unwrap().abs().into_series(),
Float64 => a.f64().unwrap().abs().into_series(),
dt => {
return Err(PolarsError::InvalidOperation(
format!("abs not supportedd for series of type {:?}", dt).into(),
));
}
};
Ok(out)
}
#[cfg(feature = "private")]
// used for formatting
pub fn str_value(&self, index: usize) -> Cow<str> {
match self.0.get(index) {
AnyValue::Utf8(s) => Cow::Borrowed(s),
AnyValue::Null => Cow::Borrowed("null"),
#[cfg(feature = "dtype-categorical")]
AnyValue::Categorical(idx, rev) => Cow::Borrowed(rev.get(idx)),
av => Cow::Owned(format!("{}", av)),
}
}
/// Get the head of the Series.
pub fn head(&self, length: Option<usize>) -> Series {
match length {
Some(len) => self.slice(0, std::cmp::min(len, self.len())),
None => self.slice(0, std::cmp::min(10, self.len())),
}
}
/// Get the tail of the Series.
pub fn tail(&self, length: Option<usize>) -> Series {
let len = match length {
Some(len) => std::cmp::min(len, self.len()),
None => std::cmp::min(10, self.len()),
};
self.slice(-(len as i64), len)
}
}
impl Deref for Series {
type Target = dyn SeriesTrait;
fn deref(&self) -> &Self::Target {
&*self.0
}
}
impl<'a> AsRef<(dyn SeriesTrait + 'a)> for Series {
fn as_ref(&self) -> &(dyn SeriesTrait + 'a) {
&*self.0
}
}
impl Default for Series {
    fn default() -> Self {
        // An empty Int64 Series is the canonical "empty" value.
        Int64Chunked::default().into_series()
    }
}
impl<'a, T> AsRef<ChunkedArray<T>> for dyn SeriesTrait + 'a
where
    T: 'static + PolarsDataType,
{
    fn as_ref(&self) -> &ChunkedArray<T> {
        if &T::get_dtype() == self.dtype() ||
            // needed because we want to get ref of List no matter what the inner type is.
            (matches!(T::get_dtype(), DataType::List(_)) && matches!(self.dtype(), DataType::List(_)))
        {
            // SAFETY: the runtime dtype matches `T`, and the cast assumes
            // every SeriesTrait implementor is layout-compatible with the
            // ChunkedArray<T> it wraps — an invariant upheld elsewhere in
            // the crate; TODO(review) confirm for all implementations.
            unsafe { &*(self as *const dyn SeriesTrait as *const ChunkedArray<T>) }
        } else {
            // Dtype mismatch is a programming error, not a user error.
            panic!(
                "implementation error, cannot get ref {:?} from {:?}",
                T::get_dtype(),
                self.dtype()
            )
        }
    }
}
impl<'a, T> AsMut<ChunkedArray<T>> for dyn SeriesTrait + 'a
where
    T: 'static + PolarsDataType,
{
    fn as_mut(&mut self) -> &mut ChunkedArray<T> {
        if &T::get_dtype() == self.dtype() ||
            // needed because we want to get ref of List no matter what the inner type is.
            (matches!(T::get_dtype(), DataType::List(_)) && matches!(self.dtype(), DataType::List(_)))
        {
            // SAFETY: mirrors the AsRef impl — the runtime dtype matches
            // `T` and the implementor is assumed layout-compatible with
            // ChunkedArray<T>; TODO(review) confirm for all implementations.
            unsafe { &mut *(self as *mut dyn SeriesTrait as *mut ChunkedArray<T>) }
        } else {
            // Dtype mismatch is a programming error, not a user error.
            panic!(
                "implementation error, cannot get ref {:?} from {:?}",
                T::get_dtype(),
                self.dtype()
            )
        }
    }
}
#[cfg(test)]
mod test {
    use crate::prelude::*;
    use crate::series::*;
    use std::convert::TryFrom;

    /// Casting a UInt32 series must succeed for Int64 and Float32 targets.
    #[test]
    fn cast() {
        let source = UInt32Chunked::new("a", &[1, 2]).into_series();
        let as_i64 = source.cast(&DataType::Int64).unwrap();
        assert!(as_i64.i64().is_ok());
        let as_f32 = source.cast(&DataType::Float32).unwrap();
        assert!(as_f32.f32().is_ok());
    }

    /// Construction smoke test for the various `Series::new` entry points.
    #[test]
    fn new_series() {
        let _ = Series::new("boolean series", &vec![true, false, true]);
        let _ = Series::new("int series", &[1, 2, 3]);
        let _ = Int32Chunked::new("a", &[1, 2, 3]).into_series();
    }

    /// A series can be built from a raw arrow primitive array.
    #[test]
    fn new_series_from_arrow_primitive_array() {
        let arrow_array = UInt32Array::from_slice(&[1, 2, 3, 4, 5]);
        let boxed: ArrayRef = Arc::new(arrow_array);
        let _ = Series::try_from(("foo", boxed)).unwrap();
    }

    /// Appending a same-dtype series grows the target; mixed dtypes error.
    #[test]
    fn series_append() {
        let mut target = Series::new("a", &[1, 2]);
        target.append(&Series::new("b", &[3])).unwrap();
        assert_eq!(target.len(), 3);
        // add wrong type
        let mismatched = Series::new("b", &[3.0]);
        assert!(target.append(&mismatched).is_err())
    }

    /// Negative offsets count from the end of the series.
    #[test]
    fn series_slice_works() {
        let series = Series::new("a", &[1i64, 2, 3, 4, 5]);
        let last_three = series.slice(-3, 3);
        let whole_from_neg = series.slice(-5, 5);
        let whole = series.slice(0, 5);
        assert_eq!(last_three.get(0), AnyValue::Int64(3));
        assert_eq!(whole_from_neg.get(0), AnyValue::Int64(1));
        assert_eq!(whole.get(0), AnyValue::Int64(1));
    }

    /// Slices reaching past either end must clamp instead of panicking.
    #[test]
    fn out_of_range_slice_does_not_panic() {
        let series = Series::new("a", &[1i64, 2, 3, 4, 5]);
        for &(offset, length) in &[(-3, 4), (-6, 2), (4, 2)] {
            let _ = series.slice(offset, length);
        }
    }

    /// Rounding to 2 decimal places truncates 1.003 to 1.0.
    #[test]
    #[cfg(feature = "round_series")]
    fn test_round_series() {
        let series = Series::new("a", &[1.003, 2.23222, 3.4352]);
        let rounded = series.round(2).unwrap();
        let ca = rounded.f64().unwrap();
        assert_eq!(ca.get(0), Some(1.0));
    }
}
| 32.784091 | 110 | 0.517764 |
9bf6fb40605ffa2305d6112babdf1825f36152b9 | 5,163 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use gio_sys;
use glib;
use glib::object::IsA;
use glib::translate::*;
use std::fmt;
use Icon;
use MenuModel;
glib_wrapper! {
    // Generates the `MenuItem` GObject wrapper type and its glib boilerplate
    // (ref-counting, type registration) around the raw `GMenuItem` pointer.
    pub struct MenuItem(Object<gio_sys::GMenuItem>);
    match fn {
        get_type => || gio_sys::g_menu_item_get_type(),
    }
}
impl MenuItem {
    /// Thin wrapper over `g_menu_item_new`: builds an item with an optional
    /// label and an optional detailed action string.
    pub fn new(label: Option<&str>, detailed_action: Option<&str>) -> MenuItem {
        unsafe {
            from_glib_full(gio_sys::g_menu_item_new(
                label.to_glib_none().0,
                detailed_action.to_glib_none().0,
            ))
        }
    }
    /// Wrapper over `g_menu_item_new_from_model`: copies item `item_index`
    /// out of an existing menu model.
    pub fn from_model<P: IsA<MenuModel>>(model: &P, item_index: i32) -> MenuItem {
        unsafe {
            from_glib_full(gio_sys::g_menu_item_new_from_model(
                model.as_ref().to_glib_none().0,
                item_index,
            ))
        }
    }
    /// Wrapper over `g_menu_item_new_section`.
    pub fn new_section<P: IsA<MenuModel>>(label: Option<&str>, section: &P) -> MenuItem {
        unsafe {
            from_glib_full(gio_sys::g_menu_item_new_section(
                label.to_glib_none().0,
                section.as_ref().to_glib_none().0,
            ))
        }
    }
    /// Wrapper over `g_menu_item_new_submenu`.
    pub fn new_submenu<P: IsA<MenuModel>>(label: Option<&str>, submenu: &P) -> MenuItem {
        unsafe {
            from_glib_full(gio_sys::g_menu_item_new_submenu(
                label.to_glib_none().0,
                submenu.as_ref().to_glib_none().0,
            ))
        }
    }
    // Varargs C signature: gir cannot generate a binding automatically.
    //pub fn get_attribute(&self, attribute: &str, format_string: &str, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) -> bool {
    //    unsafe { TODO: call gio_sys:g_menu_item_get_attribute() }
    //}
    /// Wrapper over `g_menu_item_get_attribute_value`; returns `None` when the
    /// attribute is absent or does not match `expected_type`.
    pub fn get_attribute_value(
        &self,
        attribute: &str,
        expected_type: Option<&glib::VariantTy>,
    ) -> Option<glib::Variant> {
        unsafe {
            from_glib_full(gio_sys::g_menu_item_get_attribute_value(
                self.to_glib_none().0,
                attribute.to_glib_none().0,
                expected_type.to_glib_none().0,
            ))
        }
    }
    /// Wrapper over `g_menu_item_get_link`.
    pub fn get_link(&self, link: &str) -> Option<MenuModel> {
        unsafe {
            from_glib_full(gio_sys::g_menu_item_get_link(
                self.to_glib_none().0,
                link.to_glib_none().0,
            ))
        }
    }
    // Varargs C signature: gir cannot generate a binding automatically.
    //pub fn set_action_and_target(&self, action: Option<&str>, format_string: Option<&str>, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) {
    //    unsafe { TODO: call gio_sys:g_menu_item_set_action_and_target() }
    //}
    /// Wrapper over `g_menu_item_set_action_and_target_value`.
    pub fn set_action_and_target_value(
        &self,
        action: Option<&str>,
        target_value: Option<&glib::Variant>,
    ) {
        unsafe {
            gio_sys::g_menu_item_set_action_and_target_value(
                self.to_glib_none().0,
                action.to_glib_none().0,
                target_value.to_glib_none().0,
            );
        }
    }
    // Varargs C signature: gir cannot generate a binding automatically.
    //pub fn set_attribute(&self, attribute: &str, format_string: Option<&str>, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) {
    //    unsafe { TODO: call gio_sys:g_menu_item_set_attribute() }
    //}
    /// Wrapper over `g_menu_item_set_attribute_value`; passing `None` for
    /// `value` forwards a null pointer to the C API.
    pub fn set_attribute_value(&self, attribute: &str, value: Option<&glib::Variant>) {
        unsafe {
            gio_sys::g_menu_item_set_attribute_value(
                self.to_glib_none().0,
                attribute.to_glib_none().0,
                value.to_glib_none().0,
            );
        }
    }
    /// Wrapper over `g_menu_item_set_detailed_action`.
    pub fn set_detailed_action(&self, detailed_action: &str) {
        unsafe {
            gio_sys::g_menu_item_set_detailed_action(
                self.to_glib_none().0,
                detailed_action.to_glib_none().0,
            );
        }
    }
    /// Wrapper over `g_menu_item_set_icon`.
    pub fn set_icon<P: IsA<Icon>>(&self, icon: &P) {
        unsafe {
            gio_sys::g_menu_item_set_icon(self.to_glib_none().0, icon.as_ref().to_glib_none().0);
        }
    }
    /// Wrapper over `g_menu_item_set_label`.
    pub fn set_label(&self, label: Option<&str>) {
        unsafe {
            gio_sys::g_menu_item_set_label(self.to_glib_none().0, label.to_glib_none().0);
        }
    }
    /// Wrapper over `g_menu_item_set_link`; `None` clears the link.
    pub fn set_link<P: IsA<MenuModel>>(&self, link: &str, model: Option<&P>) {
        unsafe {
            gio_sys::g_menu_item_set_link(
                self.to_glib_none().0,
                link.to_glib_none().0,
                model.map(|p| p.as_ref()).to_glib_none().0,
            );
        }
    }
    /// Wrapper over `g_menu_item_set_section`; `None` clears the section.
    pub fn set_section<P: IsA<MenuModel>>(&self, section: Option<&P>) {
        unsafe {
            gio_sys::g_menu_item_set_section(
                self.to_glib_none().0,
                section.map(|p| p.as_ref()).to_glib_none().0,
            );
        }
    }
    /// Wrapper over `g_menu_item_set_submenu`; `None` clears the submenu.
    pub fn set_submenu<P: IsA<MenuModel>>(&self, submenu: Option<&P>) {
        unsafe {
            gio_sys::g_menu_item_set_submenu(
                self.to_glib_none().0,
                submenu.map(|p| p.as_ref()).to_glib_none().0,
            );
        }
    }
}
impl fmt::Display for MenuItem {
    /// Display renders just the wrapper type's name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("MenuItem")
    }
}
| 30.017442 | 157 | 0.550261 |
2f92052ad47fa8ca9cf9a27d8b5cab6f03deebdc | 1,904 | // Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use aptos_types::transaction::{ModuleBundle, TransactionPayload};
use forge::{AptosContext, AptosTest, Result, Test};
/// Smoke test that publishes a Move module bundle and then verifies that
/// re-publishing the identical bundle is rejected.
pub struct ModulePublish;

impl Test for ModulePublish {
    /// Unique identifier used by the forge test runner.
    fn name(&self) -> &'static str {
        "smoke-test::aptos::module-publish"
    }
}
#[async_trait::async_trait]
impl AptosTest for ModulePublish {
    /// Compiles the Move package under `src/aptos/move_modules/`, publishes its
    /// first module with the root account, then asserts that publishing the same
    /// bundle a second time fails.
    async fn run<'t>(&self, ctx: &mut AptosContext<'t>) -> Result<()> {
        let base_path =
            std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("src/aptos/move_modules/");
        let build_config = move_package::BuildConfig {
            generate_docs: true,
            generate_abis: true,
            install_dir: Some(base_path.clone()),
            ..Default::default()
        };
        // `compile_package` consumes the config and the config is not used again,
        // so the former `.clone()` here was redundant and has been removed.
        let compiled_package = build_config
            .compile_package(&base_path, &mut std::io::stdout())
            .unwrap();

        // Serialize the first compiled module into a byte blob for publishing.
        let mut blobs = vec![];
        compiled_package
            .compiled_modules()
            .iter_modules()
            .first()
            .unwrap()
            .serialize(&mut blobs)
            .unwrap();

        let txn_factory = ctx.aptos_transaction_factory();
        // First publish: must be accepted by the chain.
        let publish_txn = ctx
            .root_account()
            .sign_with_transaction_builder(txn_factory.payload(TransactionPayload::ModuleBundle(
                ModuleBundle::singleton(blobs.clone()),
            )));
        ctx.client().submit_and_wait(&publish_txn).await?;

        // Second publish of the identical bundle: must be rejected.
        let publish_txn = ctx
            .root_account()
            .sign_with_transaction_builder(txn_factory.payload(TransactionPayload::ModuleBundle(
                ModuleBundle::singleton(blobs),
            )));
        // republish should fail
        ctx.client()
            .submit_and_wait(&publish_txn)
            .await
            .unwrap_err();
        Ok(())
    }
}
| 30.709677 | 96 | 0.578256 |
bb21271b0874acbb867df5168543989dfb1ed2d3 | 3,066 | #[doc = "Register `CIS_CONF1` reader"]
// Reader proxy for the CIS_CONF1 register (svd2rust-generated pattern).
pub struct R(crate::R<CIS_CONF1_SPEC>);
impl core::ops::Deref for R {
    type Target = crate::R<CIS_CONF1_SPEC>;
    /// Expose the generic register-reader API (e.g. `.bits()`) through this proxy.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl From<crate::R<CIS_CONF1_SPEC>> for R {
    /// Wrap the generic reader produced by the register-access layer.
    #[inline(always)]
    fn from(reader: crate::R<CIS_CONF1_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Register `CIS_CONF1` writer"]
// Writer proxy for the CIS_CONF1 register (svd2rust-generated pattern).
pub struct W(crate::W<CIS_CONF1_SPEC>);
impl core::ops::Deref for W {
    type Target = crate::W<CIS_CONF1_SPEC>;
    /// Expose the generic register-writer API through this proxy.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    /// Mutable access to the generic writer, needed for raw bit writes.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl From<crate::W<CIS_CONF1_SPEC>> for W {
    /// Wrap the generic writer produced by the register-access layer.
    #[inline(always)]
    fn from(writer: crate::W<CIS_CONF1_SPEC>) -> Self {
        W(writer)
    }
}
#[doc = "Field `CIS_CONF_W1` reader - "]
// Field reader covering the full 32-bit CIS_CONF_W1 field.
pub struct CIS_CONF_W1_R(crate::FieldReader<u32, u32>);
impl CIS_CONF_W1_R {
    /// Construct a field reader over the raw field bits.
    #[inline(always)]
    pub(crate) fn new(bits: u32) -> Self {
        CIS_CONF_W1_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for CIS_CONF_W1_R {
    type Target = crate::FieldReader<u32, u32>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `CIS_CONF_W1` writer - "]
// Field writer holding a mutable borrow of the register writer proxy.
pub struct CIS_CONF_W1_W<'a> {
    w: &'a mut W,
}
impl<'a> CIS_CONF_W1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    // Unsafe per svd2rust convention: arbitrary bit patterns may be written.
    #[inline(always)]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        self.w.bits = value as u32;
        self.w
    }
}
impl R {
    #[doc = "Bits 0:31"]
    /// Read the full-width CIS_CONF_W1 field.
    #[inline(always)]
    pub fn cis_conf_w1(&self) -> CIS_CONF_W1_R {
        CIS_CONF_W1_R::new(self.bits as u32)
    }
}
impl W {
    #[doc = "Bits 0:31"]
    /// Obtain a writer for the full-width CIS_CONF_W1 field.
    #[inline(always)]
    pub fn cis_conf_w1(&mut self) -> CIS_CONF_W1_W {
        CIS_CONF_W1_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    // Unsafe: bypasses any field-level invariants.
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = "\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [cis_conf1](index.html) module"]
// Zero-sized marker type tying the reader/writer proxies to this register.
pub struct CIS_CONF1_SPEC;
impl crate::RegisterSpec for CIS_CONF1_SPEC {
    // The register is 32 bits wide.
    type Ux = u32;
}
#[doc = "`read()` method returns [cis_conf1::R](R) reader structure"]
impl crate::Readable for CIS_CONF1_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [cis_conf1::W](W) writer structure"]
impl crate::Writable for CIS_CONF1_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets CIS_CONF1 to value 0xffff_ffff"]
impl crate::Resettable for CIS_CONF1_SPEC {
    /// After reset every bit of CIS_CONF1 is set.
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        u32::MAX
    }
}
| 29.480769 | 390 | 0.612198 |
1d6b6c119ab9a6ff52be5690a73160ae86b8c85b | 17,768 | #![cfg_attr(not(feature = "std"), no_std)]
// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
#![recursion_limit = "256"]
// Make the WASM binary available.
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
use pallet_grandpa::fg_primitives;
use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList};
use sp_api::impl_runtime_apis;
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
use sp_runtime::traits::{
BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Saturating, Verify,
};
use sp_runtime::{
create_runtime_str, generic, impl_opaque_keys,
transaction_validity::{TransactionSource, TransactionValidity},
ApplyExtrinsicResult, MultiSignature,
};
use sp_std::prelude::*;
#[cfg(feature = "std")]
use sp_version::NativeVersion;
use sp_version::RuntimeVersion;
// A few exports that help ease life for downstream crates.
pub use frame_support::{
construct_runtime, parameter_types,
traits::{KeyOwnerProofSystem, Randomness},
weights::{
constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND},
IdentityFee, Weight,
},
StorageValue,
};
pub use pallet_balances::Call as BalancesCall;
pub use pallet_timestamp::Call as TimestampCall;
#[cfg(any(feature = "std", test))]
pub use sp_runtime::BuildStorage;
pub use sp_runtime::{Perbill, Permill};
/// Import the template pallet.
pub use pallet_template;
/// An index to a block.
pub type BlockNumber = u32;
/// Alias to 512-bit hash when used in the context of a transaction signature on the chain.
pub type Signature = MultiSignature;
/// Some way of identifying an account on the chain. We intentionally make it equivalent
/// to the public key of our transaction signing scheme.
pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
/// The type for looking up accounts. We don't expect more than 4 billion of them, but you
/// never know...
pub type AccountIndex = u32;
/// Balance of an account.
pub type Balance = u128;
/// Index of a transaction in the chain (used as the account nonce type below).
pub type Index = u32;
/// A hash of some data used by the chain.
pub type Hash = sp_core::H256;
/// Digest item type, parameterized over the chain's hash type.
pub type DigestItem = generic::DigestItem<Hash>;
/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know
/// the specifics of the runtime. They can then be made to be agnostic over specific formats
/// of data like extrinsics, allowing for them to continue syncing the network through upgrades
/// to even the core data structures.
pub mod opaque {
    use super::*;
    pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic;
    /// Opaque block header type.
    pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
    /// Opaque block type.
    pub type Block = generic::Block<Header, UncheckedExtrinsic>;
    /// Opaque block identifier type.
    pub type BlockId = generic::BlockId<Block>;
    impl_opaque_keys! {
        // Session keys bundle: one Aura key (block authoring) and one
        // Grandpa key (finality voting).
        pub struct SessionKeys {
            pub aura: Aura,
            pub grandpa: Grandpa,
        }
    }
}
/// Runtime version record. Nodes compare this against the on-chain wasm to
/// decide whether the native executor may be used; bump `spec_version` on any
/// logic change.
pub const VERSION: RuntimeVersion = RuntimeVersion {
    spec_name: create_runtime_str!("node-template"),
    impl_name: create_runtime_str!("node-template"),
    authoring_version: 1,
    spec_version: 100,
    impl_version: 1,
    apis: RUNTIME_API_VERSIONS,
    transaction_version: 1,
};
/// Target block time in milliseconds (6 seconds).
pub const MILLISECS_PER_BLOCK: u64 = 6000;
/// Aura slot duration; equal to the block time.
pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
// Time is measured by number of blocks.
pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
pub const HOURS: BlockNumber = MINUTES * 60;
pub const DAYS: BlockNumber = HOURS * 24;
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
    let can_author_with = Default::default();
    NativeVersion {
        runtime_version: VERSION,
        can_author_with,
    }
}
parameter_types! {
    pub const BlockHashCount: BlockNumber = 2400;
    /// We allow for 2 seconds of compute with a 6 second average block time.
    pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND;
    pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
    /// Assume 10% of weight for average on_initialize calls.
    pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get()
        .saturating_sub(Perbill::from_percent(10)) * MaximumBlockWeight::get();
    pub const MaximumBlockLength: u32 = 5 * 1024 * 1024;
    pub const Version: RuntimeVersion = VERSION;
}
// Configure FRAME pallets to include in runtime.
impl frame_system::Trait for Runtime {
    /// The basic call filter to use in dispatchable.
    type BaseCallFilter = ();
    /// The identifier used to distinguish between accounts.
    type AccountId = AccountId;
    /// The aggregated dispatch type that is available for extrinsics.
    type Call = Call;
    /// The lookup mechanism to get account ID from whatever is passed in dispatchers.
    type Lookup = multiaddress::AccountIdLookup<AccountId, ()>;
    /// The index type for storing how many extrinsics an account has signed.
    type Index = Index;
    /// The index type for blocks.
    type BlockNumber = BlockNumber;
    /// The type for hashing blocks and tries.
    type Hash = Hash;
    /// The hashing algorithm used.
    type Hashing = BlakeTwo256;
    /// The header type.
    type Header = generic::Header<BlockNumber, BlakeTwo256>;
    /// The ubiquitous event type.
    type Event = Event;
    /// The ubiquitous origin type.
    type Origin = Origin;
    /// Maximum number of block number to block hash mappings to keep (oldest pruned first).
    type BlockHashCount = BlockHashCount;
    /// Maximum weight of each block.
    type MaximumBlockWeight = MaximumBlockWeight;
    /// The weight of database operations that the runtime can invoke.
    type DbWeight = RocksDbWeight;
    /// The weight of the overhead invoked on the block import process, independent of the
    /// extrinsics included in that block.
    type BlockExecutionWeight = BlockExecutionWeight;
    /// The base weight of any extrinsic processed by the runtime, independent of the
    /// logic of that extrinsic. (Signature verification, nonce increment, fee, etc...)
    type ExtrinsicBaseWeight = ExtrinsicBaseWeight;
    /// The maximum weight that a single extrinsic of `Normal` dispatch class can have,
    /// independent of the logic of that extrinsics. (Roughly max block weight - average on
    /// initialize cost).
    type MaximumExtrinsicWeight = MaximumExtrinsicWeight;
    /// Maximum size of all encoded transactions (in bytes) that are allowed in one block.
    type MaximumBlockLength = MaximumBlockLength;
    /// Portion of the block weight that is available to all normal transactions.
    type AvailableBlockRatio = AvailableBlockRatio;
    /// Version of the runtime.
    type Version = Version;
    /// Converts a module to the index of the module in `construct_runtime!`.
    ///
    /// This type is being generated by `construct_runtime!`.
    type PalletInfo = PalletInfo;
    /// What to do if a new account is created.
    type OnNewAccount = ();
    /// What to do if an account is fully reaped from the system.
    type OnKilledAccount = ();
    /// The data to be stored in an account.
    type AccountData = pallet_balances::AccountData<Balance>;
    /// Weight information for the extrinsics of this pallet.
    type SystemWeightInfo = ();
}
impl pallet_aura::Trait for Runtime {
    /// Aura authorities are identified by sr25519 authority ids.
    type AuthorityId = AuraId;
}
impl pallet_grandpa::Trait for Runtime {
    type Event = Event;
    type Call = Call;
    /// Key-ownership proving is disabled (`()`), so the proof and
    /// identification types below collapse accordingly.
    type KeyOwnerProofSystem = ();
    type KeyOwnerProof =
        <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
    type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
        KeyTypeId,
        GrandpaId,
    )>>::IdentificationTuple;
    /// Equivocation reports are not handled in this template runtime.
    type HandleEquivocation = ();
    type WeightInfo = ();
}
parameter_types! {
    // A timestamp update must advance by at least half a slot.
    pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
}
impl pallet_timestamp::Trait for Runtime {
    /// A timestamp: milliseconds since the unix epoch.
    type Moment = u64;
    /// Aura is notified of each timestamp so it can validate slot claims.
    type OnTimestampSet = Aura;
    type MinimumPeriod = MinimumPeriod;
    type WeightInfo = ();
}
parameter_types! {
    // Accounts below this balance are reaped.
    pub const ExistentialDeposit: u128 = 500;
    pub const MaxLocks: u32 = 50;
}
impl pallet_balances::Trait for Runtime {
    type MaxLocks = MaxLocks;
    /// The type for recording an account's balance.
    type Balance = Balance;
    /// The ubiquitous event type.
    type Event = Event;
    /// Dust from reaped accounts is dropped (burned).
    type DustRemoval = ();
    type ExistentialDeposit = ExistentialDeposit;
    /// Account balance records live in the System pallet's account storage.
    type AccountStore = System;
    type WeightInfo = ();
}
parameter_types! {
    // Fee charged per encoded byte of the extrinsic.
    pub const TransactionByteFee: Balance = 1;
}
impl pallet_transaction_payment::Trait for Runtime {
    type Currency = Balances;
    /// Collected fees are dropped (not redistributed).
    type OnTransactionPayment = ();
    type TransactionByteFee = TransactionByteFee;
    /// One unit of weight maps to one unit of fee.
    type WeightToFee = IdentityFee<Balance>;
    /// No dynamic fee multiplier adjustment.
    type FeeMultiplierUpdate = ();
}
impl pallet_sudo::Trait for Runtime {
    type Event = Event;
    /// Calls dispatchable by the sudo key.
    type Call = Call;
}
/// Configure the template pallet in pallets/template.
impl pallet_template::Trait for Runtime {
    type Event = Event;
}
// Create the runtime by composing the FRAME pallets that were previously configured.
// NOTE: the declaration order below fixes each pallet's module index and hence
// the on-chain encoding; reordering is a breaking change for a live chain.
construct_runtime!(
    pub enum Runtime where
        Block = Block,
        NodeBlock = opaque::Block,
        UncheckedExtrinsic = UncheckedExtrinsic
    {
        System: frame_system::{Module, Call, Config, Storage, Event<T>},
        RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage},
        Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent},
        Aura: pallet_aura::{Module, Config<T>, Inherent},
        Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event},
        Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
        TransactionPayment: pallet_transaction_payment::{Module, Storage},
        Sudo: pallet_sudo::{Module, Call, Config<T>, Storage, Event<T>},
        // Include the custom logic from the template pallet in the runtime.
        TemplateModule: pallet_template::{Module, Call, Storage, Event<T>},
    }
);
/// The address format for describing accounts.
mod multiaddress;
pub type Address = multiaddress::MultiAddress<AccountId, ()>;
/// Block header type as expected by this runtime.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
/// Block type as expected by this runtime.
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// A Block signed with a Justification
pub type SignedBlock = generic::SignedBlock<Block>;
/// BlockId type as expected by this runtime.
pub type BlockId = generic::BlockId<Block>;
/// The SignedExtension to the basic transaction logic.
/// NOTE: the tuple order matters — it fixes both the order of pre-dispatch
/// checks and the extrinsic encoding.
pub type SignedExtra = (
    frame_system::CheckSpecVersion<Runtime>,
    frame_system::CheckTxVersion<Runtime>,
    frame_system::CheckGenesis<Runtime>,
    frame_system::CheckEra<Runtime>,
    frame_system::CheckNonce<Runtime>,
    frame_system::CheckWeight<Runtime>,
    pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
);
/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
/// Extrinsic type that has already been checked.
pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>;
/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllModules,
>;
impl_runtime_apis! {
    // Core block-execution API required by every runtime.
    impl sp_api::Core<Block> for Runtime {
        fn version() -> RuntimeVersion {
            VERSION
        }
        fn execute_block(block: Block) {
            Executive::execute_block(block)
        }
        fn initialize_block(header: &<Block as BlockT>::Header) {
            Executive::initialize_block(header)
        }
    }
    impl sp_api::Metadata<Block> for Runtime {
        fn metadata() -> OpaqueMetadata {
            Runtime::metadata().into()
        }
    }
    // Block authoring: apply extrinsics, inject inherents, finalize.
    impl sp_block_builder::BlockBuilder<Block> for Runtime {
        fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
            Executive::apply_extrinsic(extrinsic)
        }
        fn finalize_block() -> <Block as BlockT>::Header {
            Executive::finalize_block()
        }
        fn inherent_extrinsics(data: sp_inherents::InherentData) ->
            Vec<<Block as BlockT>::Extrinsic> {
            data.create_extrinsics()
        }
        fn check_inherents(
            block: Block,
            data: sp_inherents::InherentData,
        ) -> sp_inherents::CheckInherentsResult {
            data.check_extrinsics(&block)
        }
        fn random_seed() -> <Block as BlockT>::Hash {
            RandomnessCollectiveFlip::random_seed()
        }
    }
    // Transaction-pool validation hook.
    impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
        fn validate_transaction(
            source: TransactionSource,
            tx: <Block as BlockT>::Extrinsic,
        ) -> TransactionValidity {
            Executive::validate_transaction(source, tx)
        }
    }
    impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
        fn offchain_worker(header: &<Block as BlockT>::Header) {
            Executive::offchain_worker(header)
        }
    }
    // Aura consensus: slot timing and the current authority set.
    impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
        fn slot_duration() -> u64 {
            Aura::slot_duration()
        }
        fn authorities() -> Vec<AuraId> {
            Aura::authorities()
        }
    }
    impl sp_session::SessionKeys<Block> for Runtime {
        fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
            opaque::SessionKeys::generate(seed)
        }
        fn decode_session_keys(
            encoded: Vec<u8>,
        ) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
            opaque::SessionKeys::decode_into_raw_public_keys(&encoded)
        }
    }
    // Grandpa finality: authority set; equivocation handling is stubbed out.
    impl fg_primitives::GrandpaApi<Block> for Runtime {
        fn grandpa_authorities() -> GrandpaAuthorityList {
            Grandpa::grandpa_authorities()
        }
        fn submit_report_equivocation_unsigned_extrinsic(
            _equivocation_proof: fg_primitives::EquivocationProof<
                <Block as BlockT>::Hash,
                NumberFor<Block>,
            >,
            _key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof,
        ) -> Option<()> {
            None
        }
        fn generate_key_ownership_proof(
            _set_id: fg_primitives::SetId,
            _authority_id: GrandpaId,
        ) -> Option<fg_primitives::OpaqueKeyOwnershipProof> {
            // NOTE: this is the only implementation possible since we've
            // defined our key owner proof type as a bottom type (i.e. a type
            // with no values).
            None
        }
    }
    impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Index> for Runtime {
        fn account_nonce(account: AccountId) -> Index {
            System::account_nonce(account)
        }
    }
    impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance>
    for Runtime {
        fn query_info(
            uxt: <Block as BlockT>::Extrinsic,
            len: u32,
        ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
            TransactionPayment::query_info(uxt, len)
        }
    }
    #[cfg(feature = "runtime-benchmarks")]
    impl frame_benchmarking::Benchmark<Block> for Runtime {
        fn dispatch_benchmark(
            config: frame_benchmarking::BenchmarkConfig
        ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
            use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey};
            use frame_system_benchmarking::Module as SystemBench;
            impl frame_system_benchmarking::Trait for Runtime {}
            // Storage keys whose accesses are excluded from benchmark results.
            let whitelist: Vec<TrackedStorageKey> = vec![
                // Block Number
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac")
                    .to_vec().into(),
                // Total Issuance
                hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80")
                    .to_vec().into(),
                // Execution Phase
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a")
                    .to_vec().into(),
                // Event Count
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850")
                    .to_vec().into(),
                // System Events
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7")
                    .to_vec().into(),
            ];
            let mut batches = Vec::<BenchmarkBatch>::new();
            let params = (&config, &whitelist);
            add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>);
            add_benchmark!(params, batches, pallet_balances, Balances);
            add_benchmark!(params, batches, pallet_timestamp, Timestamp);
            add_benchmark!(params, batches, pallet_template, TemplateModule);
            if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) }
            Ok(batches)
        }
    }
}
| 36.113821 | 101 | 0.676947 |
e6a45295ac718dc6a78c5762b4913116a2e00c81 | 9,941 | use crate::common::*;
use datamodel::{ast, diagnostics::DatamodelError};
#[test]
fn shound_fail_on_attribute_duplication() {
    // NOTE(review): fn name keeps its historical typo ("shound") to preserve
    // the public test identifier.
    let schema = r#"
    type ID = String @id @default(cuid())
    model Model {
        id ID @id
    }
    "#;
    let diagnostics = parse_error(schema);
    diagnostics.assert_is_at(
        1,
        DatamodelError::new_duplicate_attribute_error("id", ast::Span::new(23, 25)),
    );
    diagnostics.assert_is_at(
        0,
        DatamodelError::new_duplicate_attribute_error("id", ast::Span::new(77, 79)),
    );
}
#[test]
fn shound_fail_on_attribute_duplication_recursive() {
    // `@default` is applied both through the type-alias chain and directly on
    // the field, so duplication must be reported at both locations.
    let schema = r#"
    type MyStringWithDefault = String @default(cuid())
    type ID = MyStringWithDefault @id
    model Model {
        id ID @default(cuid())
    }
    "#;
    let diagnostics = parse_error(schema);
    diagnostics.assert_is_at(
        1,
        DatamodelError::new_duplicate_attribute_error("default", ast::Span::new(40, 55)),
    );
    diagnostics.assert_is_at(
        0,
        DatamodelError::new_duplicate_attribute_error("default", ast::Span::new(128, 143)),
    );
}
#[test]
fn should_fail_on_endless_recursive_type_def() {
    // A three-alias cycle: each alias in the cycle must produce its own
    // "recursive type definitions are not allowed" diagnostic.
    let dml = r#"
    type MyString = ID
    type MyStringWithDefault = MyString
    type ID = MyStringWithDefault
    model Model {
        id ID
    }
    "#;
    let error = datamodel::parse_schema(dml).map(drop).unwrap_err();
    // Snapshot of the rendered (ANSI-colored) diagnostics output.
    let expectation = expect![[r#"
        [1;91merror[0m: [1mError validating: Recursive type definitions are not allowed. Recursive path was: MyString -> ID -> MyStringWithDefault -> MyString.[0m
          [1;94m-->[0m  [4mschema.prisma:2[0m
        [1;94m   | [0m
        [1;94m 1 | [0m
        [1;94m 2 | [0m    type MyString = [1;91mID[0m
        [1;94m   | [0m
        [1;91merror[0m: [1mError validating: Recursive type definitions are not allowed. Recursive path was: MyStringWithDefault -> MyString -> ID -> MyStringWithDefault.[0m
          [1;94m-->[0m  [4mschema.prisma:3[0m
        [1;94m   | [0m
        [1;94m 2 | [0m    type MyString = ID
        [1;94m 3 | [0m    type MyStringWithDefault = [1;91mMyString[0m
        [1;94m   | [0m
        [1;91merror[0m: [1mError validating: Recursive type definitions are not allowed. Recursive path was: ID -> MyStringWithDefault -> MyString -> ID.[0m
          [1;94m-->[0m  [4mschema.prisma:4[0m
        [1;94m   | [0m
        [1;94m 3 | [0m    type MyStringWithDefault = MyString
        [1;94m 4 | [0m    type ID = [1;91mMyStringWithDefault[0m
        [1;94m   | [0m
    "#]];
    expectation.assert_eq(&error);
}
#[test]
fn shound_fail_on_unresolvable_type() {
    // `Hugo` is not a declared type anywhere, so resolution must fail at the
    // first alias in the chain.
    let schema = r#"
    type MyString = Hugo
    type MyStringWithDefault = MyString
    type ID = MyStringWithDefault
    model Model {
        id ID
    }
    "#;
    let diagnostics = parse_error(schema);
    diagnostics.assert_is(DatamodelError::new_type_not_found_error("Hugo", ast::Span::new(21, 25)));
}
#[test]
fn should_fail_on_custom_related_types() {
    // Custom type aliases may only wrap scalar types — aliasing a model
    // (`User`) must be rejected.
    let schema = r#"
    type UserViaEmail = User @relation(references: email)
    type UniqueString = String @unique
    model User {
        id Int @id
        email UniqueString
        posts Post[]
    }
    model Post {
        id Int @id
        user UserViaEmail
    }
    "#;
    let diagnostics = parse_error(schema);
    diagnostics.assert_is(DatamodelError::new_validation_error(
        "Only scalar types can be used for defining custom types.".to_owned(),
        ast::Span::new(25, 29),
    ));
}
#[test]
fn should_fail_on_native_type_with_invalid_datasource_name() {
    // The native-type prefix (`pg.`) must match the declared datasource
    // name (`db`), so this must error and suggest `db.Integer`.
    let dml = r#"
        datasource db {
          provider = "postgres"
          url = "postgresql://"
        }
        model Blog {
            id     Int    @id
            bigInt Int    @pg.Integer
        }
    "#;
    let error = parse_error(dml);
    error.assert_is(DatamodelError::new(
        "The prefix pg is invalid. It must be equal to the name of an existing datasource e.g. db. Did you mean to use db.Integer?".into(),
        ast::Span::new(178, 188),
    ));
}
#[test]
fn should_fail_on_native_type_with_invalid_number_of_arguments() {
    // `VarChar` accepts at most one optional argument; three must be rejected.
    let dml = r#"
        datasource pg {
          provider = "postgres"
          url = "postgresql://"
        }
        model Blog {
            id     Int    @id
            bigInt Int    @pg.Integer
            foobar String @pg.VarChar(2, 3, 4)
        }
    "#;
    let error = parse_error(dml);
    error.assert_is(DatamodelError::new(
        "Native type VarChar takes 1 optional arguments, but received 3.".into(),
        ast::Span::new(216, 235),
    ));
}
#[test]
fn should_fail_on_native_type_with_unknown_type() {
    // `Numerical` is not a known postgres native type; the valid `VarChar`
    // on the next field must not mask the error.
    let dml = r#"
        datasource pg {
          provider = "postgres"
          url = "postgresql://"
        }
        model Blog {
            id     Int    @id
            bigInt Int    @pg.Numerical(3, 4)
            foobar String @pg.VarChar(5)
        }
    "#;
    let error = parse_error(dml);
    error.assert_is(DatamodelError::new(
        "Native type Numerical is not supported for postgresql connector.".into(),
        ast::Span::new(178, 196),
    ));
}
#[test]
fn should_fail_on_native_type_with_incompatible_type() {
    // Each field whose declared scalar type does not fit the native type
    // must produce its own diagnostic (two expected here).
    let dml = r#"
        datasource pg {
          provider = "postgres"
          url = "postgresql://"
        }
        model Blog {
            id     Int    @id
            foobar Boolean @pg.VarChar(5)
            foo    Int    @pg.BigInt
        }
    "#;
    let error = parse_error(dml);
    error.assert_length(2);
    error.assert_is_at(
        0,
        DatamodelError::new(
            "Native type VarChar is not compatible with declared field type Boolean, expected field type String."
                .into(),
            ast::Span::new(179, 192),
        ),
    );
    error.assert_is_at(
        1,
        DatamodelError::new(
            "Native type BigInt is not compatible with declared field type Int, expected field type BigInt.".into(),
            ast::Span::new(214, 223),
        ),
    );
}
#[test]
fn should_fail_on_native_type_with_invalid_arguments() {
    // A non-numeric native-type argument (`a`) must surface the underlying
    // integer-parse failure.
    let schema = r#"
        datasource pg {
          provider = "postgres"
          url = "postgresql://"
        }
        model Blog {
            id     Int    @id
            foobar String @pg.VarChar(a)
        }
    "#;
    let diagnostics = parse_error(schema);
    diagnostics.assert_is(DatamodelError::new(
        "Expected a numeric value, but failed while parsing \"a\": invalid digit found in string.".into(),
        ast::Span::new(178, 191),
    ));
}
#[test]
fn should_fail_on_native_type_in_unsupported_postgres() {
    // `Unsupported("...")` strings that map to known postgres native types
    // must be rejected with a hint to use the native-type notation; truly
    // unknown strings (last two fields) pass through without a diagnostic.
    let dml = r#"
        datasource pg {
          provider = "postgres"
          url = "postgresql://"
        }
        model Blog {
            id              Int    @id
            decimal         Unsupported("Decimal(10,2)")
            text            Unsupported("Text")
            unsupported     Unsupported("Some random stuff")
            unsupportes2    Unsupported("Some random (2,5) do something")
        }
    "#;
    let error = parse_error(dml);
    error.assert_are(&[
        DatamodelError::new_validation_error(
            "The type `Unsupported(\"Decimal(10,2)\")` you specified in the type definition for the field `decimal` is supported as a native type by Prisma. Please use the native type notation `Decimal @pg.Decimal(10,2)` for full support.".to_owned(),
            ast::Span::new(172, 217),
        ),
        DatamodelError::new_validation_error(
            "The type `Unsupported(\"Text\")` you specified in the type definition for the field `text` is supported as a native type by Prisma. Please use the native type notation `String @pg.Text` for full support.".to_owned(),
            ast::Span::new(229, 265),
        )
    ]);
}
#[test]
fn should_fail_on_native_type_in_unsupported_mysql() {
    // Same rule as the postgres case, exercised against the mysql connector.
    let dml = r#"
        datasource pg {
          provider = "mysql"
          url = "mysql://"
        }
        model Blog {
            id      Int    @id
            text    Unsupported("Text")
            decimal Unsupported("Float")
        }
    "#;
    let error = parse_error(dml);
    error.assert_are(&[
        DatamodelError::new_validation_error(
            "The type `Unsupported(\"Text\")` you specified in the type definition for the field `text` is supported as a native type by Prisma. Please use the native type notation `String @pg.Text` for full support.".to_owned(),
            ast::Span::new(160, 192),
        ),
        DatamodelError::new_validation_error(
            "The type `Unsupported(\"Float\")` you specified in the type definition for the field `decimal` is supported as a native type by Prisma. Please use the native type notation `Float @pg.Float` for full support.".to_owned(),
            ast::Span::new(204, 237),
        )
    ]);
}
#[test]
fn should_fail_on_native_type_in_unsupported_sqlserver() {
    // sqlserver connector: matching is case-sensitive — `"TEXT"` (third
    // field) is not flagged, only `"Text"` and `"Real"` are.
    let dml = r#"
        datasource pg {
          provider = "sqlserver"
          url = "sqlserver://"
        }
        model Blog {
            id      Int    @id
            text    Unsupported("Text")
            decimal Unsupported("Real")
            TEXT    Unsupported("TEXT")
        }
    "#;
    let error = parse_error(dml);
    error.assert_are(&[
        DatamodelError::new_validation_error(
            "The type `Unsupported(\"Text\")` you specified in the type definition for the field `text` is supported as a native type by Prisma. Please use the native type notation `String @pg.Text` for full support.".to_owned(),
            ast::Span::new(168, 200),
        ),
        DatamodelError::new_validation_error(
            "The type `Unsupported(\"Real\")` you specified in the type definition for the field `decimal` is supported as a native type by Prisma. Please use the native type notation `Float @pg.Real` for full support.".to_owned(),
            ast::Span::new(212, 244),
        )
    ]);
}
| 28.814493 | 247 | 0.576401 |
1d789797e088c00742158202981ce85ff0c7f459 | 1,362 | use super::*;
use crate::TransformContent;
use std::marker;
macro_rules! add_mat_impl {
($($matrix:ident);*) => {
$(
impl<V, O, S: ToSlice<T>, T: RealNumber> RededicateForceOps<$matrix<O, S, T>>
for $matrix<V, S, T>
where V: RededicateForceOps<O> + Vector<T>,
T: RealNumber,
O: Vector<T> {
fn rededicate_from_force(origin: $matrix<O, S, T>) -> Self {
let rows = origin.rows.transform(V::rededicate_from_force);
$matrix {
rows: rows,
storage_type: marker::PhantomData,
number_type: marker::PhantomData
}
}
fn rededicate_with_runtime_data(
origin: $matrix<O, S, T>,
is_complex: bool,
domain: DataDomain) -> Self {
let rows =
origin.rows.transform(
|v|V::rededicate_with_runtime_data(v, is_complex, domain));
$matrix {
rows: rows,
storage_type: marker::PhantomData,
number_type: marker::PhantomData
}
}
}
)*
}
}
add_mat_impl!(MatrixMxN; Matrix2xN; Matrix3xN; Matrix4xN);
| 32.428571 | 87 | 0.459618 |
72be1e0f3adfc6e32661d9a07b7944a799d82be3 | 2,946 | //! Types for the *m.key.verification.done* event.
use ruma_events_macros::EventContent;
use serde::{Deserialize, Serialize};
use super::Relation;
use crate::MessageEvent;
/// Event signaling that the interactive key verification has successfully
/// concluded.
pub type DoneEvent = MessageEvent<DoneEventContent>;
/// The payload for a to-device `m.key.verification.done` event.
#[derive(Clone, Debug, Deserialize, Serialize, EventContent)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[ruma_event(type = "m.key.verification.done", kind = ToDevice)]
pub struct DoneToDeviceEventContent {
/// An opaque identifier for the verification process.
///
/// Must be the same as the one used for the *m.key.verification.start* message.
pub transaction_id: String,
}
impl DoneToDeviceEventContent {
/// Creates a new `DoneToDeviceEventContent` with the given transaction ID.
pub fn new(transaction_id: String) -> Self {
Self { transaction_id }
}
}
/// The payload for a in-room `m.key.verification.done` event.
#[derive(Clone, Debug, Deserialize, Serialize, EventContent)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[ruma_event(type = "m.key.verification.done", kind = Message)]
pub struct DoneEventContent {
/// Relation signaling which verification request this event is responding to.
#[serde(rename = "m.relates_to")]
pub relates_to: Relation,
}
impl DoneEventContent {
/// Creates a new `DoneEventContent` with the given relation.
pub fn new(relates_to: Relation) -> Self {
Self { relates_to }
}
}
#[cfg(test)]
mod tests {
use matches::assert_matches;
use ruma_identifiers::event_id;
use ruma_serde::Raw;
use serde_json::{from_value as from_json_value, json, to_value as to_json_value};
use super::DoneEventContent;
use crate::key::verification::Relation;
#[test]
fn serialization() {
let event_id = event_id!("$1598361704261elfgc:localhost");
let json_data = json!({
"m.relates_to": {
"rel_type": "m.reference",
"event_id": event_id,
}
});
let content = DoneEventContent { relates_to: Relation { event_id } };
assert_eq!(to_json_value(&content).unwrap(), json_data);
}
#[test]
fn deserialization() {
let id = event_id!("$1598361704261elfgc:localhost");
let json_data = json!({
"m.relates_to": {
"rel_type": "m.reference",
"event_id": id,
}
});
assert_matches!(
from_json_value::<Raw<DoneEventContent>>(json_data)
.unwrap()
.deserialize()
.unwrap(),
DoneEventContent {
relates_to: Relation {
event_id
},
} if event_id == id
);
}
}
| 30.061224 | 85 | 0.628649 |
bbe875c4839b8b2d2544f01601f95816964064e8 | 4,058 | #[macro_use]
extern crate criterion;
use bytes::Bytes;
#[cfg(has_asm)]
use ckb_vm::machine::{aot::AotCompilingMachine, asm::AsmMachine, VERSION0};
use ckb_vm::{run, SparseMemory, ISA_IMC};
use criterion::Criterion;
use std::fs::File;
use std::io::Read;
fn interpret_benchmark(c: &mut Criterion) {
c.bench_function("interpret secp256k1_bench", |b| {
let mut file = File::open("benches/data/secp256k1_bench").unwrap();
let mut buffer = Vec::new();
file.read_to_end(&mut buffer).unwrap();
let buffer = Bytes::from(buffer);
let args: Vec<Bytes> = vec!["secp256k1_bench",
"033f8cf9c4d51a33206a6c1c6b27d2cc5129daa19dbd1fc148d395284f6b26411f",
"304402203679d909f43f073c7c1dcf8468a485090589079ee834e6eed92fea9b09b06a2402201e46f1075afa18f306715e7db87493e7b7e779569aa13c64ab3d09980b3560a3",
"foo",
"bar"].into_iter().map(|a| a.into()).collect();
b.iter(|| run::<u64, SparseMemory<u64>>(&buffer, &args[..]).unwrap());
});
}
#[cfg(has_asm)]
fn asm_benchmark(c: &mut Criterion) {
c.bench_function("interpret secp256k1_bench via assembly", |b| {
let mut file = File::open("benches/data/secp256k1_bench").unwrap();
let mut buffer = Vec::new();
file.read_to_end(&mut buffer).unwrap();
let buffer = Bytes::from(buffer);
let args: Vec<Bytes> = vec!["secp256k1_bench",
"033f8cf9c4d51a33206a6c1c6b27d2cc5129daa19dbd1fc148d395284f6b26411f",
"304402203679d909f43f073c7c1dcf8468a485090589079ee834e6eed92fea9b09b06a2402201e46f1075afa18f306715e7db87493e7b7e779569aa13c64ab3d09980b3560a3",
"foo",
"bar"].into_iter().map(|a| a.into()).collect();
b.iter(|| {
let mut machine = AsmMachine::default();
machine.load_program(&buffer, &args[..]).unwrap();
machine.run().unwrap()
});
});
}
#[cfg(has_asm)]
fn aot_benchmark(c: &mut Criterion) {
c.bench_function("aot secp256k1_bench", |b| {
let mut file = File::open("benches/data/secp256k1_bench").unwrap();
let mut buffer = Vec::new();
file.read_to_end(&mut buffer).unwrap();
let buffer = Bytes::from(buffer);
let args: Vec<Bytes> = vec!["secp256k1_bench",
"033f8cf9c4d51a33206a6c1c6b27d2cc5129daa19dbd1fc148d395284f6b26411f",
"304402203679d909f43f073c7c1dcf8468a485090589079ee834e6eed92fea9b09b06a2402201e46f1075afa18f306715e7db87493e7b7e779569aa13c64ab3d09980b3560a3",
"foo",
"bar"].into_iter().map(|a| a.into()).collect();
let mut aot_machine = AotCompilingMachine::load(&buffer.clone(), None, ISA_IMC, VERSION0).unwrap();
let result = aot_machine.compile().unwrap();
b.iter(|| {
let mut machine = AsmMachine::default_with_aot_code(&result);
machine.load_program(&buffer, &args[..]).unwrap();
machine.run().unwrap()
});
});
}
#[cfg(has_asm)]
fn aot_compiling_benchmark(c: &mut Criterion) {
c.bench_function("compiling secp256k1_bench for aot", |b| {
let mut file = File::open("benches/data/secp256k1_bench").unwrap();
let mut buffer = Vec::new();
file.read_to_end(&mut buffer).unwrap();
let buffer = Bytes::from(buffer);
b.iter(|| {
AotCompilingMachine::load(&buffer.clone(), None, ISA_IMC, VERSION0)
.unwrap()
.compile()
.unwrap()
});
});
}
#[cfg(not(has_asm))]
criterion_group!(benches, interpret_benchmark,);
#[cfg(has_asm)]
criterion_group!(
benches,
interpret_benchmark,
asm_benchmark,
aot_benchmark,
aot_compiling_benchmark
);
criterion_main!(benches);
| 38.647619 | 181 | 0.593642 |
bf932003e3c0d4f451dd0325c4f5b8d09f84a89c | 49,227 | // Generated from definition io.k8s.api.extensions.v1beta1.NetworkPolicy
/// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods
#[derive(Clone, Debug, Default, PartialEq)]
pub struct NetworkPolicy {
/// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
pub metadata: Option<crate::v1_15::apimachinery::pkg::apis::meta::v1::ObjectMeta>,
/// Specification of the desired behavior for this NetworkPolicy.
pub spec: Option<crate::v1_15::api::extensions::v1beta1::NetworkPolicySpec>,
}
// Begin extensions/v1beta1/NetworkPolicy
// Generated from operation createExtensionsV1beta1NamespacedNetworkPolicy
impl NetworkPolicy {
/// create a NetworkPolicy
///
/// Use the returned [`crate::ResponseBody`]`<`[`CreateNamespacedNetworkPolicyResponse`]`>` constructor, or [`CreateNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn create_namespaced_network_policy(
namespace: &str,
body: &crate::v1_15::api::extensions::v1beta1::NetworkPolicy,
optional: CreateNamespacedNetworkPolicyOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<CreateNamespacedNetworkPolicyResponse>), crate::RequestError> {
let CreateNamespacedNetworkPolicyOptional {
dry_run,
field_manager,
pretty,
} = optional;
let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(dry_run) = dry_run {
__query_pairs.append_pair("dryRun", dry_run);
}
if let Some(field_manager) = field_manager {
__query_pairs.append_pair("fieldManager", field_manager);
}
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let mut __request = http::Request::post(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`NetworkPolicy::create_namespaced_network_policy`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct CreateNamespacedNetworkPolicyOptional<'a> {
/// When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
pub dry_run: Option<&'a str>,
/// fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
pub field_manager: Option<&'a str>,
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<CreateNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::create_namespaced_network_policy`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum CreateNamespacedNetworkPolicyResponse {
Ok(crate::v1_15::api::extensions::v1beta1::NetworkPolicy),
Created(crate::v1_15::api::extensions::v1beta1::NetworkPolicy),
Accepted(crate::v1_15::api::extensions::v1beta1::NetworkPolicy),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for CreateNamespacedNetworkPolicyResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((CreateNamespacedNetworkPolicyResponse::Ok(result), buf.len()))
},
http::StatusCode::CREATED => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((CreateNamespacedNetworkPolicyResponse::Created(result), buf.len()))
},
http::StatusCode::ACCEPTED => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((CreateNamespacedNetworkPolicyResponse::Accepted(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((CreateNamespacedNetworkPolicyResponse::Other(result), read))
},
}
}
}
// Generated from operation deleteExtensionsV1beta1CollectionNamespacedNetworkPolicy
impl NetworkPolicy {
/// delete collection of NetworkPolicy
///
/// Use the returned [`crate::ResponseBody`]`<`[`DeleteCollectionNamespacedNetworkPolicyResponse`]`>` constructor, or [`DeleteCollectionNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `delete_optional`
///
/// Delete options. Use `Default::default()` to not pass any.
///
/// * `list_optional`
///
/// List options. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_collection_namespaced_network_policy(
namespace: &str,
delete_optional: crate::v1_15::DeleteOptional<'_>,
list_optional: crate::v1_15::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteCollectionNamespacedNetworkPolicyResponse>), crate::RequestError> {
let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
list_optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::delete(__url);
let __body = serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<DeleteCollectionNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::delete_collection_namespaced_network_policy`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum DeleteCollectionNamespacedNetworkPolicyResponse {
OkStatus(crate::v1_15::apimachinery::pkg::apis::meta::v1::Status),
OkValue(crate::v1_15::api::extensions::v1beta1::NetworkPolicyList),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for DeleteCollectionNamespacedNetworkPolicyResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
let is_status = match result.get("kind") {
Some(serde_json::Value::String(s)) if s == "Status" => true,
_ => false,
};
if is_status {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteCollectionNamespacedNetworkPolicyResponse::OkStatus(result), buf.len()))
}
else {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteCollectionNamespacedNetworkPolicyResponse::OkValue(result), buf.len()))
}
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((DeleteCollectionNamespacedNetworkPolicyResponse::Other(result), read))
},
}
}
}
// Generated from operation deleteExtensionsV1beta1NamespacedNetworkPolicy
impl NetworkPolicy {
/// delete a NetworkPolicy
///
/// Use the returned [`crate::ResponseBody`]`<`[`DeleteNamespacedNetworkPolicyResponse`]`>` constructor, or [`DeleteNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the NetworkPolicy
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_namespaced_network_policy(
name: &str,
namespace: &str,
optional: crate::v1_15::DeleteOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteNamespacedNetworkPolicyResponse>), crate::RequestError> {
let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies/{name}",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __request = http::Request::delete(__url);
let __body = serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<DeleteNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::delete_namespaced_network_policy`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum DeleteNamespacedNetworkPolicyResponse {
OkStatus(crate::v1_15::apimachinery::pkg::apis::meta::v1::Status),
OkValue(crate::v1_15::api::extensions::v1beta1::NetworkPolicy),
Accepted(crate::v1_15::apimachinery::pkg::apis::meta::v1::Status),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for DeleteNamespacedNetworkPolicyResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
let is_status = match result.get("kind") {
Some(serde_json::Value::String(s)) if s == "Status" => true,
_ => false,
};
if is_status {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteNamespacedNetworkPolicyResponse::OkStatus(result), buf.len()))
}
else {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteNamespacedNetworkPolicyResponse::OkValue(result), buf.len()))
}
},
http::StatusCode::ACCEPTED => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((DeleteNamespacedNetworkPolicyResponse::Accepted(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((DeleteNamespacedNetworkPolicyResponse::Other(result), read))
},
}
}
}
// Generated from operation listExtensionsV1beta1NamespacedNetworkPolicy
impl NetworkPolicy {
/// list or watch objects of kind NetworkPolicy
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`ListNamespacedNetworkPolicyResponse`]`>` constructor, or [`ListNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list_namespaced_network_policy(
namespace: &str,
optional: crate::v1_15::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListNamespacedNetworkPolicyResponse>), crate::RequestError> {
let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<ListNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::list_namespaced_network_policy`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ListNamespacedNetworkPolicyResponse {
Ok(crate::v1_15::api::extensions::v1beta1::NetworkPolicyList),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ListNamespacedNetworkPolicyResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ListNamespacedNetworkPolicyResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ListNamespacedNetworkPolicyResponse::Other(result), read))
},
}
}
}
// Generated from operation listExtensionsV1beta1NetworkPolicyForAllNamespaces
impl NetworkPolicy {
/// list or watch objects of kind NetworkPolicy
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`ListNetworkPolicyForAllNamespacesResponse`]`>` constructor, or [`ListNetworkPolicyForAllNamespacesResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list_network_policy_for_all_namespaces(
optional: crate::v1_15::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListNetworkPolicyForAllNamespacesResponse>), crate::RequestError> {
let __url = "/apis/extensions/v1beta1/networkpolicies?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<ListNetworkPolicyForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::list_network_policy_for_all_namespaces`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ListNetworkPolicyForAllNamespacesResponse {
Ok(crate::v1_15::api::extensions::v1beta1::NetworkPolicyList),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ListNetworkPolicyForAllNamespacesResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ListNetworkPolicyForAllNamespacesResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ListNetworkPolicyForAllNamespacesResponse::Other(result), read))
},
}
}
}
// Generated from operation patchExtensionsV1beta1NamespacedNetworkPolicy
impl NetworkPolicy {
/// partially update the specified NetworkPolicy
///
/// Use the returned [`crate::ResponseBody`]`<`[`PatchNamespacedNetworkPolicyResponse`]`>` constructor, or [`PatchNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the NetworkPolicy
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn patch_namespaced_network_policy(
name: &str,
namespace: &str,
body: &crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch,
optional: crate::v1_15::PatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<PatchNamespacedNetworkPolicyResponse>), crate::RequestError> {
let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::patch(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body {
crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
}));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<PatchNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::patch_namespaced_network_policy`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum PatchNamespacedNetworkPolicyResponse {
Ok(crate::v1_15::api::extensions::v1beta1::NetworkPolicy),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for PatchNamespacedNetworkPolicyResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((PatchNamespacedNetworkPolicyResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((PatchNamespacedNetworkPolicyResponse::Other(result), read))
},
}
}
}
// Generated from operation readExtensionsV1beta1NamespacedNetworkPolicy
impl NetworkPolicy {
/// read the specified NetworkPolicy
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReadNamespacedNetworkPolicyResponse`]`>` constructor, or [`ReadNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the NetworkPolicy
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn read_namespaced_network_policy(
name: &str,
namespace: &str,
optional: ReadNamespacedNetworkPolicyOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadNamespacedNetworkPolicyResponse>), crate::RequestError> {
let ReadNamespacedNetworkPolicyOptional {
exact,
export,
pretty,
} = optional;
let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(exact) = exact {
__query_pairs.append_pair("exact", &exact.to_string());
}
if let Some(export) = export {
__query_pairs.append_pair("export", &export.to_string());
}
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`NetworkPolicy::read_namespaced_network_policy`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadNamespacedNetworkPolicyOptional<'a> {
/// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
pub exact: Option<bool>,
/// Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
pub export: Option<bool>,
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReadNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::read_namespaced_network_policy`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadNamespacedNetworkPolicyResponse {
Ok(crate::v1_15::api::extensions::v1beta1::NetworkPolicy),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadNamespacedNetworkPolicyResponse {
    /// Parses one response from `buf`, returning the parsed value and the
    /// number of bytes consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        if status_code == http::StatusCode::OK {
            // 200: the body is the requested NetworkPolicy.
            let policy = match serde_json::from_slice(buf) {
                Ok(value) => value,
                // An EOF error means `buf` holds only a prefix of the JSON document.
                Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                Err(err) => return Err(crate::ResponseError::Json(err)),
            };
            return Ok((ReadNamespacedNetworkPolicyResponse::Ok(policy), buf.len()));
        }
        // Any other status: surface the body (if any) as arbitrary JSON.
        if buf.is_empty() {
            return Ok((ReadNamespacedNetworkPolicyResponse::Other(Ok(None)), 0));
        }
        match serde_json::from_slice(buf) {
            Ok(value) => Ok((ReadNamespacedNetworkPolicyResponse::Other(Ok(Some(value))), buf.len())),
            Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
            Err(err) => Ok((ReadNamespacedNetworkPolicyResponse::Other(Err(err)), 0)),
        }
    }
}
// Generated from operation replaceExtensionsV1beta1NamespacedNetworkPolicy
impl NetworkPolicy {
    /// replace the specified NetworkPolicy
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ReplaceNamespacedNetworkPolicyResponse`]`>` constructor, or [`ReplaceNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the NetworkPolicy
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `body`
    ///
    ///     the replacement object, serialized as the JSON request body
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn replace_namespaced_network_policy(
        name: &str,
        namespace: &str,
        body: &crate::v1_15::api::extensions::v1beta1::NetworkPolicy,
        optional: ReplaceNamespacedNetworkPolicyOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReplaceNamespacedNetworkPolicyResponse>), crate::RequestError> {
        let ReplaceNamespacedNetworkPolicyOptional {
            dry_run,
            field_manager,
            pretty,
        } = optional;
        // Path segments are percent-encoded so arbitrary names cannot break the URL.
        let path = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies/{name}?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        // Append each provided optional parameter as a query pair.
        let mut query_pairs = crate::url::form_urlencoded::Serializer::new(path);
        if let Some(dry_run) = dry_run {
            query_pairs.append_pair("dryRun", dry_run);
        }
        if let Some(field_manager) = field_manager {
            query_pairs.append_pair("fieldManager", field_manager);
        }
        if let Some(pretty) = pretty {
            query_pairs.append_pair("pretty", pretty);
        }
        let url = query_pairs.finish();
        let request_body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        let mut request = http::Request::put(url);
        request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match request.body(request_body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Optional parameters of [`NetworkPolicy::replace_namespaced_network_policy`]
///
/// Each `Some` field is serialized as a query parameter (`dryRun`,
/// `fieldManager`, `pretty`); `None` fields are omitted from the URL.
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReplaceNamespacedNetworkPolicyOptional<'a> {
    /// When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    pub dry_run: Option<&'a str>,
    /// fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    pub field_manager: Option<&'a str>,
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}
/// Use `<ReplaceNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::replace_namespaced_network_policy`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReplaceNamespacedNetworkPolicyResponse {
    /// HTTP 200: the replaced NetworkPolicy.
    Ok(crate::v1_15::api::extensions::v1beta1::NetworkPolicy),
    /// HTTP 201: the newly created NetworkPolicy.
    Created(crate::v1_15::api::extensions::v1beta1::NetworkPolicy),
    /// Any other status code: the body parsed as arbitrary JSON when possible
    /// (`Ok(None)` for an empty body, `Err` when the body is not valid JSON).
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReplaceNamespacedNetworkPolicyResponse {
    /// Parses one response from `buf`, returning the parsed value and the
    /// number of bytes consumed.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            // 200 and 201 both carry a NetworkPolicy body; they differ only in
            // which variant wraps the result.
            http::StatusCode::OK | http::StatusCode::CREATED => {
                let policy = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    // An EOF error means `buf` holds only a prefix of the JSON document.
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                let response = if status_code == http::StatusCode::OK {
                    ReplaceNamespacedNetworkPolicyResponse::Ok(policy)
                } else {
                    ReplaceNamespacedNetworkPolicyResponse::Created(policy)
                };
                Ok((response, buf.len()))
            },
            // Any other status: surface the body (if any) as arbitrary JSON.
            _ => {
                if buf.is_empty() {
                    return Ok((ReplaceNamespacedNetworkPolicyResponse::Other(Ok(None)), 0));
                }
                match serde_json::from_slice(buf) {
                    Ok(value) => Ok((ReplaceNamespacedNetworkPolicyResponse::Other(Ok(Some(value))), buf.len())),
                    Err(ref err) if err.is_eof() => Err(crate::ResponseError::NeedMoreData),
                    Err(err) => Ok((ReplaceNamespacedNetworkPolicyResponse::Other(Err(err)), 0)),
                }
            },
        }
    }
}
// Generated from operation watchExtensionsV1beta1NamespacedNetworkPolicy
impl NetworkPolicy {
    /// list or watch objects of kind NetworkPolicy
    ///
    /// This operation only supports watching one item, or a list of items, of this type for changes.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`WatchNamespacedNetworkPolicyResponse`]`>` constructor, or [`WatchNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `namespace`
    ///
    ///     object name and auth scope, such as for teams and projects
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn watch_namespaced_network_policy(
        namespace: &str,
        optional: crate::v1_15::WatchOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchNamespacedNetworkPolicyResponse>), crate::RequestError> {
        // The namespace segment is percent-encoded so arbitrary names cannot break the URL.
        let path = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies?",
            namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        // All watch parameters are carried in the query string.
        let mut query_pairs = crate::url::form_urlencoded::Serializer::new(path);
        optional.__serialize(&mut query_pairs);
        let url = query_pairs.finish();
        match http::Request::get(url).body(Vec::new()) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Use `<WatchNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::watch_namespaced_network_policy`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum WatchNamespacedNetworkPolicyResponse {
    /// HTTP 200: one watch event from the response stream.
    Ok(crate::v1_15::apimachinery::pkg::apis::meta::v1::WatchEvent<NetworkPolicy>),
    /// Any other status code: the body parsed as arbitrary JSON when possible
    /// (`Ok(None)` for an empty body, `Err` when the body is not valid JSON).
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for WatchNamespacedNetworkPolicyResponse {
    // Parses one value from `buf` and reports how many bytes were consumed so the
    // caller can advance its buffer.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Watch responses are a stream of concatenated JSON documents, so
                // only the first complete value is consumed; `byte_offset` reports
                // how far into `buf` that value extended.
                let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter();
                let (result, byte_offset) = match deserializer.next() {
                    Some(Ok(value)) => (value, deserializer.byte_offset()),
                    // An EOF error means the buffer ends mid-value; more bytes are needed.
                    Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Some(Err(err)) => return Err(crate::ResponseError::Json(err)),
                    // An empty buffer yields no value at all.
                    None => return Err(crate::ResponseError::NeedMoreData),
                };
                Ok((WatchNamespacedNetworkPolicyResponse::Ok(result), byte_offset))
            },
            _ => {
                // Non-200 statuses: capture the body as arbitrary JSON when possible.
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((WatchNamespacedNetworkPolicyResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation watchExtensionsV1beta1NetworkPolicyForAllNamespaces
impl NetworkPolicy {
    /// list or watch objects of kind NetworkPolicy
    ///
    /// This operation only supports watching one item, or a list of items, of this type for changes.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`WatchNetworkPolicyForAllNamespacesResponse`]`>` constructor, or [`WatchNetworkPolicyForAllNamespacesResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn watch_network_policy_for_all_namespaces(
        optional: crate::v1_15::WatchOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchNetworkPolicyForAllNamespacesResponse>), crate::RequestError> {
        // Cluster-wide watch: the path contains no namespace segment.
        let path = "/apis/extensions/v1beta1/networkpolicies?".to_owned();
        let mut query_pairs = crate::url::form_urlencoded::Serializer::new(path);
        optional.__serialize(&mut query_pairs);
        let url = query_pairs.finish();
        match http::Request::get(url).body(Vec::new()) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Use `<WatchNetworkPolicyForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::watch_network_policy_for_all_namespaces`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum WatchNetworkPolicyForAllNamespacesResponse {
    /// HTTP 200: one watch event from the response stream.
    Ok(crate::v1_15::apimachinery::pkg::apis::meta::v1::WatchEvent<NetworkPolicy>),
    /// Any other status code: the body parsed as arbitrary JSON when possible
    /// (`Ok(None)` for an empty body, `Err` when the body is not valid JSON).
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for WatchNetworkPolicyForAllNamespacesResponse {
    // Parses one value from `buf` and reports how many bytes were consumed so the
    // caller can advance its buffer.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            http::StatusCode::OK => {
                // Watch responses are a stream of concatenated JSON documents, so
                // only the first complete value is consumed; `byte_offset` reports
                // how far into `buf` that value extended.
                let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter();
                let (result, byte_offset) = match deserializer.next() {
                    Some(Ok(value)) => (value, deserializer.byte_offset()),
                    // An EOF error means the buffer ends mid-value; more bytes are needed.
                    Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Some(Err(err)) => return Err(crate::ResponseError::Json(err)),
                    // An empty buffer yields no value at all.
                    None => return Err(crate::ResponseError::NeedMoreData),
                };
                Ok((WatchNetworkPolicyForAllNamespacesResponse::Ok(result), byte_offset))
            },
            _ => {
                // Non-200 statuses: capture the body as arbitrary JSON when possible.
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((WatchNetworkPolicyForAllNamespacesResponse::Other(result), read))
            },
        }
    }
}
// End extensions/v1beta1/NetworkPolicy
// Static type metadata for NetworkPolicy. The `Deserialize` impl below validates
// incoming "apiVersion"/"kind" values against these constants.
impl crate::Resource for NetworkPolicy {
    fn api_version() -> &'static str {
        "extensions/v1beta1"
    }
    fn group() -> &'static str {
        "extensions"
    }
    fn kind() -> &'static str {
        "NetworkPolicy"
    }
    fn version() -> &'static str {
        "v1beta1"
    }
}
impl crate::Metadata for NetworkPolicy {
    type Ty = crate::v1_15::apimachinery::pkg::apis::meta::v1::ObjectMeta;
    // Borrows the object's metadata, if it was set.
    fn metadata(&self) -> Option<&<Self as crate::Metadata>::Ty> {
        self.metadata.as_ref()
    }
}
impl<'de> serde::Deserialize<'de> for NetworkPolicy {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
        // Identifiers for the JSON keys this deserializer recognizes; any other
        // key maps to `Other` and its value is skipped.
        #[allow(non_camel_case_types)]
        enum Field {
            Key_api_version,
            Key_kind,
            Key_metadata,
            Key_spec,
            Other,
        }
        impl<'de> serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
                struct Visitor;
                impl<'de> serde::de::Visitor<'de> for Visitor {
                    type Value = Field;
                    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                        write!(f, "field identifier")
                    }
                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
                        Ok(match v {
                            "apiVersion" => Field::Key_api_version,
                            "kind" => Field::Key_kind,
                            "metadata" => Field::Key_metadata,
                            "spec" => Field::Key_spec,
                            _ => Field::Other,
                        })
                    }
                }
                deserializer.deserialize_identifier(Visitor)
            }
        }
        struct Visitor;
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = NetworkPolicy;
            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "struct NetworkPolicy")
            }
            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
                let mut value_metadata: Option<crate::v1_15::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None;
                let mut value_spec: Option<crate::v1_15::api::extensions::v1beta1::NetworkPolicySpec> = None;
                while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        // "apiVersion" and "kind" are not stored; they are validated
                        // against the constants from `crate::Resource` and rejected
                        // with `invalid_value` on mismatch.
                        Field::Key_api_version => {
                            let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?;
                            if value_api_version != <Self::Value as crate::Resource>::api_version() {
                                return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::api_version()));
                            }
                        },
                        Field::Key_kind => {
                            let value_kind: String = serde::de::MapAccess::next_value(&mut map)?;
                            if value_kind != <Self::Value as crate::Resource>::kind() {
                                return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::kind()));
                            }
                        },
                        Field::Key_metadata => value_metadata = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_spec => value_spec = serde::de::MapAccess::next_value(&mut map)?,
                        // Unknown keys: consume and discard the value.
                        Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }
                // Both fields are optional; missing keys simply stay `None`.
                Ok(NetworkPolicy {
                    metadata: value_metadata,
                    spec: value_spec,
                })
            }
        }
        deserializer.deserialize_struct(
            "NetworkPolicy",
            &[
                "apiVersion",
                "kind",
                "metadata",
                "spec",
            ],
            Visitor,
        )
    }
}
impl serde::Serialize for NetworkPolicy {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
        // Field count passed to `serialize_struct`: "apiVersion" and "kind" are
        // always emitted; "metadata" and "spec" only when they are `Some`.
        let mut state = serializer.serialize_struct(
            "NetworkPolicy",
            2 +
            self.metadata.as_ref().map_or(0, |_| 1) +
            self.spec.as_ref().map_or(0, |_| 1),
        )?;
        // "apiVersion" and "kind" come from the `crate::Resource` constants.
        serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::api_version())?;
        serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::kind())?;
        if let Some(value) = &self.metadata {
            serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", value)?;
        }
        if let Some(value) = &self.spec {
            serde::ser::SerializeStruct::serialize_field(&mut state, "spec", value)?;
        }
        serde::ser::SerializeStruct::end(state)
    }
}
| 45.454294 | 256 | 0.586386 |
ac70ceb953ebd09cf6ba85927ad5c2bd8bf3401b | 1,626 | #![cfg(test)]
mod acceptance {
use itertools::Itertools;
use std::env;
use std::path::PathBuf;
use std::process::{Command, Output};
fn retrieve_stdout(output: &Output) -> String {
String::from_utf8_lossy(&output.stdout)
.to_string()
.lines()
.filter(|s| !s.is_empty())
.map(|line| match line.find("; finished in") {
Some(idx) => &line[0..idx],
None => line,
})
.sorted()
.join("\n")
}
#[test]
fn basic() {
let output = Command::new("cargo")
.current_dir(PathBuf::from("acceptance_tests").join("basic"))
.args(&["test"])
.output()
.expect("cargo command failed to start");
let lines = retrieve_stdout(&output);
insta::assert_display_snapshot!(lines);
}
#[test]
fn hamcrest_assertions() {
let output = Command::new("cargo")
.current_dir(PathBuf::from("acceptance_tests").join("hamcrest_assertions"))
.args(&["test"])
.output()
.expect("cargo command failed to start");
let lines = retrieve_stdout(&output);
insta::assert_display_snapshot!(lines);
}
#[test]
fn r#async() {
let output = Command::new("cargo")
.current_dir(PathBuf::from("acceptance_tests").join("async"))
.args(&["test"])
.output()
.expect("cargo command failed to start");
let lines = retrieve_stdout(&output);
insta::assert_display_snapshot!(lines);
}
}
| 28.034483 | 87 | 0.52337 |
e92bb801f2f2e43b14e83319fc66407582ba241a | 9,204 | //! Low-level structured concurrency.
//!
//! This module gives you everything you need to spawn a task but does not do the actual spawning.
pub mod signals;
use self::signals::*;
use crate::{ChildSignals, ParentSignals, RawScopedSpawn, SignalReceiver, SignalSender};
use core::future::Future;
use futures::channel::oneshot;
use futures::future;
use futures::pin_mut;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
// Debug-only diagnostic messages, used with `expect` when the single-owner
// invariant of the remote spawner is found to be violated.
// Fix: the first message read "after we dropped the its owner future" —
// corrected to "after we dropped its owner future".
#[cfg(debug_assertions)]
const EXPECT_SPAWNER_NO_OWNER: &str =
    "Remote spawner is still owned after we dropped its owner future. \
     It's considered a programming error to send the spawner anywhere \
     other than the task owning the spawner. Please check that the \
     spawner isn't sent to another task or thread, or stored in any \
     global variable. This check does not cover all possible errors, and \
     is only performed if debug assertions are enabled.";
#[cfg(debug_assertions)]
const EXPECT_SPAWNER_NO_CONTENTION: &str =
    "Another thread is using the remote spawner to spawn a task. \
     It's considered a programming error to send the spawner anywhere \
     other than the task owning the spawner. Please check that the \
     spawner isn't sent to another task or thread, or stored in any \
     global variable. This check does not cover all possible errors, and \
     is only performed if debug assertions are enabled.";
/// A scope for a child task.
pub struct RemoteScope {
    // Lazily created by `spawner()` and consumed by `wrap`, which awaits the
    // children registered through it before reporting completion.
    spawner: Option<RemoteSpawner>,
}
/// A spawner.
///
/// This type is returned by `RemoteScope::spawner`.
///
/// This type implements `RawScopedSpawn`.
#[derive(Clone)]
pub struct RemoteSpawner {
    // Cancel senders for every spawned child, keyed by the address of a pinned
    // id byte (see `spawn_raw`); cleared in `RemoteScope::wrap` to signal cancellation.
    child_cancel_senders: Arc<Mutex<HashMap<usize, oneshot::Sender<()>>>>,
    // "Done" receivers for every spawned child, keyed the same way; awaited in
    // `RemoteScope::wrap` before the parent reports completion.
    child_done_receivers: Arc<Mutex<HashMap<usize, oneshot::Receiver<()>>>>,
    #[cfg(debug_assertions)]
    owner_test: Arc<Mutex<()>>, // Used to track if the spawner was sent anywhere else
}
impl RemoteScope {
    /// Constructs a new `RemoteScope`.
    pub fn new() -> Self {
        Self { spawner: None }
    }
    /// Returns a spawner for the scoped child task.
    ///
    /// It may be cheaper if this method is not called.
    // The spawner is created on first call and cached; later calls clone the
    // same spawner so all children register in the same maps.
    pub fn spawner(&mut self) -> RemoteSpawner {
        match &self.spawner {
            Some(spawner) => spawner.clone(),
            None => {
                let spawner = RemoteSpawner::new();
                self.spawner = Some(spawner.clone());
                spawner
            }
        }
    }
    /// Wraps `signal`, `fut`, and `done` into a future suitable to be run in a new task.
    ///
    /// The returned future will handle all signals for structured concurrency. It will also handle
    /// termination as described in the library overview.
    pub fn wrap<ParentCancelReceiver, ParentDoneSender, Fut, Done>(
        self,
        signal: ChildSignals<ParentCancelReceiver, ParentDoneSender>,
        fut: Fut,
        done: Done,
    ) -> impl Future<Output = ()>
    where
        ParentCancelReceiver: SignalReceiver,
        ParentDoneSender: SignalSender,
        Fut: Future<Output = ()>,
        Done: FnOnce(),
    {
        // Holds everything captured by the returned future. The struct's name
        // and field order suggest the drop order is deliberate; in particular
        // `parent_done_sender` is dropped explicitly as the very last step below.
        struct DropOrder<ParentCancelReceiver, ParentDoneSender, Fut, Done> {
            spawner: Option<RemoteSpawner>,
            fut: Fut,
            done: Done,
            parent_cancel_receiver: ParentCancelReceiver,
            parent_done_sender: ParentDoneSender,
        }
        let data = DropOrder {
            spawner: self.spawner,
            fut,
            done,
            parent_cancel_receiver: signal.cancel_receiver,
            parent_done_sender: signal.done_sender,
        };
        async move {
            {
                {
                    // Run the wrapped future until it completes or the parent
                    // signals cancellation, whichever happens first.
                    let parent_cancel_receiver = data.parent_cancel_receiver;
                    pin_mut!(parent_cancel_receiver);
                    let fut = data.fut;
                    pin_mut!(fut);
                    future::select(parent_cancel_receiver, fut).await;
                }
                if let Some(spawner) = data.spawner {
                    #[cfg(debug_assertions)]
                    {
                        // It's not allowed to send the spawner anywhere else
                        Arc::try_unwrap(spawner.owner_test).expect(EXPECT_SPAWNER_NO_OWNER);
                    }
                    {
                        // Drop child cancel senders
                        // (a dropped `oneshot::Sender` completes its receiver, so
                        // every child observes the cancellation).
                        *spawner.child_cancel_senders.lock().unwrap() = HashMap::new();
                    }
                    // Take all done receivers out of the shared map, then wait for
                    // each child to finish; `Err(Canceled)` results are ignored.
                    let receivers = {
                        core::mem::replace(
                            &mut *spawner.child_done_receivers.lock().unwrap(),
                            HashMap::new(),
                        )
                    };
                    for receiver in receivers {
                        let _ = receiver.1.await;
                    }
                }
                {
                    // Run the caller's completion callback after all children finished.
                    let done = data.done;
                    done();
                }
            }
            // Dropping the done sender last notifies the parent that this task
            // (and its children) have fully terminated.
            drop(data.parent_done_sender);
        }
    }
}
impl Default for RemoteScope {
fn default() -> RemoteScope {
Self::new()
}
}
impl RemoteSpawner {
    // Creates a spawner with no registered children yet.
    fn new() -> Self {
        Self {
            child_cancel_senders: Arc::new(Mutex::new(HashMap::new())),
            child_done_receivers: Arc::new(Mutex::new(HashMap::new())),
            #[cfg(debug_assertions)]
            owner_test: Arc::new(Mutex::new(())),
        }
    }
}
impl RawScopedSpawn for RemoteSpawner {
    type CancelReceiver = RemoteCancelReceiver;
    type DoneSender = RemoteDoneSender;
    type CancelSenderWithSignal = RemoteCancelSenderWithSignal;
    type DoneReceiverWithSignal = RemoteDoneReceiverWithSignal;
    type CancelReceiverWithSignal = RemoteCancelReceiverWithSignal;
    type DoneSenderWithSignal = RemoteDoneSenderWithSignal;
    // Registers a new child: a cancel channel (parent -> child) and a done
    // channel (child -> parent). The pinned `0u8` boxes exist only to provide
    // stable addresses used as unique map keys; the id boxes travel with the
    // returned signal halves so the entries can be located later.
    fn spawn_raw(&self) -> ChildSignals<Self::CancelReceiver, Self::DoneSender> {
        let (cancel_sender, cancel_receiver) = oneshot::channel();
        let cancel_sender_id = Box::pin(0u8);
        let (done_sender, done_receiver) = oneshot::channel();
        let done_receiver_id = Box::pin(0u8);
        // Debug-only check that no other owner holds the spawner concurrently.
        #[cfg(debug_assertions)]
        let _guard = self
            .owner_test
            .try_lock()
            .expect(EXPECT_SPAWNER_NO_CONTENTION);
        self.child_cancel_senders
            .lock()
            .unwrap()
            .insert((&*cancel_sender_id) as *const u8 as usize, cancel_sender);
        self.child_done_receivers
            .lock()
            .unwrap()
            .insert((&*done_receiver_id) as *const u8 as usize, done_receiver);
        ChildSignals {
            cancel_receiver: RemoteCancelReceiver {
                receiver: cancel_receiver,
                sender_id: cancel_sender_id,
                senders: self.child_cancel_senders.clone(),
            },
            done_sender: RemoteDoneSender {
                _sender: done_sender,
                receiver_id: done_receiver_id,
                receivers: self.child_done_receivers.clone(),
            },
        }
    }
    // Like `spawn_raw`, but additionally wires up a "leaf" cancel/done channel
    // pair handed back to the caller as `ParentSignals`, so the parent can
    // cancel and observe this particular child directly. Only the "root"
    // halves are registered in the shared maps.
    fn spawn_raw_with_signal(
        &self,
    ) -> super::ParentChildSignals<
        Self::CancelSenderWithSignal,
        Self::DoneReceiverWithSignal,
        Self::CancelReceiverWithSignal,
        Self::DoneSenderWithSignal,
    > {
        let (cancel_sender_root, cancel_receiver_root) = oneshot::channel();
        let cancel_sender_root_id = Box::pin(0u8);
        let (cancel_sender_leaf, cancel_receiver_leaf) = oneshot::channel();
        let (done_sender_root, done_receiver_root) = oneshot::channel();
        let done_receiver_root_id = Box::pin(0u8);
        let (done_sender_leaf, done_receiver_leaf) = oneshot::channel();
        // Debug-only check that no other owner holds the spawner concurrently.
        #[cfg(debug_assertions)]
        let _guard = self
            .owner_test
            .try_lock()
            .expect(EXPECT_SPAWNER_NO_CONTENTION);
        self.child_cancel_senders.lock().unwrap().insert(
            (&*cancel_sender_root_id) as *const u8 as usize,
            cancel_sender_root,
        );
        self.child_done_receivers.lock().unwrap().insert(
            (&*done_receiver_root_id) as *const u8 as usize,
            done_receiver_root,
        );
        (
            ParentSignals {
                cancel_sender: RemoteCancelSenderWithSignal {
                    sender: cancel_sender_leaf,
                },
                done_receiver: RemoteDoneReceiverWithSignal {
                    receiver: done_receiver_leaf,
                },
            },
            ChildSignals {
                cancel_receiver: RemoteCancelReceiverWithSignal {
                    receiver_root: cancel_receiver_root,
                    receiver_leaf: Some(cancel_receiver_leaf),
                    sender_id: cancel_sender_root_id,
                    senders: self.child_cancel_senders.clone(),
                },
                done_sender: RemoteDoneSenderWithSignal {
                    _sender_root: done_sender_root,
                    _sender_leaf: done_sender_leaf,
                    receiver_id: done_receiver_root_id,
                    receivers: self.child_done_receivers.clone(),
                },
            },
        )
    }
}
| 34.996198 | 99 | 0.579639 |
26d8ae055bd177c4452bd3109d8e1dde86ecc450 | 95 |
#[cfg(test)]
mod tests {
    // Placeholder test module: the example test below is kept for reference
    // but is currently disabled.
    // #[test]
    // fn t() {
    //     assert!(6 == 6)
    // }
}
| 10.555556 | 26 | 0.315789 |
87407db04e7bdf1646f8d0144579ef5dd129df14 | 5,008 | use crate::func::FuncRef;
use crate::module::check_limits;
use crate::Error;
use alloc::{format, rc::Rc, vec, vec::Vec};
use core::cell::RefCell;
use core::fmt;
use core::u32;
use parity_wasm::elements::ResizableLimits;
/// Reference to a table (See [`TableInstance`] for details).
///
/// This reference has a reference-counting semantics.
///
/// [`TableInstance`]: struct.TableInstance.html
///
#[derive(Clone, Debug)]
pub struct TableRef(Rc<TableInstance>);
impl ::core::ops::Deref for TableRef {
type Target = TableInstance;
fn deref(&self) -> &TableInstance {
&self.0
}
}
/// Runtime representation of a table.
///
/// A table is a array of untyped functions. It allows wasm code to call functions
/// indirectly through a dynamic index into a table. For example, this allows emulating function
/// pointers by way of table indices.
///
/// Table is created with an initial size but can be grown dynamically via [`grow`] method.
/// Growth can be limited by an optional maximum size.
///
/// In future, a table might be extended to be able to hold not only functions but different types.
///
/// [`grow`]: #method.grow
///
pub struct TableInstance {
/// Table limits.
limits: ResizableLimits,
/// Table memory buffer.
buffer: RefCell<Vec<Option<FuncRef>>>,
}
impl fmt::Debug for TableInstance {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("TableInstance")
.field("limits", &self.limits)
.field("buffer.len", &self.buffer.borrow().len())
.finish()
}
}
impl TableInstance {
/// Allocate a table instance.
///
/// The table allocated with initial size, specified by `initial_size`.
/// Maximum size can be specified by `maximum_size`.
///
/// All table elements are allocated uninitialized.
///
/// # Errors
///
/// Returns `Err` if `initial_size` is greater than `maximum_size`.
pub fn alloc(
initial_size: u32,
maximum_size: Option<u32>,
) -> Result<TableRef, Error> {
let table = TableInstance::new(ResizableLimits::new(
initial_size,
maximum_size,
))?;
Ok(TableRef(Rc::new(table)))
}
fn new(limits: ResizableLimits) -> Result<TableInstance, Error> {
check_limits(&limits)?;
Ok(TableInstance {
buffer: RefCell::new(vec![None; limits.initial() as usize]),
limits,
})
}
/// Return table limits.
pub(crate) fn limits(&self) -> &ResizableLimits {
&self.limits
}
/// Returns size this table was created with.
pub fn initial_size(&self) -> u32 {
self.limits.initial()
}
/// Returns maximum size `TableInstance` can grow to.
pub fn maximum_size(&self) -> Option<u32> {
self.limits.maximum()
}
/// Returns current size of the table.
pub fn current_size(&self) -> u32 {
self.buffer.borrow().len() as u32
}
/// Increases the size of the table by given number of elements.
///
/// # Errors
///
/// Returns `Err` if tried to allocate more elements than permited by limit.
pub fn grow(&self, by: u32) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let maximum_size = self.maximum_size().unwrap_or(u32::MAX);
let new_size = self
.current_size()
.checked_add(by)
.and_then(|new_size| {
if maximum_size < new_size {
None
} else {
Some(new_size)
}
})
.ok_or_else(|| {
Error::Table(format!(
"Trying to grow table by {} items when there are already {} items",
by,
self.current_size(),
))
})?;
buffer.resize(new_size as usize, None);
Ok(())
}
/// Get the specific value in the table
pub fn get(&self, offset: u32) -> Result<Option<FuncRef>, Error> {
let buffer = self.buffer.borrow();
let buffer_len = buffer.len();
let table_elem = buffer.get(offset as usize).cloned().ok_or_else(|| {
Error::Table(format!(
"trying to read table item with index {} when there are only {} items",
offset, buffer_len
))
})?;
Ok(table_elem)
}
/// Set the table element to the specified function.
pub fn set(
&self,
offset: u32,
value: Option<FuncRef>,
) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let buffer_len = buffer.len();
let table_elem = buffer.get_mut(offset as usize).ok_or_else(|| {
Error::Table(format!(
"trying to update table item with index {} when there are only {} items",
offset, buffer_len
))
})?;
*table_elem = value;
Ok(())
}
}
| 30.351515 | 99 | 0.572883 |
d9dd0f84f5a36e30f4732034b2779bff4c8d496c | 4,601 | use derive_more::{AsMut, AsRef, Deref, DerefMut, From, Into};
use nalgebra::{Point3, Unit, Vector3, Vector4};
/// This trait is implemented for homogeneous projective 3d coordinate.
pub trait Projective: From<Vector4<f64>> + Clone + Copy {
/// Retrieve the homogeneous vector.
///
/// No constraints are put on this vector. All components can move freely and it is not normalized.
/// However, this vector may be normalized if desired and it will still be equivalent to the original.
/// You may wish to normalize it if you want to avoid floating point precision issues, for instance.
fn homogeneous(self) -> Vector4<f64>;
/// Retrieve the euclidean 3d point by normalizing the homogeneous coordinate.
///
/// This may fail, as a homogeneous coordinate can exist at near-infinity (like a star in the sky),
/// whereas a 3d euclidean point cannot (it would overflow).
fn point(self) -> Option<Point3<f64>> {
Point3::from_homogeneous(self.homogeneous())
}
/// Convert the euclidean 3d point into homogeneous coordinates.
fn from_point(point: Point3<f64>) -> Self {
point.to_homogeneous().into()
}
/// Retrieve the normalized bearing of the coordinate.
fn bearing(self) -> Unit<Vector3<f64>> {
Unit::new_normalize(self.bearing_unnormalized())
}
/// Retrieve the unnormalized bearing of the coordinate.
///
/// Use this when you know that you do not need the bearing to be normalized,
/// and it may increase performance. Otherwise use [`Projective::bearing`].
fn bearing_unnormalized(self) -> Vector3<f64> {
self.homogeneous().xyz()
}
}
/// A 3d point which is relative to the camera's optical center and orientation where
/// the positive X axis is right, positive Y axis is down, and positive Z axis is forwards
/// from the optical center of the camera. The unit of distance of a `CameraPoint` is
/// unspecified and relative to the current reconstruction.
// The derives (AsRef/Deref/From/Into, ...) make the newtype transparent over the inner `Vector4`.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, AsMut, AsRef, Deref, DerefMut, From, Into)]
pub struct CameraPoint(pub Vector4<f64>);
impl Projective for CameraPoint {
    /// Returns the underlying homogeneous vector.
    fn homogeneous(self) -> Vector4<f64> {
        // Identical to `self.into()`: the derived conversion just unwraps the newtype.
        self.0
    }
}
/// A point in "world" coordinates.
/// This means that the real-world units of the pose are unknown, but the
/// unit of distance and orientation are the same as the current reconstruction.
///
/// The reason that the unit of measurement is typically unknown is because if
/// the whole world is scaled by any factor `n` (excluding the camera itself), then
/// the normalized image coordinates will be exactly the same on every frame. Due to this,
/// the scaling of the world is chosen arbitrarily.
///
/// To extract the real scale of the world, a known distance between two `WorldPoint`s
/// must be used to scale the whole world (and all translations between cameras). At
/// that point, the world will be appropriately scaled. It is recommended not to make
/// the `WorldPoint` in the reconstruction scale to the "correct" scale. This is for
/// two reasons:
///
/// Firstly, because it is possible for scale drift to occur due to the above situation,
/// the further in the view graph you go from the reference measurement, the more the scale
/// will drift from the reference. It would give a false impression that the scale is known
/// globally when it is only known locally if the whole reconstruction was scaled.
///
/// Secondly, as the reconstruction progresses, the reference points might get rescaled
/// as optimization of the reconstruction brings everything into global consistency.
/// This means that, while the reference points would be initially scaled correctly,
/// any graph optimization might cause them to drift in scale as well.
///
/// Please scale your points on-demand. When you need to know a real distance in the
/// reconstruction, please use the closest known reference in the view graph to scale
/// it appropriately. In the future we will add APIs to utilize references
/// as optimization constraints when a known reference reconstruction is present.
///
/// If you must join two reconstructions, please solve for the similarity (rotation, translation and scale)
/// between the two reconstructions using an optimizer. APIs will eventually be added to perform this operation
/// as well.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, AsMut, AsRef, Deref, DerefMut, From, Into)]
pub struct WorldPoint(pub Vector4<f64>);
impl Projective for WorldPoint {
    /// Returns the underlying homogeneous vector.
    fn homogeneous(self) -> Vector4<f64> {
        // Identical to `self.into()`: the derived conversion just unwraps the newtype.
        self.0
    }
}
| 48.946809 | 111 | 0.725494 |
7abdd51ca3e538cf2f3ad270e87a7a5d154c1e95 | 1,409 | use crate::cdsl::isa::{TargetIsa, TargetIsaBuilder};
use crate::cdsl::regs::{RegBankBuilder, RegClassBuilder};
use crate::cdsl::settings::{SettingGroup, SettingGroupBuilder};
/// Builds the (currently empty) `arm64`-specific settings group.
/// The shared settings are accepted for signature parity with the other
/// ISA modules but are not consulted here.
fn define_settings(_shared: &SettingGroup) -> SettingGroup {
    SettingGroupBuilder::new("arm64").finish()
}
// Assembles the arm64 target ISA description: its settings group, register
// banks (integer, float, flags) and the top-level register classes.
// NOTE(review): banks and classes are registered in a fixed sequence;
// presumably their indices depend on registration order — confirm before
// reordering any of the `add_reg_bank`/`add_reg_class` calls.
pub fn define(shared_settings: &SettingGroup) -> TargetIsa {
    let mut isa = TargetIsaBuilder::new("arm64", define_settings(shared_settings));
    // The `x31` regunit serves as the stack pointer / zero register depending on context. We
    // reserve it and don't model the difference.
    let builder = RegBankBuilder::new("IntRegs", "x")
        .units(32)
        .track_pressure(true);
    let int_regs = isa.add_reg_bank(builder);
    let builder = RegBankBuilder::new("FloatRegs", "v")
        .units(32)
        .track_pressure(true);
    let float_regs = isa.add_reg_bank(builder);
    // Single pseudo-unit holding the NZCV condition flags.
    let builder = RegBankBuilder::new("FlagRegs", "")
        .units(1)
        .names(vec!["nzcv"])
        .track_pressure(false);
    let flag_reg = isa.add_reg_bank(builder);
    let builder = RegClassBuilder::new_toplevel("GPR", int_regs);
    isa.add_reg_class(builder);
    let builder = RegClassBuilder::new_toplevel("FPR", float_regs);
    isa.add_reg_class(builder);
    let builder = RegClassBuilder::new_toplevel("FLAG", flag_reg);
    isa.add_reg_class(builder);
    isa.finish()
}
| 33.547619 | 93 | 0.685593 |
7109e40e6d28a04853ae004fdc93bc098d55df1d | 3,749 | mod primitive_matchers;
mod scope;
mod macros;
mod event_matchers;
mod precondition;
mod actions;
use crate::config::versions::ConfigVersionProcessor;
use crate::config::raw_config::{RCHash, AccessHelpers, RawConfig};
use crate::config::{ConfigError, Config};
use crate::config::versions::version1::scope::build_scope;
use crate::config::versions::version1::macros::build_scope_macros;
use crate::config::versions::version1::primitive_matchers::build_string_matcher;
/// Processor for version 1 of the raw configuration format; stateless for
/// now (see the strictness ideas below).
pub (crate) struct Version1Processor {
    // Ideas:
    // - Ability to specify how strict to be: If there's any error in trying to parse a
    // data structure, continue by discarding it as null, perhaps adding a warning into
    // a list kept in this struct. Otherwise (if strict mode) fail entire config load for
    // any incorrect/missing data encountered
}
impl Version1Processor {
    /// Constructs a `Version1Processor`, boxed and type-erased behind the
    /// `ConfigVersionProcessor` trait so callers can dispatch on any
    /// version's processor uniformly.
    pub (crate) fn new() -> Box<dyn ConfigVersionProcessor> {
        Box::new(Self {})
    }
}
impl ConfigVersionProcessor for Version1Processor {
    /// Processes a top level RCHash into a fully formed Config instance, or returns a ConfigError
    /// if something doesn't work out correctly.
    ///
    /// ## Notes on the version 1 format
    ///
    /// At the top level, there are 3 possible expected fields:
    /// - `midi_device`:
    ///     A string matcher for the MIDI device name to listen to.
    /// - `scopes`:
    ///     Contains window class/name matching, as well as a list of macros that apply to that
    ///     scope. Note that in the parsed Config struct, this is organised differently; there is
    ///     one list of macros, each of which may or may not come with a scope. In the program it
    ///     is more practical that way, but in the context of authoring a configuration file, it
    ///     makes sense to specify a series of macros that apply to a given scope.
    /// - `global_macros`:
    ///     Contains all macros that apply regardless of focused window: macros without a scope.
    ///
    /// Further documentation and examples on the format can be found in /docs/config.md
    ///
    /// ## Arguments
    /// raw_config: Top level hash parsed from the config input file
    fn process(&self, raw_config: RCHash) -> Result<Config, ConfigError> {
        const MIDI_DEVICE_FIELD: &str = "midi_device";
        const SCOPES_FIELD: &str = "scopes";
        const MACROS_FIELD: &str = "macros";
        const GLOBAL_MACROS_FIELD: &str = "global_macros";

        let mut config = Config {
            midi_device_matcher: None,
            macros: vec![]
        };

        if let Some(raw_midi_device_matcher) = raw_config.get_hash(MIDI_DEVICE_FIELD) {
            config.midi_device_matcher = build_string_matcher(Some(raw_midi_device_matcher))?;
        }

        if let Some(raw_scopes) = raw_config.get_array(SCOPES_FIELD) {
            for raw_scope in raw_scopes {
                let raw_scope = match raw_scope {
                    RawConfig::Hash(hash) => hash,
                    // Non-hash entries in the scopes list are ignored.
                    _ => continue,
                };
                // A scope that doesn't resolve to an actual scope, or one
                // without a macro list, is skipped silently.
                let scope = match build_scope(raw_scope)? {
                    Some(scope) => scope,
                    None => continue,
                };
                let raw_macros = match raw_scope.get_array(MACROS_FIELD) {
                    Some(raw_macros) => raw_macros,
                    None => continue,
                };
                config.macros.extend(build_scope_macros(raw_macros, Some(scope))?);
            }
        }

        if let Some(raw_macros) = raw_config.get_array(GLOBAL_MACROS_FIELD) {
            config.macros.extend(build_scope_macros(raw_macros, None)?);
        }

        Ok(config)
    }
}
| 39.882979 | 99 | 0.634836 |
f4aa975611f5789ca97c61a03ea9e6b88351910d | 2,726 | // This file is part of Sulis, a turn based RPG written in Rust.
// Copyright 2018 Jared Stephen
//
// Sulis is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Sulis is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Sulis. If not, see <http://www.gnu.org/licenses/>
use std::fmt::{self, Display};
use std::io::Error;
use std::path::PathBuf;
use sulis_core::config::{self, Config};
use sulis_core::resource::{read_single_resource, subdirs};
// Scans both the install's mods directory and the per-user directory (same
// relative sub-path under the user config dir) and returns metadata for
// every readable modification found.
pub fn get_available_modifications() -> Vec<ModificationInfo> {
    let root_dir = Config::resources_config().mods_directory;
    let mut user_dir = config::USER_DIR.clone();
    user_dir.push(&root_dir);
    let mut mods = Vec::new();
    let mut dirs = Vec::new();
    // An unreadable location only produces a warning, so one bad path does
    // not hide the mods found in the other location.
    match subdirs(&root_dir) {
        Ok(mut subdirs) => dirs.append(&mut subdirs),
        Err(e) => warn!("Unable to read mods from '{}': {}", root_dir, e),
    }
    match subdirs(&user_dir) {
        Ok(mut subdirs) => dirs.append(&mut subdirs),
        Err(e) => warn!("Unable to read mods from '{:?}': {}", user_dir, e),
    }
    // Same leniency per directory: a broken mod is warned about and skipped.
    for dir in dirs {
        match ModificationInfo::from_dir(dir.clone()) {
            Ok(modi) => mods.push(modi),
            Err(e) => warn!("Error reading module from '{:?}': {}", dir, e),
        }
    }
    mods
}
/// Metadata describing one installed modification, loaded from its `mod`
/// resource file plus the directory it was found in.
#[derive(Debug, Clone)]
pub struct ModificationInfo {
    pub id: String,
    pub name: String,
    pub description: String,
    pub dir: String,
}
impl Display for ModificationInfo {
    /// Renders a modification by its human-readable name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(&self.name)
    }
}
impl ModificationInfo {
    /// Loads a modification's metadata from the `mod` resource file inside
    /// `path`; returns an error when that file is missing or malformed.
    pub fn from_dir(path: PathBuf) -> Result<ModificationInfo, Error> {
        let path_str = path.to_string_lossy().to_string();
        let builder: ModificationInfoBuilder = read_single_resource(&format!("{}/mod", path_str))?;
        Ok(ModificationInfo {
            name: builder.name,
            description: builder.description,
            id: builder.id,
            dir: path_str,
        })
    }
}
/// Deserialized shape of a `mod` resource file; converted into a
/// `ModificationInfo` by `ModificationInfo::from_dir`.
#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct ModificationInfoBuilder {
    pub id: String,
    pub name: String,
    pub description: String,
}
| 31.333333 | 100 | 0.621423 |
5deb7aca15b109fa658ff572c42340803b53408d | 702 | #![no_std]
extern crate alloc;
use alloc::string::ToString;
use contract::{
contract_api::{runtime, storage},
unwrap_or_revert::UnwrapOrRevert,
};
use types::{ApiError, ContractRef, Key, URef};
#[no_mangle]
pub extern "C" fn call() {
    // Look up the previously stored "hello_ext" contract under its named
    // key; revert with GetKey if the key is absent.
    let contract_key: Key = runtime::get_key("hello_ext").unwrap_or_revert_with(ApiError::GetKey);
    let contract_pointer: ContractRef = match contract_key {
        Key::Hash(hash) => ContractRef::Hash(hash),
        _ => runtime::revert(ApiError::UnexpectedKeyVariant),
    };
    // Invoke the contract with no arguments; it returns a URef into storage.
    let result: URef = runtime::call_contract(contract_pointer, ());
    // The called contract is expected to have written this exact greeting.
    let value = storage::read(result);
    assert_eq!(Ok(Some("Hello, world!".to_string())), value);
}
e994972928c933a3ea1dfad139e95051de7e0eca | 3,407 | use crate::utils::bin_to_int;
/// Converts a row of ASCII digits into a vector of their numeric values.
/// Panics (via `unwrap`) on any non-digit character, matching the original.
fn to_digits(row: &str) -> Vec<u32> {
    let mut digits = Vec::with_capacity(row.len());
    for ch in row.chars() {
        digits.push(ch.to_digit(10).unwrap());
    }
    digits
}
// TODO: refactor out the code that parses a line
// TODO: refactor out the code that processes the matrix by columns
// Day 3 part 1: gamma (most common bit per column) times epsilon (least
// common bit per column), both interpreted as binary numbers.
pub fn part1(xs: Vec<&str>) -> u32 {
    // Per-column count of set bits, accumulated across all rows.
    let ones = xs
        .iter()
        .map(|&row| to_digits(row))
        .fold(vec![0; xs[0].len()], |acc, cells| {
            // process (+) each line with the accumulator
            acc.iter()
                .zip(cells)
                .map(|(a, b)| a + b)
                .collect::<Vec<u32>>()
        });
    let total_lines = xs.len() as u32;
    // Gamma takes the most common bit (ties favour 1 via `>=`); epsilon
    // takes the least common (strict `<`), so the two are complementary.
    let binary_gamma: Vec<u32> = ones
        .iter()
        .map(|&i| if i >= (total_lines - i) { 1 } else { 0 })
        .collect();
    let binary_epsilon: Vec<u32> = ones
        .iter()
        .map(|&i| if i < (total_lines - i) { 1 } else { 0 })
        .collect();
    bin_to_int(binary_gamma) * bin_to_int(binary_epsilon)
}
/// Day 3 part 2: product of the oxygen-generator and CO2-scrubber ratings.
///
/// Each rating is found by repeatedly filtering the candidate rows on one
/// bit position at a time until a single row remains:
/// - oxygen keeps the most common bit, ties resolving to 1;
/// - CO2 keeps the least common bit, ties resolving to 0.
///
/// Assumes each row is a binary string (only '0'/'1'), as in the puzzle
/// input. Panics if `xs` is empty (same as the original).
pub fn part2(xs: Vec<&str>) -> u32 {
    let width = xs[0].len();
    // Parse every row once instead of re-parsing per rating.
    let rows: Vec<Vec<u32>> = xs.iter().map(|&row| to_digits(row)).collect();

    // Shared bit-criteria filter; `pick` maps (ones, zeros) counts to the
    // bit value that survivors must carry at the current position.
    let filter_rating = |pick: fn(usize, usize) -> u32| -> Vec<u32> {
        let mut candidates = rows.clone();
        for i in 0..width {
            let ones = candidates.iter().filter(|row| row[i] == 1).count();
            let zeros = candidates.len() - ones;
            let keep = pick(ones, zeros);
            candidates.retain(|row| row[i] == keep);
            if candidates.len() == 1 {
                break;
            }
        }
        candidates
            .into_iter()
            .next()
            .expect("bit criteria never leaves zero candidates")
    };

    let oxy = filter_rating(|ones, zeros| if ones >= zeros { 1 } else { 0 });
    let co2 = filter_rating(|ones, zeros| if ones >= zeros { 0 } else { 1 });

    bin_to_int(oxy) * bin_to_int(co2)
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;

    /// Regression test for part 1 against the real puzzle input on disk.
    #[test]
    fn test_p1() {
        let s = fs::read_to_string("src/input03").expect("Cannot read file");
        let xs = s.trim().split("\n").collect::<Vec<&str>>();
        assert_eq!(part1(xs), 2954600);
    }

    /// Regression test for part 2 against the real puzzle input on disk.
    #[test]
    fn test_p2() {
        let s = fs::read_to_string("src/input03").expect("Cannot read file");
        let xs = s.trim().split("\n").collect::<Vec<&str>>();
        assert_eq!(part2(xs), 1662846);
    }

    /// Sanity check of the helper converting a bit vector to an integer.
    #[test]
    fn test_binary() {
        assert_eq!(bin_to_int(vec![1, 0, 0, 1]), 9);
    }
}
| 25.425373 | 77 | 0.497505 |
e9fef965eaad89b2d54ca84271874c01042b396a | 617 | mod add;
pub use add::*;
mod and;
pub use and::*;
mod call;
pub use call::*;
mod cp;
pub use cp::*;
mod dec;
pub use dec::*;
mod halt;
pub use halt::*;
mod inc;
pub use inc::*;
mod interrupts;
pub use interrupts::*;
mod jump;
pub use jump::*;
mod load;
pub use load::*;
mod nop;
pub use nop::*;
mod operation;
pub use operation::*;
mod or;
pub use or::*;
mod pop;
pub use pop::*;
mod prefix;
pub use prefix::*;
mod push;
pub use push::*;
mod ret;
pub use ret::*;
mod rst;
pub use rst::*;
mod rotate;
pub use rotate::*;
mod stop;
pub use stop::*;
mod sub;
pub use sub::*;
mod xor;
pub use xor::*;
| 9.348485 | 22 | 0.6094 |
56b7e65f75e1dee384034cc5e9e90c49962f9270 | 562 | /*
Demonstrates that assuming `<A, A, A, A>` is the same as `A = A`.
*/
use prop::*;
use path_semantics::*;
/// This proof shows that one can prove `A = A` with
/// a safe assumption using the core axiom.
pub fn proof_1<A: LProp>() -> Eq<A, A>
// Try comment the next line to trigger an error.
// NOTE(review): the bound `A::N < A::N` looks self-contradictory for
// concrete naturals — presumably it gates use of `assume_naive`; confirm
// against the path_semantics crate documentation.
where A::N: nat::Lt<A::N>
{
    // Assume the core axiom for <A, A, A, A>, then discharge it with
    // reflexivity of equality and the identity implications.
    let p = assume_naive::<A, A, A, A>();
    p((eq::refl(), (imply::id(), imply::id())))
}
/// Here is a shorter proof, which is equivalent to the first one.
pub fn proof_2<A: LProp>() -> Eq<A, A> {
    eq::refl()
}
fn main() {}
e800d905e6b3766e1c1981f7597fb46648a8cf10 | 1,237 | use std::net::IpAddr;
use uuid::Uuid;
use frame::frame_result::{ColType, ColTypeOption, ColTypeOptionValue};
use types::{AsRust, AsRustType, CBytes};
use types::data_serialization_types::*;
use types::map::Map;
use types::udt::UDT;
use types::tuple::Tuple;
use types::blob::Blob;
use error::{Error, Result};
// TODO: consider using pointers to ColTypeOption and Vec<CBytes> instead of owning them.
/// A Cassandra list value: the raw element bytes plus the column spec that
/// describes how to decode each element.
#[derive(Debug)]
pub struct List {
    /// column spec of the list, i.e. id should be List as it's a list and value should contain
    /// a type of list items.
    metadata: ColTypeOption,
    data: Vec<CBytes>,
}
impl List {
    /// Wraps raw list bytes together with the column spec describing the
    /// element type.
    pub fn new(data: Vec<CBytes>, metadata: ColTypeOption) -> List {
        // Field-init shorthand (clippy: redundant_field_names).
        List { metadata, data }
    }

    /// Applies `f` to every raw element, collecting the results.
    fn map<T, F>(&self, f: F) -> Vec<T>
        where F: FnMut(&CBytes) -> T
    {
        self.data.iter().map(f).collect()
    }
}
impl AsRust for List {}
list_as_rust!(Blob);
list_as_rust!(String);
list_as_rust!(bool);
list_as_rust!(i64);
list_as_rust!(i32);
list_as_rust!(i16);
list_as_rust!(i8);
list_as_rust!(f64);
list_as_rust!(f32);
list_as_rust!(IpAddr);
list_as_rust!(Uuid);
list_as_rust!(List);
list_as_rust!(Map);
list_as_rust!(UDT);
list_as_rust!(Tuple);
| 24.254902 | 95 | 0.671787 |
9b7df8d1ccab4db89d804ad373fdc652f8fc1203 | 3,264 | #![no_std]
#![no_main]
extern crate panic_halt;
use arduino_mega2560::prelude::*;
// Entry point: configures the serial port and ADC, prints the internal
// bandgap/ground readings once, then prints all 16 analog inputs every
// second, forever. Peripheral setup is order-dependent; kept verbatim.
#[arduino_mega2560::entry]
fn main() -> ! {
    let dp = arduino_mega2560::Peripherals::take().unwrap();
    let mut pins = arduino_mega2560::Pins::new(
        dp.PORTA,
        dp.PORTB,
        dp.PORTC,
        dp.PORTD,
        dp.PORTE,
        dp.PORTF,
        dp.PORTG,
        dp.PORTH,
        dp.PORTJ,
        dp.PORTK,
        dp.PORTL,
    );
    let mut delay = arduino_mega2560::Delay::new();
    // USART0 on d0 (RX) / d1 (TX) at 57600 baud.
    let mut serial = arduino_mega2560::Serial::new(
        dp.USART0,
        pins.d0,
        pins.d1.into_output(&mut pins.ddr),
        57600.into_baudrate(),
    );
    ufmt::uwriteln!(&mut serial, "Reading analog inputs ...\r").void_unwrap();
    let mut adc = arduino_mega2560::adc::Adc::new(dp.ADC, Default::default());
    // One-off readings of the internal bandgap reference and ground channels.
    let (vbg, gnd): (u16, u16) = (
        nb::block!(adc.read(&mut arduino_mega2560::adc::channel::Vbg)).void_unwrap(),
        nb::block!(adc.read(&mut arduino_mega2560::adc::channel::Gnd)).void_unwrap(),
    );
    ufmt::uwriteln!(&mut serial, "Vbandgap: {}\r", vbg).void_unwrap();
    ufmt::uwriteln!(&mut serial, "GND: {}\r", gnd).void_unwrap();
    // Switch every analog pin into ADC input mode.
    let mut a0 = pins.a0.into_analog_input(&mut adc);
    let mut a1 = pins.a1.into_analog_input(&mut adc);
    let mut a2 = pins.a2.into_analog_input(&mut adc);
    let mut a3 = pins.a3.into_analog_input(&mut adc);
    let mut a4 = pins.a4.into_analog_input(&mut adc);
    let mut a5 = pins.a5.into_analog_input(&mut adc);
    let mut a6 = pins.a6.into_analog_input(&mut adc);
    let mut a7 = pins.a7.into_analog_input(&mut adc);
    let mut a8 = pins.a8.into_analog_input(&mut adc);
    let mut a9 = pins.a9.into_analog_input(&mut adc);
    let mut a10 = pins.a10.into_analog_input(&mut adc);
    let mut a11 = pins.a11.into_analog_input(&mut adc);
    let mut a12 = pins.a12.into_analog_input(&mut adc);
    let mut a13 = pins.a13.into_analog_input(&mut adc);
    let mut a14 = pins.a14.into_analog_input(&mut adc);
    let mut a15 = pins.a15.into_analog_input(&mut adc);
    loop {
        // Blocking read of every channel in pin order.
        let values: [u16; 16] = [
            nb::block!(adc.read(&mut a0)).void_unwrap(),
            nb::block!(adc.read(&mut a1)).void_unwrap(),
            nb::block!(adc.read(&mut a2)).void_unwrap(),
            nb::block!(adc.read(&mut a3)).void_unwrap(),
            nb::block!(adc.read(&mut a4)).void_unwrap(),
            nb::block!(adc.read(&mut a5)).void_unwrap(),
            nb::block!(adc.read(&mut a6)).void_unwrap(),
            nb::block!(adc.read(&mut a7)).void_unwrap(),
            nb::block!(adc.read(&mut a8)).void_unwrap(),
            nb::block!(adc.read(&mut a9)).void_unwrap(),
            nb::block!(adc.read(&mut a10)).void_unwrap(),
            nb::block!(adc.read(&mut a11)).void_unwrap(),
            nb::block!(adc.read(&mut a12)).void_unwrap(),
            nb::block!(adc.read(&mut a13)).void_unwrap(),
            nb::block!(adc.read(&mut a14)).void_unwrap(),
            nb::block!(adc.read(&mut a15)).void_unwrap(),
        ];
        for (i, v) in values.iter().enumerate() {
            ufmt::uwrite!(&mut serial, "A{}: {} ", i, v).void_unwrap();
        }
        ufmt::uwriteln!(&mut serial, "\r").void_unwrap();
        delay.delay_ms(1000u16);
    }
}
760567c74db7af2f8caa619c1f2256cddd85a081 | 11,757 | use crate::{udouble, umax, ModularInteger, ModularUnaryOps};
use core::ops::*;
use num_traits::{Inv, Pow};
// FIXME: use unchecked operators to speed up calculation (after https://github.com/rust-lang/rust/issues/85122)
/// An unsigned integer modulo (pseudo) Mersenne primes `2^P - K`, it supports `P` up to 127 and `K < 2^(P-1)`
///
/// IMPORTANT NOTE: this class assumes that `2^P-K` is a prime. During compilation, we don't do a full check
/// of the primality of `2^P-K`. If it's not a prime, then the modular division and inverse will panic.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct MersenneInt<const P: u8, const K: umax>(umax); // the underlying integer is in the closed range [0, 2^P-K]; the value 2^P-K is an alias of 0 produced by reduce_single (K == 1) and double() — see residue(). NOTE(review): derived PartialEq compares raw representatives, so the 0 and 2^P-K aliases compare unequal — confirm this is intended.
// XXX: support other primes as modulo, such as solinas prime, proth prime
impl<const P: u8, const K: umax> MersenneInt<P, K> {
    // Mask selecting the low P bits of a word.
    const BITMASK: umax = (1 << P) - 1;
    // The (pseudo) Mersenne modulus 2^P - K itself.
    const MODULUS: umax = (1 << P) - K;
    // Calculate v % Self::MODULUS, where v is a umax integer
    // Uses the identity 2^P ≡ K (mod 2^P - K): split v at bit P into
    // (hi, lo) and fold hi back in as hi*K + lo until hi reaches 0.
    // For K == 1 the result may equal MODULUS (an alias of 0, later
    // canonicalized by residue()); for K > 1 one conditional subtraction
    // finishes the reduction.
    const fn reduce_single(v: umax) -> umax {
        let mut lo = v & Self::BITMASK;
        let mut hi = v >> P;
        while hi > 0 {
            let sum = if K == 1 { hi + lo } else { hi * K + lo };
            lo = sum & Self::BITMASK;
            hi = sum >> P;
        }
        if K == 1 {
            lo
        } else {
            if lo >= Self::MODULUS {
                lo - Self::MODULUS
            } else {
                lo
            }
        }
    }
    // Calculate v % Self::MODULUS, where v is a udouble integer
    // Same folding as reduce_single, in two phases: first shrink the
    // double-width value until its high word is zero, then continue on the
    // remaining single-width high bits.
    fn reduce_double(v: udouble) -> umax {
        // reduce modulo
        let mut lo = v.lo & Self::BITMASK;
        let mut hi = v >> P;
        while hi.hi > 0 {
            // first reduce until high bits fit in umax
            let sum = if K == 1 { hi + lo } else { hi * K + lo };
            lo = sum.lo & Self::BITMASK;
            hi = sum >> P;
        }
        let mut hi = hi.lo;
        while hi > 0 {
            // then reduce the smaller high bits
            let sum = if K == 1 { hi + lo } else { hi * K + lo };
            lo = sum & Self::BITMASK;
            hi = sum >> P;
        }
        if K == 1 {
            lo
        } else {
            if lo >= Self::MODULUS {
                lo - Self::MODULUS
            } else {
                lo
            }
        }
    }
    /// Create a new MersenneInt instance from a normal integer (by modulo `2^P-K`)
    ///
    /// # Panics
    /// Asserts P <= 127, that K is odd and below 2^(P-1), and that the
    /// modulus is not divisible by 3/5/7/11/13 — a cheap composite filter,
    /// not a full primality check.
    #[inline]
    pub const fn new(n: umax) -> Self {
        // FIXME: use compile time checks, maybe after https://github.com/rust-lang/rust/issues/76560
        assert!(P <= 127);
        assert!(K > 0 && K < 2u128.pow(P as u32 - 1) && K % 2 == 1);
        assert!(
            Self::MODULUS % 3 != 0
                && Self::MODULUS % 5 != 0
                && Self::MODULUS % 7 != 0
                && Self::MODULUS % 11 != 0
                && Self::MODULUS % 13 != 0
        ); // error on easy composites
        Self(Self::reduce_single(n))
    }
}
impl<const P: u8, const K: umax> From<umax> for MersenneInt<P, K> {
    /// Converts an arbitrary integer by reducing it modulo `2^P - K`.
    ///
    /// Delegates to [`MersenneInt::new`] so the reduced-representative
    /// invariant holds however an instance is built; the previous
    /// implementation stored `v` unreduced, which broke every operation
    /// (Add's single conditional subtraction, Sub's wrap-around, ...)
    /// that assumes an already-reduced value.
    fn from(v: umax) -> Self {
        Self::new(v)
    }
}
impl<const P: u8, const K: umax> From<MersenneInt<P, K>> for umax {
    // Exposes the raw internal representative without normalization.
    // NOTE(review): after double() this can be exactly 2^P-K rather than
    // 0 — use residue() when a canonical value is required.
    fn from(v: MersenneInt<P, K>) -> Self {
        v.0
    }
}
impl<const P: u8, const K: umax> Add for MersenneInt<P, K> {
    type Output = Self;
    #[inline]
    // Operands are already reduced, so a single conditional subtraction
    // brings the sum back into range.
    fn add(self, rhs: Self) -> Self {
        let sum = self.0 + rhs.0;
        Self(if sum >= Self::MODULUS {
            sum - Self::MODULUS
        } else {
            sum
        })
    }
}
impl<const P: u8, const K: umax> Sub for MersenneInt<P, K> {
    type Output = Self;
    #[inline]
    // Borrow-free subtraction: wrap around the modulus instead of
    // underflowing the unsigned word.
    fn sub(self, rhs: Self) -> Self {
        Self(if self.0 >= rhs.0 {
            self.0 - rhs.0
        } else {
            Self::MODULUS - (rhs.0 - self.0)
        })
    }
}
impl<const P: u8, const K: umax> Mul for MersenneInt<P, K> {
    type Output = Self;
    #[inline]
    // When P is below half the word size the product cannot overflow umax,
    // so multiply natively; otherwise go through the double-width helper.
    fn mul(self, rhs: Self) -> Self::Output {
        if (P as u32) < (umax::BITS / 2) {
            Self(Self::reduce_single(self.0 * rhs.0))
        } else {
            Self(Self::reduce_double(udouble::widening_mul(self.0, rhs.0)))
        }
    }
}
impl<const P: u8, const K: umax> Pow<umax> for MersenneInt<P, K> {
    type Output = Self;
    // Binary (square-and-multiply) exponentiation; exponents 1 and 2 are
    // short-circuited, and exponent 0 yields Self(1) from the loop.
    fn pow(self, rhs: umax) -> Self::Output {
        match rhs {
            1 => self,
            2 => self * self,
            _ => {
                let mut multi = self;
                let mut exp = rhs;
                let mut result = Self(1);
                while exp > 0 {
                    if exp & 1 != 0 {
                        result = result * multi;
                    }
                    multi = multi.square();
                    exp >>= 1;
                }
                result
            }
        }
    }
}
impl<const P: u8, const K: umax> Neg for MersenneInt<P, K> {
    type Output = Self;
    // Additive inverse; zero is special-cased so the result stays in range.
    fn neg(self) -> Self::Output {
        Self(if self.0 == 0 {
            0
        } else {
            Self::MODULUS - self.0
        })
    }
}
impl<const P: u8, const K: umax> Inv for MersenneInt<P, K> {
    type Output = Self;
    /// Modular inverse via the extended Euclidean algorithm (`invm`).
    ///
    /// # Panics
    /// Panics when no inverse exists, i.e. when `2^P - K` is not actually
    /// prime (the type only sanity-checks the modulus, see `new`).
    fn inv(self) -> Self::Output {
        // It seems that extended gcd is faster than using fermat's theorem a^-1 = a^(p-2) mod p
        // For faster inverse using fermat theorem, refer to https://eprint.iacr.org/2018/1038.pdf (haven't benchmarked with this)
        // When the modulus fits in a usize, do the arithmetic in native width.
        Self(if (P as u32) < usize::BITS {
            (self.0 as usize)
                .invm(&(Self::MODULUS as usize))
                .expect("the modulus should be a prime") as umax
        } else {
            self.0
                .invm(&Self::MODULUS)
                .expect("the modulus should be a prime")
        })
    }
}
impl<const P: u8, const K: umax> Div for MersenneInt<P, K> {
    type Output = Self;
    // Division is multiplication by the modular inverse; panics (via inv)
    // when the modulus is not prime.
    fn div(self, rhs: Self) -> Self::Output {
        self * rhs.inv()
    }
}
impl<const P: u8, const K: umax> ModularInteger for MersenneInt<P, K> {
    type Base = umax;
    #[inline]
    fn modulus(&self) -> &Self::Base {
        &Self::MODULUS
    }
    #[inline]
    // Canonical representative: the internal value may be exactly MODULUS
    // (an alias of 0 — see reduce_single for K == 1, and double() below),
    // so map that case back to 0.
    fn residue(&self) -> Self::Base {
        if self.0 == Self::MODULUS {
            0
        } else {
            self.0
        }
    }
    #[inline]
    fn convert(&self, n: Self::Base) -> Self {
        Self::new(n)
    }
    #[inline]
    // Note the strict `>` (unlike Add's `>=`): the doubled value may end
    // up equal to MODULUS, which residue() above canonicalizes to 0.
    fn double(self) -> Self {
        let sum = self.0 << 1;
        Self(if sum > Self::MODULUS {
            sum - Self::MODULUS
        } else {
            sum
        })
    }
    #[inline]
    // Same width dispatch as Mul: native squaring when it cannot overflow,
    // double-width otherwise.
    fn square(self) -> Self {
        if (P as u32) < (umax::BITS / 2) {
            Self(Self::reduce_single(self.0 * self.0))
        } else {
            Self(Self::reduce_double(udouble::widening_square(self.0)))
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{ModularCoreOps, ModularPow};
    use rand::random;

    // (Pseudo) Mersenne moduli matching the const parameters used below.
    const P1: u128 = (1 << 31) - 1;
    const P2: u128 = (1 << 61) - 1;
    const P3: u128 = (1 << 127) - 1;
    const P4: u128 = (1 << 32) - 5;
    const P5: u128 = (1 << 56) - 5;
    const P6: u128 = (1 << 122) - 3;
    // Number of random trials per test.
    const NRANDOM: u32 = 10;

    /// `new` must reduce an arbitrary u128 modulo `2^P - K`.
    #[test]
    fn creation_test() {
        // random creation test
        for _ in 0..NRANDOM {
            let a = random::<u128>();
            assert_eq!(MersenneInt::<31, 1>::new(a).residue(), a % P1);
            assert_eq!(MersenneInt::<61, 1>::new(a).residue(), a % P2);
            assert_eq!(MersenneInt::<127, 1>::new(a).residue(), a % P3);
            assert_eq!(MersenneInt::<32, 5>::new(a).residue(), a % P4);
            assert_eq!(MersenneInt::<56, 5>::new(a).residue(), a % P5);
            assert_eq!(MersenneInt::<122, 3>::new(a).residue(), a % P6);
        }
    }

    /// Every operator of `MersenneInt<P, K>` must agree with the plain
    /// modular arithmetic on u128 for the same modulus.
    #[test]
    fn test_against_prim() {
        // Checks all ops of MersenneInt<$p, $k> against u128 math mod $m;
        // deduplicates the six previously copy-pasted blocks.
        macro_rules! check_against_prim {
            ($p:literal, $k:literal, $m:expr, $a:expr, $b:expr, $e:expr) => {{
                let (a, b, e) = ($a, $b, $e);
                let am = MersenneInt::<$p, $k>::new(a);
                let bm = MersenneInt::<$p, $k>::new(b);
                assert_eq!((am + bm).residue(), a.addm(b, &$m));
                assert_eq!((am - bm).residue(), a.subm(b, &$m));
                assert_eq!((am * bm).residue(), a.mulm(b, &$m));
                assert_eq!((am / bm).residue(), a.mulm(b.invm(&$m).unwrap(), &$m));
                assert_eq!(am.neg().residue(), a.negm(&$m));
                assert_eq!(am.double().residue(), a.dblm(&$m));
                assert_eq!(am.square().residue(), a.sqm(&$m));
                assert_eq!(am.pow(e as u128).residue(), a.powm(e as u128, &$m));
            }};
        }

        for _ in 0..NRANDOM {
            let (a, b) = (random::<u128>(), random::<u128>());
            let e = random::<u8>();
            check_against_prim!(31, 1, P1, a, b, e);
            check_against_prim!(61, 1, P2, a, b, e);
            check_against_prim!(127, 1, P3, a, b, e);
            check_against_prim!(32, 5, P4, a, b, e);
            check_against_prim!(56, 5, P5, a, b, e);
            check_against_prim!(122, 3, P6, a, b, e);
        }
    }
}
| 33.881844 | 130 | 0.479884 |
1ee40e6bf8e731b75fd18c7309af098abd5cb9fe | 31,854 | #[doc = "Register `CLK_CLKSEL2` reader"]
pub struct R(crate::R<CLK_CLKSEL2_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<CLK_CLKSEL2_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<CLK_CLKSEL2_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<CLK_CLKSEL2_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `CLK_CLKSEL2` writer"]
pub struct W(crate::W<CLK_CLKSEL2_SPEC>);
// NOTE(review): svd2rust-generated newtype around the generic register
// writer; Deref/DerefMut/From make it transparent. Regenerate from the
// SVD rather than editing by hand.
impl core::ops::Deref for W {
    type Target = crate::W<CLK_CLKSEL2_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl From<crate::W<CLK_CLKSEL2_SPEC>> for W {
    #[inline(always)]
    fn from(writer: crate::W<CLK_CLKSEL2_SPEC>) -> Self {
        W(writer)
    }
}
#[doc = "PWM0 Clock Source Selection\nThe peripheral clock source of PWM0 is defined by PWM0SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK0.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information.\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PWM0SEL_A {
#[doc = "0: Clock source from PLL"]
_0 = 0,
#[doc = "1: Clock source from PCLK0"]
_1 = 1,
}
impl From<PWM0SEL_A> for bool {
#[inline(always)]
fn from(variant: PWM0SEL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `PWM0SEL` reader - PWM0 Clock Source Selection\nThe peripheral clock source of PWM0 is defined by PWM0SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK0.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct PWM0SEL_R(crate::FieldReader<bool, PWM0SEL_A>);
impl PWM0SEL_R {
pub(crate) fn new(bits: bool) -> Self {
PWM0SEL_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PWM0SEL_A {
match self.bits {
false => PWM0SEL_A::_0,
true => PWM0SEL_A::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline(always)]
pub fn is_0(&self) -> bool {
**self == PWM0SEL_A::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline(always)]
pub fn is_1(&self) -> bool {
**self == PWM0SEL_A::_1
}
}
impl core::ops::Deref for PWM0SEL_R {
type Target = crate::FieldReader<bool, PWM0SEL_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `PWM0SEL` writer - PWM0 Clock Source Selection\nThe peripheral clock source of PWM0 is defined by PWM0SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK0.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct PWM0SEL_W<'a> {
w: &'a mut W,
}
impl<'a> PWM0SEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PWM0SEL_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Clock source from PLL"]
#[inline(always)]
pub fn _0(self) -> &'a mut W {
self.variant(PWM0SEL_A::_0)
}
#[doc = "Clock source from PCLK0"]
#[inline(always)]
pub fn _1(self) -> &'a mut W {
self.variant(PWM0SEL_A::_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
#[doc = "PWM1 Clock Source Selection\nThe peripheral clock source of PWM1 is defined by PWM1SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK1.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information.\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PWM1SEL_A {
#[doc = "0: Clock source from PLL"]
_0 = 0,
#[doc = "1: Clock source from PCLK1"]
_1 = 1,
}
impl From<PWM1SEL_A> for bool {
#[inline(always)]
fn from(variant: PWM1SEL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `PWM1SEL` reader - PWM1 Clock Source Selection\nThe peripheral clock source of PWM1 is defined by PWM1SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK1.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct PWM1SEL_R(crate::FieldReader<bool, PWM1SEL_A>);
impl PWM1SEL_R {
pub(crate) fn new(bits: bool) -> Self {
PWM1SEL_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PWM1SEL_A {
match self.bits {
false => PWM1SEL_A::_0,
true => PWM1SEL_A::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline(always)]
pub fn is_0(&self) -> bool {
**self == PWM1SEL_A::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline(always)]
pub fn is_1(&self) -> bool {
**self == PWM1SEL_A::_1
}
}
impl core::ops::Deref for PWM1SEL_R {
type Target = crate::FieldReader<bool, PWM1SEL_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `PWM1SEL` writer - PWM1 Clock Source Selection\nThe peripheral clock source of PWM1 is defined by PWM1SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK1.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct PWM1SEL_W<'a> {
w: &'a mut W,
}
impl<'a> PWM1SEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PWM1SEL_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Clock source from PLL"]
#[inline(always)]
pub fn _0(self) -> &'a mut W {
self.variant(PWM1SEL_A::_0)
}
#[doc = "Clock source from PCLK1"]
#[inline(always)]
pub fn _1(self) -> &'a mut W {
self.variant(PWM1SEL_A::_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1);
self.w
}
}
#[doc = "QSPI0 Clock Source Selection\nNote: If PLL is not supported, clock source of selection '01' will be changed to PCLK0.\nNote: If HXT is not supported, clock source of selection '00' will be stopped.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information.\n\nValue on reset: 2"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
// Enumerated values of the 2-bit QSPI0SEL field; discriminants equal the raw
// field encoding, so conversion to u8 is a plain cast.
pub enum QSPI0SEL_A {
    #[doc = "0: Clock source from external high speed crystal oscillator (HXT)"]
    _0 = 0,
    #[doc = "1: Clock source from PLL"]
    _1 = 1,
    #[doc = "2: Clock source from PCLK0"]
    _2 = 2,
    #[doc = "3: Clock source from internal high speed RC oscillator (HIRC)"]
    _3 = 3,
}
impl From<QSPI0SEL_A> for u8 {
    #[inline(always)]
    fn from(variant: QSPI0SEL_A) -> Self {
        variant as _
    }
}
#[doc = "Field `QSPI0SEL` reader - QSPI0 Clock Source Selection\nNote: If PLL is not supported, clock source of selection '01' will be changed to PCLK0.\nNote: If HXT is not supported, clock source of selection '00' will be stopped.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct QSPI0SEL_R(crate::FieldReader<u8, QSPI0SEL_A>);
impl QSPI0SEL_R {
    pub(crate) fn new(bits: u8) -> Self {
        QSPI0SEL_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> QSPI0SEL_A {
        match self.bits {
            0 => QSPI0SEL_A::_0,
            1 => QSPI0SEL_A::_1,
            2 => QSPI0SEL_A::_2,
            3 => QSPI0SEL_A::_3,
            // `bits` is produced by a 2-bit extraction, so values > 3 cannot occur.
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == QSPI0SEL_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == QSPI0SEL_A::_1
    }
    #[doc = "Checks if the value of the field is `_2`"]
    #[inline(always)]
    pub fn is_2(&self) -> bool {
        **self == QSPI0SEL_A::_2
    }
    #[doc = "Checks if the value of the field is `_3`"]
    #[inline(always)]
    pub fn is_3(&self) -> bool {
        **self == QSPI0SEL_A::_3
    }
}
// Deref to the generic FieldReader for the shared helper API.
impl core::ops::Deref for QSPI0SEL_R {
    type Target = crate::FieldReader<u8, QSPI0SEL_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `QSPI0SEL` writer - QSPI0 Clock Source Selection\nNote: If PLL is not supported, clock source of selection '01' will be changed to PCLK0.\nNote: If HXT is not supported, clock source of selection '00' will be stopped.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct QSPI0SEL_W<'a> {
    // Mutable borrow of the register writer; every method funnels into `bits()`.
    w: &'a mut W,
}
impl<'a> QSPI0SEL_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: QSPI0SEL_A) -> &'a mut W {
        self.bits(variant.into())
    }
    #[doc = "Clock source from external high speed crystal oscillator (HXT)"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(QSPI0SEL_A::_0)
    }
    #[doc = "Clock source from PLL"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(QSPI0SEL_A::_1)
    }
    #[doc = "Clock source from PCLK0"]
    #[inline(always)]
    pub fn _2(self) -> &'a mut W {
        self.variant(QSPI0SEL_A::_2)
    }
    #[doc = "Clock source from internal high speed RC oscillator (HIRC)"]
    #[inline(always)]
    pub fn _3(self) -> &'a mut W {
        self.variant(QSPI0SEL_A::_3)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // QSPI0SEL occupies bits 3:2: clear both bits, then OR in the value.
        self.w.bits = (self.w.bits & !(0x03 << 2)) | ((value as u32 & 0x03) << 2);
        self.w
    }
}
#[doc = "SPI0 Clock Source Selection\nNote: If PLL is not supported, clock source of selection '01' will be changed to PCLK1.\nNote: If HXT is not supported, clock source of selection '00' will be stopped.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information.\n\nValue on reset: 2"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
// Enumerated values of the 2-bit SPI0SEL field; discriminants equal the raw
// field encoding, so conversion to u8 is a plain cast.
pub enum SPI0SEL_A {
    #[doc = "0: Clock source from external high speed crystal oscillator (HXT)"]
    _0 = 0,
    #[doc = "1: Clock source from PLL"]
    _1 = 1,
    #[doc = "2: Clock source from PCLK1"]
    _2 = 2,
    #[doc = "3: Clock source from internal high speed RC oscillator (HIRC)"]
    _3 = 3,
}
impl From<SPI0SEL_A> for u8 {
    #[inline(always)]
    fn from(variant: SPI0SEL_A) -> Self {
        variant as _
    }
}
#[doc = "Field `SPI0SEL` reader - SPI0 Clock Source Selection\nNote: If PLL is not supported, clock source of selection '01' will be changed to PCLK1.\nNote: If HXT is not supported, clock source of selection '00' will be stopped.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct SPI0SEL_R(crate::FieldReader<u8, SPI0SEL_A>);
impl SPI0SEL_R {
    pub(crate) fn new(bits: u8) -> Self {
        SPI0SEL_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> SPI0SEL_A {
        match self.bits {
            0 => SPI0SEL_A::_0,
            1 => SPI0SEL_A::_1,
            2 => SPI0SEL_A::_2,
            3 => SPI0SEL_A::_3,
            // `bits` is produced by a 2-bit extraction, so values > 3 cannot occur.
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == SPI0SEL_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == SPI0SEL_A::_1
    }
    #[doc = "Checks if the value of the field is `_2`"]
    #[inline(always)]
    pub fn is_2(&self) -> bool {
        **self == SPI0SEL_A::_2
    }
    #[doc = "Checks if the value of the field is `_3`"]
    #[inline(always)]
    pub fn is_3(&self) -> bool {
        **self == SPI0SEL_A::_3
    }
}
// Deref to the generic FieldReader for the shared helper API.
impl core::ops::Deref for SPI0SEL_R {
    type Target = crate::FieldReader<u8, SPI0SEL_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `SPI0SEL` writer - SPI0 Clock Source Selection\nNote: If PLL is not supported, clock source of selection '01' will be changed to PCLK1.\nNote: If HXT is not supported, clock source of selection '00' will be stopped.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct SPI0SEL_W<'a> {
    // Mutable borrow of the register writer; every method funnels into `bits()`.
    w: &'a mut W,
}
impl<'a> SPI0SEL_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: SPI0SEL_A) -> &'a mut W {
        self.bits(variant.into())
    }
    #[doc = "Clock source from external high speed crystal oscillator (HXT)"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(SPI0SEL_A::_0)
    }
    #[doc = "Clock source from PLL"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(SPI0SEL_A::_1)
    }
    #[doc = "Clock source from PCLK1"]
    #[inline(always)]
    pub fn _2(self) -> &'a mut W {
        self.variant(SPI0SEL_A::_2)
    }
    #[doc = "Clock source from internal high speed RC oscillator (HIRC)"]
    #[inline(always)]
    pub fn _3(self) -> &'a mut W {
        self.variant(SPI0SEL_A::_3)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // SPI0SEL occupies bits 5:4: clear both bits, then OR in the value.
        self.w.bits = (self.w.bits & !(0x03 << 4)) | ((value as u32 & 0x03) << 4);
        self.w
    }
}
#[doc = "BPWM0 Clock Source Selection\nThe peripheral clock source of BPWM0 is defined by BPWM0SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK0.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information.\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
// Enumerated values of the single-bit BPWM0SEL field; a 1-bit field maps to bool.
pub enum BPWM0SEL_A {
    #[doc = "0: Clock source from PLL"]
    _0 = 0,
    #[doc = "1: Clock source from PCLK0"]
    _1 = 1,
}
impl From<BPWM0SEL_A> for bool {
    #[inline(always)]
    fn from(variant: BPWM0SEL_A) -> Self {
        // Map discriminant 0 -> false, 1 -> true.
        variant as u8 != 0
    }
}
#[doc = "Field `BPWM0SEL` reader - BPWM0 Clock Source Selection\nThe peripheral clock source of BPWM0 is defined by BPWM0SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK0.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct BPWM0SEL_R(crate::FieldReader<bool, BPWM0SEL_A>);
impl BPWM0SEL_R {
    pub(crate) fn new(bits: bool) -> Self {
        BPWM0SEL_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> BPWM0SEL_A {
        match self.bits {
            false => BPWM0SEL_A::_0,
            true => BPWM0SEL_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == BPWM0SEL_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == BPWM0SEL_A::_1
    }
}
// Deref to the generic FieldReader for the shared helper API.
impl core::ops::Deref for BPWM0SEL_R {
    type Target = crate::FieldReader<bool, BPWM0SEL_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `BPWM0SEL` writer - BPWM0 Clock Source Selection\nThe peripheral clock source of BPWM0 is defined by BPWM0SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK0.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct BPWM0SEL_W<'a> {
    // Mutable borrow of the register writer; every method funnels into `bit()`.
    w: &'a mut W,
}
impl<'a> BPWM0SEL_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: BPWM0SEL_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Clock source from PLL"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(BPWM0SEL_A::_0)
    }
    #[doc = "Clock source from PCLK0"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(BPWM0SEL_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // BPWM0SEL occupies bit 8: clear that bit, then OR in the new value.
        self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8);
        self.w
    }
}
#[doc = "BPWM1 Clock Source Selection\nThe peripheral clock source of BPWM1 is defined by BPWM1SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK1.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information.\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
// Enumerated values of the single-bit BPWM1SEL field; a 1-bit field maps to bool.
pub enum BPWM1SEL_A {
    #[doc = "0: Clock source from PLL"]
    _0 = 0,
    #[doc = "1: Clock source from PCLK1"]
    _1 = 1,
}
impl From<BPWM1SEL_A> for bool {
    #[inline(always)]
    fn from(variant: BPWM1SEL_A) -> Self {
        // Map discriminant 0 -> false, 1 -> true.
        variant as u8 != 0
    }
}
#[doc = "Field `BPWM1SEL` reader - BPWM1 Clock Source Selection\nThe peripheral clock source of BPWM1 is defined by BPWM1SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK1.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct BPWM1SEL_R(crate::FieldReader<bool, BPWM1SEL_A>);
impl BPWM1SEL_R {
    pub(crate) fn new(bits: bool) -> Self {
        BPWM1SEL_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> BPWM1SEL_A {
        match self.bits {
            false => BPWM1SEL_A::_0,
            true => BPWM1SEL_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == BPWM1SEL_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == BPWM1SEL_A::_1
    }
}
// Deref to the generic FieldReader for the shared helper API.
impl core::ops::Deref for BPWM1SEL_R {
    type Target = crate::FieldReader<bool, BPWM1SEL_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `BPWM1SEL` writer - BPWM1 Clock Source Selection\nThe peripheral clock source of BPWM1 is defined by BPWM1SEL. \nNote: If PLL is not supported, clock source of selection '0' will be changed to PCLK1.\nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct BPWM1SEL_W<'a> {
    // Mutable borrow of the register writer; every method funnels into `bit()`.
    w: &'a mut W,
}
impl<'a> BPWM1SEL_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: BPWM1SEL_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Clock source from PLL"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(BPWM1SEL_A::_0)
    }
    #[doc = "Clock source from PCLK1"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(BPWM1SEL_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // BPWM1SEL occupies bit 9: clear that bit, then OR in the new value.
        self.w.bits = (self.w.bits & !(0x01 << 9)) | ((value as u32 & 0x01) << 9);
        self.w
    }
}
#[doc = "ADC Clock Source Selection\nNote: If PLL is not supported, clock source of selection '01' will be changed to PCLK1.\nNote: If HXT is not supported, clock source of selection '00' will be stopped. \nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information.\n\nValue on reset: 2"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
// Enumerated values of the 2-bit ADCSEL field; discriminants equal the raw
// field encoding, so conversion to u8 is a plain cast.
pub enum ADCSEL_A {
    #[doc = "0: Clock source from external high speed crystal oscillator (HXT) clock"]
    _0 = 0,
    #[doc = "1: Clock source from PLL"]
    _1 = 1,
    #[doc = "2: Clock source from PCLK1"]
    _2 = 2,
    #[doc = "3: Clock source from internal high speed RC oscillator (HIRC) clock"]
    _3 = 3,
}
impl From<ADCSEL_A> for u8 {
    #[inline(always)]
    fn from(variant: ADCSEL_A) -> Self {
        variant as _
    }
}
#[doc = "Field `ADCSEL` reader - ADC Clock Source Selection\nNote: If PLL is not supported, clock source of selection '01' will be changed to PCLK1.\nNote: If HXT is not supported, clock source of selection '00' will be stopped. \nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct ADCSEL_R(crate::FieldReader<u8, ADCSEL_A>);
impl ADCSEL_R {
    pub(crate) fn new(bits: u8) -> Self {
        ADCSEL_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADCSEL_A {
        match self.bits {
            0 => ADCSEL_A::_0,
            1 => ADCSEL_A::_1,
            2 => ADCSEL_A::_2,
            3 => ADCSEL_A::_3,
            // `bits` is produced by a 2-bit extraction, so values > 3 cannot occur.
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == ADCSEL_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == ADCSEL_A::_1
    }
    #[doc = "Checks if the value of the field is `_2`"]
    #[inline(always)]
    pub fn is_2(&self) -> bool {
        **self == ADCSEL_A::_2
    }
    #[doc = "Checks if the value of the field is `_3`"]
    #[inline(always)]
    pub fn is_3(&self) -> bool {
        **self == ADCSEL_A::_3
    }
}
// Deref to the generic FieldReader for the shared helper API.
impl core::ops::Deref for ADCSEL_R {
    type Target = crate::FieldReader<u8, ADCSEL_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `ADCSEL` writer - ADC Clock Source Selection\nNote: If PLL is not supported, clock source of selection '01' will be changed to PCLK1.\nNote: If HXT is not supported, clock source of selection '00' will be stopped. \nPlease refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
pub struct ADCSEL_W<'a> {
    // Mutable borrow of the register writer; every method funnels into `bits()`.
    w: &'a mut W,
}
impl<'a> ADCSEL_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: ADCSEL_A) -> &'a mut W {
        self.bits(variant.into())
    }
    #[doc = "Clock source from external high speed crystal oscillator (HXT) clock"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(ADCSEL_A::_0)
    }
    #[doc = "Clock source from PLL"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(ADCSEL_A::_1)
    }
    #[doc = "Clock source from PCLK1"]
    #[inline(always)]
    pub fn _2(self) -> &'a mut W {
        self.variant(ADCSEL_A::_2)
    }
    #[doc = "Clock source from internal high speed RC oscillator (HIRC) clock"]
    #[inline(always)]
    pub fn _3(self) -> &'a mut W {
        self.variant(ADCSEL_A::_3)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // ADCSEL occupies bits 21:20: clear both bits, then OR in the value.
        self.w.bits = (self.w.bits & !(0x03 << 20)) | ((value as u32 & 0x03) << 20);
        self.w
    }
}
// Read accessors: each method extracts its field's bits from the cached
// register value and wraps them in the field-specific reader type.
impl R {
    #[doc = "Bit 0 - PWM0 Clock Source Selection The peripheral clock source of PWM0 is defined by PWM0SEL. Note: If PLL is not supported, clock source of selection '0' will be changed to PCLK0. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn pwm0sel(&self) -> PWM0SEL_R {
        PWM0SEL_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - PWM1 Clock Source Selection The peripheral clock source of PWM1 is defined by PWM1SEL. Note: If PLL is not supported, clock source of selection '0' will be changed to PCLK1. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn pwm1sel(&self) -> PWM1SEL_R {
        PWM1SEL_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bits 2:3 - QSPI0 Clock Source Selection Note: If PLL is not supported, clock source of selection '01' will be changed to PCLK0. Note: If HXT is not supported, clock source of selection '00' will be stopped. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn qspi0sel(&self) -> QSPI0SEL_R {
        QSPI0SEL_R::new(((self.bits >> 2) & 0x03) as u8)
    }
    #[doc = "Bits 4:5 - SPI0 Clock Source Selection Note: If PLL is not supported, clock source of selection '01' will be changed to PCLK1. Note: If HXT is not supported, clock source of selection '00' will be stopped. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn spi0sel(&self) -> SPI0SEL_R {
        SPI0SEL_R::new(((self.bits >> 4) & 0x03) as u8)
    }
    #[doc = "Bit 8 - BPWM0 Clock Source Selection The peripheral clock source of BPWM0 is defined by BPWM0SEL. Note: If PLL is not supported, clock source of selection '0' will be changed to PCLK0. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn bpwm0sel(&self) -> BPWM0SEL_R {
        BPWM0SEL_R::new(((self.bits >> 8) & 0x01) != 0)
    }
    #[doc = "Bit 9 - BPWM1 Clock Source Selection The peripheral clock source of BPWM1 is defined by BPWM1SEL. Note: If PLL is not supported, clock source of selection '0' will be changed to PCLK1. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn bpwm1sel(&self) -> BPWM1SEL_R {
        BPWM1SEL_R::new(((self.bits >> 9) & 0x01) != 0)
    }
    #[doc = "Bits 20:21 - ADC Clock Source Selection Note: If PLL is not supported, clock source of selection '01' will be changed to PCLK1. Note: If HXT is not supported, clock source of selection '00' will be stopped. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn adcsel(&self) -> ADCSEL_R {
        ADCSEL_R::new(((self.bits >> 20) & 0x03) as u8)
    }
}
// Write accessors: each method returns a field-writer proxy borrowing this
// writer, so calls can be chained inside a single `write`/`modify` closure.
impl W {
    #[doc = "Bit 0 - PWM0 Clock Source Selection The peripheral clock source of PWM0 is defined by PWM0SEL. Note: If PLL is not supported, clock source of selection '0' will be changed to PCLK0. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn pwm0sel(&mut self) -> PWM0SEL_W {
        PWM0SEL_W { w: self }
    }
    #[doc = "Bit 1 - PWM1 Clock Source Selection The peripheral clock source of PWM1 is defined by PWM1SEL. Note: If PLL is not supported, clock source of selection '0' will be changed to PCLK1. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn pwm1sel(&mut self) -> PWM1SEL_W {
        PWM1SEL_W { w: self }
    }
    #[doc = "Bits 2:3 - QSPI0 Clock Source Selection Note: If PLL is not supported, clock source of selection '01' will be changed to PCLK0. Note: If HXT is not supported, clock source of selection '00' will be stopped. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn qspi0sel(&mut self) -> QSPI0SEL_W {
        QSPI0SEL_W { w: self }
    }
    #[doc = "Bits 4:5 - SPI0 Clock Source Selection Note: If PLL is not supported, clock source of selection '01' will be changed to PCLK1. Note: If HXT is not supported, clock source of selection '00' will be stopped. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn spi0sel(&mut self) -> SPI0SEL_W {
        SPI0SEL_W { w: self }
    }
    #[doc = "Bit 8 - BPWM0 Clock Source Selection The peripheral clock source of BPWM0 is defined by BPWM0SEL. Note: If PLL is not supported, clock source of selection '0' will be changed to PCLK0. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn bpwm0sel(&mut self) -> BPWM0SEL_W {
        BPWM0SEL_W { w: self }
    }
    #[doc = "Bit 9 - BPWM1 Clock Source Selection The peripheral clock source of BPWM1 is defined by BPWM1SEL. Note: If PLL is not supported, clock source of selection '0' will be changed to PCLK1. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn bpwm1sel(&mut self) -> BPWM1SEL_W {
        BPWM1SEL_W { w: self }
    }
    #[doc = "Bits 20:21 - ADC Clock Source Selection Note: If PLL is not supported, clock source of selection '01' will be changed to PCLK1. Note: If HXT is not supported, clock source of selection '00' will be stopped. Please refer to section 3.2 NuMicro M031/M032 Series Selection Guide for detailed information."]
    #[inline(always)]
    pub fn adcsel(&mut self) -> ADCSEL_W {
        ADCSEL_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // `unsafe`: the caller must ensure the raw value is valid for every field
    // of this register, since no per-field masking is applied here.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = "Clock Source Select Control Register 2\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [clk_clksel2](index.html) module"]
// Zero-sized marker type identifying the CLK_CLKSEL2 register to the generic
// register API; the trait impls below describe its access properties.
pub struct CLK_CLKSEL2_SPEC;
impl crate::RegisterSpec for CLK_CLKSEL2_SPEC {
    // The register is accessed as a 32-bit word.
    type Ux = u32;
}
#[doc = "`read()` method returns [clk_clksel2::R](R) reader structure"]
impl crate::Readable for CLK_CLKSEL2_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [clk_clksel2::W](W) writer structure"]
impl crate::Writable for CLK_CLKSEL2_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets CLK_CLKSEL2 to value 0x0020_032b"]
impl crate::Resettable for CLK_CLKSEL2_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        // 0x0020_032b encodes each field's documented "Value on reset" above
        // (e.g. QSPI0SEL/SPI0SEL/ADCSEL = 2, PWM0/1SEL and BPWM0/1SEL = 1).
        0x0020_032b
    }
}
| 41.261658 | 430 | 0.628116 |
48b4a140ebfeecb9f9f94352e5623b79e768bf98 | 18,515 | // Copyright (C) 2017-2018 Baidu, Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Baidu, Inc., nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#![crate_name = "helloworldsampleenclave"]
#![crate_type = "staticlib"]
#![cfg_attr(not(target_env = "sgx"), no_std)]
#![cfg_attr(target_env = "sgx", feature(rustc_private))]
#![feature(never_type)]
extern crate sgx_types;
#[cfg(not(target_env = "sgx"))]
#[macro_use]
extern crate sgx_tstd as std;
extern crate sgx_tunittest;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_test;
extern crate fnv;
use sgx_types::*;
use std::string::String;
use std::vec::Vec;
use std::io::{self, Write};
use std::{slice};
use sgx_tunittest::*;
#[macro_use]
mod macros;
mod bytes;
pub mod unstable;
mod test_de;
mod test_annotations;
mod test_borrow;
//mod test_gen;
mod test_identifier;
mod test_macros;
mod test_remote;
mod test_roundtrip;
mod test_ser;
mod test_unstable;
mod test_value;
mod test_ignored_any;
#[no_mangle]
pub extern "C" fn say_something(some_string: *const u8, some_len: usize) -> sgx_status_t {
let str_slice = unsafe { slice::from_raw_parts(some_string, some_len) };
let _ = io::stdout().write(str_slice);
// A sample &'static string
let rust_raw_string = "This is a in-Enclave ";
// An array
let word:[u8;4] = [82, 117, 115, 116];
// An vector
let word_vec:Vec<u8> = vec![32, 115, 116, 114, 105, 110, 103, 33];
// Construct a string from &'static string
let mut hello_string = String::from(rust_raw_string);
// Iterate on word array
for c in word.iter() {
hello_string.push(*c as char);
}
// Rust style convertion
hello_string += String::from_utf8(word_vec).expect("Invalid UTF-8")
.as_str();
// Ocall to normal world for output
println!("{}", &hello_string);
rsgx_unit_tests!(test_de::test_osstring,
test_de::test_cstr,
test_de::test_cstr_internal_null,
test_de::test_cstr_internal_null_end,
test_de::test_never_type,
test_de::test_bool,
test_de::test_isize,
test_de::test_ints,
test_de::test_uints,
test_de::test_floats,
test_de::test_small_int_to_128,
test_de::test_char,
test_de::test_string,
test_de::test_option,
test_de::test_result,
test_de::test_unit,
test_de::test_unit_struct,
test_de::test_newtype_struct,
test_de::test_tuple_struct,
test_de::test_btreeset,
test_de::test_hashset,
test_de::test_vec,
test_de::test_array,
test_de::test_tuple,
test_de::test_btreemap,
test_de::test_hashmap,
test_de::test_struct,
test_de::test_struct_with_skip,
test_de::test_struct_skip_all,
test_de::test_struct_skip_default,
test_de::test_struct_skip_all_deny_unknown,
test_de::test_struct_default,
test_de::test_enum_unit,
test_de::test_enum_simple,
test_de::test_enum_seq,
test_de::test_enum_map,
test_de::test_enum_unit_usize,
test_de::test_enum_unit_bytes,
test_de::test_enum_other_unit,
test_de::test_enum_other,
test_de::test_box,
test_de::test_boxed_slice,
test_de::test_duration,
test_de::test_system_time,
test_de::test_range,
test_de::test_range_inclusive,
test_de::test_bound,
test_de::test_path,
test_de::test_path_buf,
test_de::test_cstring,
test_de::test_rc,
test_de::test_rc_weak_some,
test_de::test_rc_weak_none,
test_de::test_arc,
test_de::test_arc_weak_some,
test_de::test_arc_weak_none,
test_de::test_wrapping,
test_de::test_rc_dst,
test_de::test_arc_dst,
test_de::test_net_ipv4addr_readable,
test_de::test_net_ipv6addr_readable,
test_de::test_net_ipaddr_readable,
test_de::test_net_socketaddr_readable,
test_de::test_net_ipv4addr_compact,
test_de::test_net_ipv6addr_compact,
test_de::test_net_ipaddr_compact,
test_de::test_net_socketaddr_compact,
test_de::test_never_result,
test_de::test_unknown_field,
test_de::test_skipped_field_is_unknown,
test_de::test_skip_all_deny_unknown,
test_de::test_unknown_variant,
test_de::test_enum_skipped_variant,
test_de::test_enum_skip_all,
test_de::test_duplicate_field_struct,
test_de::test_duplicate_field_enum,
test_de::test_enum_out_of_range,
test_de::test_short_tuple,
test_de::test_short_array,
test_de::test_cstring_internal_null,
test_de::test_cstring_internal_null_end,
test_de::test_unit_from_empty_seq,
test_de::test_unit_from_empty_seq_without_len,
test_de::test_unit_from_tuple_struct,
test_de::test_string_from_unit,
test_de::test_btreeset_from_unit,
test_de::test_btreeset_from_unit_struct,
test_de::test_hashset_from_unit,
test_de::test_hashset_from_unit_struct,
test_de::test_vec_from_unit,
test_de::test_vec_from_unit_struct,
test_de::test_zero_array_from_unit,
test_de::test_zero_array_from_unit_struct,
test_de::test_btreemap_from_unit,
test_de::test_btreemap_from_unit_struct,
test_de::test_hashmap_from_unit,
test_de::test_hashmap_from_unit_struct,
test_de::test_bool_from_string,
test_de::test_number_from_string,
test_de::test_integer_from_float,
test_de::test_unit_struct_from_seq,
test_de::test_wrapping_overflow,
test_de::test_ignored_any,
test_annotations::test_default_struct,
test_annotations::test_default_tuple,
test_annotations::test_default_struct_variant,
test_annotations::test_default_tuple_variant,
test_annotations::test_no_std_default,
test_annotations::test_elt_not_deserialize,
test_annotations::test_ignore_unknown,
test_annotations::test_rename_struct,
test_annotations::test_unknown_field_rename_struct,
test_annotations::test_rename_enum,
test_annotations::test_unknown_field_rename_enum,
test_annotations::test_skip_serializing_struct,
test_annotations::test_skip_serializing_tuple_struct,
test_annotations::test_skip_struct,
test_annotations::test_skip_serializing_enum,
test_annotations::test_elt_not_serialize,
test_annotations::test_serialize_with_struct,
test_annotations::test_serialize_with_enum,
test_annotations::test_serialize_with_variant,
test_annotations::test_deserialize_with_variant,
test_annotations::test_deserialize_with_struct,
test_annotations::test_deserialize_with_enum,
test_annotations::test_missing_renamed_field_struct,
test_annotations::test_missing_renamed_field_enum,
test_annotations::test_invalid_length_enum,
test_annotations::test_from_into_traits,
test_annotations::test_collect_other,
test_annotations::test_flatten_struct_enum,
test_annotations::test_flatten_struct_tag_content_enum,
test_annotations::test_flatten_struct_tag_content_enum_newtype,
test_annotations::test_unknown_field_in_flatten,
test_annotations::test_complex_flatten,
test_annotations::test_flatten_map_twice,
test_annotations::test_flatten_unsupported_type,
test_annotations::test_non_string_keys,
test_annotations::test_lifetime_propagation_for_flatten,
test_annotations::test_flatten_enum_newtype,
test_annotations::test_flatten_internally_tagged,
test_annotations::test_externally_tagged_enum_containing_flatten,
test_annotations::test_internally_tagged_enum_containing_flatten,
test_annotations::test_adjacently_tagged_enum_containing_flatten,
test_annotations::test_untagged_enum_containing_flatten,
test_annotations::test_flatten_untagged_enum,
test_annotations::test_flatten_option,
test_annotations::test_transparent_struct,
test_annotations::test_transparent_tuple_struct,
test_annotations::test_internally_tagged_unit_enum_with_unknown_fields,
test_annotations::test_flattened_internally_tagged_unit_enum_with_unknown_fields,
test_annotations::test_flatten_any_after_flatten_struct,
test_borrow::test_borrowed_str,
test_borrow::test_borrowed_str_from_string,
test_borrow::test_borrowed_str_from_str,
test_borrow::test_string_from_borrowed_str,
test_borrow::test_borrowed_bytes,
test_borrow::test_borrowed_bytes_from_bytebuf,
test_borrow::test_borrowed_bytes_from_bytes,
test_borrow::test_tuple,
test_borrow::test_struct,
test_borrow::test_cow,
test_borrow::test_lifetimes,
//test_gen::test_gen,
test_identifier::test_variant_identifier,
test_identifier::test_field_identifier,
test_identifier::test_unit_fallthrough,
test_identifier::test_newtype_fallthrough,
test_identifier::test_newtype_fallthrough_generic,
test_macros::test_named_unit,
test_macros::test_ser_named_tuple,
test_macros::test_de_named_tuple,
test_macros::test_ser_named_map,
test_macros::test_de_named_map,
test_macros::test_ser_enum_unit,
test_macros::test_ser_enum_seq,
test_macros::test_ser_enum_map,
test_macros::test_de_enum_unit,
test_macros::test_de_enum_seq,
test_macros::test_de_enum_map,
test_macros::test_lifetimes,
test_macros::test_generic_struct,
test_macros::test_generic_newtype_struct,
test_macros::test_untagged_newtype_struct,
test_macros::test_adjacently_tagged_newtype_struct,
test_macros::test_generic_tuple_struct,
test_macros::test_generic_enum_unit,
test_macros::test_generic_enum_newtype,
test_macros::test_generic_enum_seq,
test_macros::test_generic_enum_map,
test_macros::test_default_ty_param,
test_macros::test_enum_state_field,
test_macros::test_untagged_enum,
test_macros::test_internally_tagged_enum,
test_macros::test_internally_tagged_bytes,
test_macros::test_internally_tagged_struct_variant_containing_unit_variant,
test_macros::test_internally_tagged_borrow,
test_macros::test_adjacently_tagged_enum,
test_macros::test_adjacently_tagged_enum_deny_unknown_fields,
test_macros::test_enum_in_internally_tagged_enum,
test_macros::test_internally_tagged_struct,
test_macros::test_internally_tagged_braced_struct_with_zero_fields,
test_macros::test_internally_tagged_struct_with_flattened_field,
test_macros::test_enum_in_untagged_enum,
test_macros::test_untagged_bytes,
test_macros::test_rename_all,
test_macros::test_untagged_newtype_variant_containing_unit_struct_not_map,
test_macros::test_internally_tagged_newtype_variant_containing_unit_struct,
test_macros::test_untagged_enum_with_flattened_integer_key,
test_roundtrip::ip_addr_roundtrip,
test_roundtrip::socket_addr_roundtrip,
test_ser::test_unit,
test_ser::test_bool,
test_ser::test_isizes,
test_ser::test_usizes,
test_ser::test_floats,
test_ser::test_char,
test_ser::test_str,
test_ser::test_option,
test_ser::test_result,
test_ser::test_slice,
test_ser::test_array,
test_ser::test_vec,
test_ser::test_btreeset,
test_ser::test_hashset,
test_ser::test_tuple,
test_ser::test_btreemap,
test_ser::test_hashmap,
test_ser::test_unit_struct,
test_ser::test_tuple_struct,
test_ser::test_struct,
test_ser::test_enum,
test_ser::test_box,
test_ser::test_boxed_slice,
test_ser::test_duration,
test_ser::test_system_time,
test_ser::test_range,
test_ser::test_range_inclusive,
test_ser::test_bound,
test_ser::test_path,
test_ser::test_path_buf,
test_ser::test_cstring,
test_ser::test_cstr,
test_ser::test_rc,
test_ser::test_rc_weak_some,
test_ser::test_rc_weak_none,
test_ser::test_arc,
test_ser::test_arc_weak_some,
test_ser::test_arc_weak_none,
test_ser::test_wrapping,
test_ser::test_rc_dst,
test_ser::test_arc_dst,
test_ser::test_fmt_arguments,
test_ser::test_net_ipv4addr_readable,
test_ser::test_net_ipv6addr_readable,
test_ser::test_net_ipaddr_readable,
test_ser::test_net_socketaddr_readable,
test_ser::test_net_ipv4addr_compact,
test_ser::test_net_ipv6addr_compact,
test_ser::test_net_ipaddr_compact,
test_ser::test_net_socketaddr_compact,
test_ser::test_never_result,
test_ser::test_cannot_serialize_paths,
test_ser::test_cannot_serialize_mutably_borrowed_ref_cell,
test_ser::test_enum_skipped,
test_ser::test_integer128,
test_unstable::unstable::test_raw_identifiers,
test_value::test_u32_to_enum,
test_value::test_integer128,
test_ignored_any::test_deserialize_enum,
);
println!("All tests finished!");
sgx_status_t::SGX_SUCCESS
}
| 49.373333 | 102 | 0.583581 |
483649c36e772c0888af02ebe796c5816868ea00 | 9,437 | // Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use engine_traits::KvEngine;
use kvproto::metapb::Region;
use raft::StateRole;
use raftstore::coprocessor::*;
use tikv_util::worker::Scheduler;
use crate::{cmd::lock_only_filter, endpoint::Task, metrics::RTS_CHANNEL_PENDING_CMD_BYTES};
/// Coprocessor observer that forwards applied-command, role-change, and
/// region-change events from raftstore to the resolved-ts worker.
pub struct Observer<E: KvEngine> {
    // Channel into the resolved-ts worker; every observed event is turned
    // into a `Task` and scheduled here.
    scheduler: Scheduler<Task<E::Snapshot>>,
}
impl<E: KvEngine> Observer<E> {
    /// Creates an observer that reports events to the given scheduler.
    pub fn new(scheduler: Scheduler<Task<E::Snapshot>>) -> Self {
        Self { scheduler }
    }

    /// Hooks this observer into the coprocessor host.
    ///
    /// The cmd observer is registered with a low priority (1000) so it runs
    /// last: it `mem::take`s the `Vec<CmdBatch>`, and running last keeps that
    /// from affecting any other cmd observer.
    pub fn register_to(&self, coprocessor_host: &mut CoprocessorHost<E>) {
        let registry = &mut coprocessor_host.registry;
        registry.register_cmd_observer(1000, BoxCmdObserver::new(self.clone()));
        registry.register_role_observer(100, BoxRoleObserver::new(self.clone()));
        registry.register_region_change_observer(100, BoxRegionChangeObserver::new(self.clone()));
    }
}
impl<E: KvEngine> Clone for Observer<E> {
fn clone(&self) -> Self {
Self {
scheduler: self.scheduler.clone(),
}
}
}
// Marker impl: the default (empty) `Coprocessor` lifecycle hooks are enough.
impl<E: KvEngine> Coprocessor for Observer<E> {}
impl<E: KvEngine> CmdObserver<E> for Observer<E> {
    /// Drains the applied command batches, keeps only lock-related changes,
    /// and forwards them to the resolved-ts worker as a `ChangeLog` task.
    fn on_flush_applied_cmd_batch(
        &self,
        max_level: ObserveLevel,
        cmd_batches: &mut Vec<CmdBatch>,
        _: &E,
    ) {
        if max_level == ObserveLevel::None {
            return;
        }
        let batches: Vec<_> = std::mem::take(cmd_batches)
            .into_iter()
            .filter_map(lock_only_filter)
            .collect();
        if batches.is_empty() {
            return;
        }
        // Account for the bytes that are now pending in the channel.
        let pending_bytes: usize = batches.iter().map(|b| b.size()).sum();
        RTS_CHANNEL_PENDING_CMD_BYTES.add(pending_bytes as i64);
        let task = Task::ChangeLog {
            cmd_batch: batches,
            snapshot: None,
        };
        if let Err(e) = self.scheduler.schedule(task) {
            info!("failed to schedule change log event"; "err" => ?e);
        }
    }

    /// Starts advancing resolved ts for a region once its peer has become
    /// leader and applied an entry on its own term.
    fn on_applied_current_term(&self, role: StateRole, region: &Region) {
        if role != StateRole::Leader {
            return;
        }
        let task = Task::RegisterRegion {
            region: region.clone(),
        };
        if let Err(e) = self.scheduler.schedule(task) {
            info!("failed to schedule register region task"; "err" => ?e);
        }
    }
}
impl<E: KvEngine> RoleObserver for Observer<E> {
    /// Stops advancing resolved ts once the peer steps down to follower or
    /// candidate.
    ///
    /// No observe-id check is needed here: role change events are expected to
    /// be scheduled in order.
    fn on_role_change(&self, ctx: &mut ObserverContext<'_>, role_change: &RoleChange) {
        if role_change.state == StateRole::Leader {
            return;
        }
        let task = Task::DeRegisterRegion {
            region_id: ctx.region().id,
        };
        if let Err(e) = self.scheduler.schedule(task) {
            info!("failed to schedule deregister region task"; "err" => ?e);
        }
    }
}
impl<E: KvEngine> RegionChangeObserver for Observer<E> {
    /// Forwards region update and destruction events for regions this peer
    /// leads, so the resolved-ts endpoint can keep its observe state in sync.
    fn on_region_changed(
        &self,
        ctx: &mut ObserverContext<'_>,
        event: RegionChangeEvent,
        role: StateRole,
    ) {
        // If the peer is not leader, it has not registered the observe region
        // (or is already deregistering it), so there is no observe state to
        // update with `RegionUpdated`/`RegionDestroyed`.
        if role != StateRole::Leader {
            return;
        }
        match event {
            RegionChangeEvent::Create => {}
            RegionChangeEvent::Update(_) => {
                if let Err(e) = self
                    .scheduler
                    .schedule(Task::RegionUpdated(ctx.region().clone()))
                {
                    info!("failed to schedule region updated event"; "err" => ?e);
                }
            }
            // Fix: the previous code re-matched `event` against `Destroy`
            // inside this arm; that inner `if let` was always true (the outer
            // match already guarantees it) and has been removed.
            RegionChangeEvent::Destroy => {
                if let Err(e) = self
                    .scheduler
                    .schedule(Task::RegionDestroyed(ctx.region().clone()))
                {
                    info!("failed to schedule region destroyed event"; "err" => ?e);
                }
            }
            RegionChangeEvent::UpdateBuckets(_) => {}
        }
    }
}
#[cfg(test)]
mod test {
    use std::time::Duration;
    use engine_rocks::RocksSnapshot;
    use engine_traits::{CF_DEFAULT, CF_LOCK, CF_WRITE};
    use kvproto::raft_cmdpb::*;
    use tikv::storage::kv::TestEngineBuilder;
    use tikv_util::worker::{dummy_scheduler, ReceiverWrapper};
    use super::*;
    /// Builds a `Put` request for `key`/`value` in column family `cf`.
    fn put_cf(cf: &str, key: &[u8], value: &[u8]) -> Request {
        let mut cmd = Request::default();
        cmd.set_cmd_type(CmdType::Put);
        cmd.mut_put().set_cf(cf.to_owned());
        cmd.mut_put().set_key(key.to_vec());
        cmd.mut_put().set_value(value.to_vec());
        cmd
    }
    /// Asserts that the next task received within 10ms is a `ChangeLog`
    /// carrying exactly the requests in `data`.
    /// An empty `data` instead asserts that *nothing* arrives in 10ms.
    fn expect_recv(rx: &mut ReceiverWrapper<Task<RocksSnapshot>>, data: Vec<Request>) {
        if data.is_empty() {
            match rx.recv_timeout(Duration::from_millis(10)) {
                Err(std::sync::mpsc::RecvTimeoutError::Timeout) => return,
                _ => panic!("unexpected result"),
            };
        }
        match rx.recv_timeout(Duration::from_millis(10)).unwrap().unwrap() {
            Task::ChangeLog { cmd_batch, .. } => {
                assert_eq!(cmd_batch.len(), 1);
                assert_eq!(cmd_batch[0].len(), 1);
                assert_eq!(&cmd_batch[0].cmds[0].request.get_requests(), &data);
            }
            _ => panic!("unexpected task"),
        };
    }
    /// Exercises `on_flush_applied_cmd_batch` under every combination of
    /// cdc / resolved-ts observation state, checking which requests get
    /// forwarded: all CFs while cdc is observing, only lock-related CFs when
    /// only resolved-ts is observing, and nothing when neither is observing.
    #[test]
    fn test_observing() {
        let (scheduler, mut rx) = dummy_scheduler();
        let observer = Observer::new(scheduler);
        let engine = TestEngineBuilder::new().build().unwrap().get_rocksdb();
        // A mix of lock, default, and write CF puts so filtering is visible.
        let mut data = vec![
            put_cf(CF_LOCK, b"k1", b"v"),
            put_cf(CF_DEFAULT, b"k2", b"v"),
            put_cf(CF_LOCK, b"k3", b"v"),
            put_cf(CF_LOCK, b"k4", b"v"),
            put_cf(CF_DEFAULT, b"k6", b"v"),
            put_cf(CF_WRITE, b"k7", b"v"),
            put_cf(CF_WRITE, b"k8", b"v"),
        ];
        let mut cmd = Cmd::new(0, RaftCmdRequest::default(), RaftCmdResponse::default());
        cmd.request.mut_requests().clear();
        for put in &data {
            cmd.request.mut_requests().push(put.clone());
        }
        // Both cdc and resolved-ts worker are observing
        let observe_info = CmdObserveInfo::from_handle(
            ObserveHandle::new(),
            ObserveHandle::new(),
            ObserveHandle::default(),
        );
        let mut cb = CmdBatch::new(&observe_info, 0);
        cb.push(&observe_info, 0, cmd.clone());
        observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
        // Observe all data
        expect_recv(&mut rx, data.clone());
        // Only cdc is observing
        let observe_info = CmdObserveInfo::from_handle(
            ObserveHandle::new(),
            ObserveHandle::new(),
            ObserveHandle::default(),
        );
        observe_info.rts_id.stop_observing();
        let mut cb = CmdBatch::new(&observe_info, 0);
        cb.push(&observe_info, 0, cmd.clone());
        observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
        // Still observe all data
        expect_recv(&mut rx, data.clone());
        // PiTR and resolved-ts are observing (cdc handle is default/inactive)
        let observe_info = CmdObserveInfo::from_handle(
            ObserveHandle::default(),
            ObserveHandle::new(),
            ObserveHandle::new(),
        );
        let mut cb = CmdBatch::new(&observe_info, 0);
        cb.push(&observe_info, 0, cmd.clone());
        observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
        // Still observe all data
        expect_recv(&mut rx, data.clone());
        // Only resolved-ts worker is observing
        let observe_info = CmdObserveInfo::from_handle(
            ObserveHandle::new(),
            ObserveHandle::new(),
            ObserveHandle::default(),
        );
        observe_info.cdc_id.stop_observing();
        let mut cb = CmdBatch::new(&observe_info, 0);
        cb.push(&observe_info, 0, cmd.clone());
        observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
        // Only observe lock related data
        data.retain(|p| p.get_put().cf != CF_DEFAULT);
        expect_recv(&mut rx, data);
        // Both cdc and resolved-ts worker are not observing
        let observe_info = CmdObserveInfo::from_handle(
            ObserveHandle::new(),
            ObserveHandle::new(),
            ObserveHandle::default(),
        );
        observe_info.rts_id.stop_observing();
        observe_info.cdc_id.stop_observing();
        let mut cb = CmdBatch::new(&observe_info, 0);
        cb.push(&observe_info, 0, cmd);
        observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
        // Observe no data
        expect_recv(&mut rx, vec![]);
    }
}
| 36.296154 | 108 | 0.567447 |
e8eea4f755d83339dc123830a43554deb53bc897 | 368 | #[doc = "Reader of register MESSAGESTATE"]
pub type R = crate::R<u32, super::MESSAGESTATE>;
#[doc = "Reader of field `VALID`"]
pub type VALID_R = crate::R<u32, u32>;
impl R {
    #[doc = "Bits 0:31 - Message Valid Bits (of All Message Objects)"]
    #[inline(always)]
    pub fn valid(&self) -> VALID_R {
        // The field spans the entire 32-bit register, so the previous
        // `& 0xffff_ffff` mask and `as u32` cast of a `u32` were no-ops.
        VALID_R::new(self.bits)
    }
}
| 30.666667 | 70 | 0.622283 |
22d149d3249b10712562064085a843570e6a27e5 | 3,960 | use {
crate::{
bigtable_upload::{self, ConfirmedBlockUploadConfig},
blockstore::Blockstore,
},
solana_runtime::commitment::BlockCommitmentCache,
std::{
cmp::min,
sync::{
atomic::{AtomicBool, AtomicU64, Ordering},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
},
tokio::runtime::Runtime,
};
/// Background service that periodically uploads confirmed blocks from the
/// local `Blockstore` to BigTable.
pub struct BigTableUploadService {
    // Handle of the spawned uploader thread; joined via `join()`.
    thread: JoinHandle<()>,
}
impl BigTableUploadService {
    /// Spawns the upload service with the default upload configuration.
    pub fn new(
        runtime: Arc<Runtime>,
        bigtable_ledger_storage: solana_storage_bigtable::LedgerStorage,
        blockstore: Arc<Blockstore>,
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        max_complete_transaction_status_slot: Arc<AtomicU64>,
        exit: Arc<AtomicBool>,
    ) -> Self {
        Self::new_with_config(
            runtime,
            bigtable_ledger_storage,
            blockstore,
            block_commitment_cache,
            max_complete_transaction_status_slot,
            ConfirmedBlockUploadConfig::default(),
            exit,
        )
    }

    /// Spawns the upload service with an explicit upload configuration.
    pub fn new_with_config(
        runtime: Arc<Runtime>,
        bigtable_ledger_storage: solana_storage_bigtable::LedgerStorage,
        blockstore: Arc<Blockstore>,
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        max_complete_transaction_status_slot: Arc<AtomicU64>,
        config: ConfirmedBlockUploadConfig,
        exit: Arc<AtomicBool>,
    ) -> Self {
        info!("Starting BigTable upload service");
        let thread = Builder::new()
            .name("bigtable-upload".to_string())
            .spawn(move || {
                Self::run(
                    runtime,
                    bigtable_ledger_storage,
                    blockstore,
                    block_commitment_cache,
                    max_complete_transaction_status_slot,
                    config,
                    exit,
                )
            })
            .unwrap();
        Self { thread }
    }

    /// Upload loop: repeatedly picks the next eligible slot range and uploads
    /// it to BigTable, until `exit` is set.
    fn run(
        runtime: Arc<Runtime>,
        bigtable_ledger_storage: solana_storage_bigtable::LedgerStorage,
        blockstore: Arc<Blockstore>,
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        max_complete_transaction_status_slot: Arc<AtomicU64>,
        config: ConfirmedBlockUploadConfig,
        exit: Arc<AtomicBool>,
    ) {
        let mut start_slot = blockstore.get_first_available_block().unwrap_or_default();
        while !exit.load(Ordering::Relaxed) {
            // The highest slot eligible for upload is the highest root that
            // also has complete transaction-status metadata.
            let highest_complete_root = min(
                max_complete_transaction_status_slot.load(Ordering::SeqCst),
                block_commitment_cache.read().unwrap().root(),
            );
            // Bound the batch so a single pass never scans too many slots.
            let end_slot = min(
                highest_complete_root,
                start_slot.saturating_add(config.max_num_slots_to_check as u64 * 2),
            );
            if end_slot <= start_slot {
                // Nothing new to upload yet; back off briefly.
                std::thread::sleep(std::time::Duration::from_secs(1));
                continue;
            }
            match runtime.block_on(bigtable_upload::upload_confirmed_blocks(
                blockstore.clone(),
                bigtable_ledger_storage.clone(),
                start_slot,
                end_slot,
                config.clone(),
                exit.clone(),
            )) {
                Ok(last_slot_uploaded) => start_slot = last_slot_uploaded,
                Err(err) => {
                    warn!("bigtable: upload_confirmed_blocks: {}", err);
                    std::thread::sleep(std::time::Duration::from_secs(2));
                }
            }
        }
    }

    /// Blocks until the uploader thread finishes.
    pub fn join(self) -> thread::Result<()> {
        self.thread.join()
    }
}
| 31.935484 | 89 | 0.556061 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.