//An example of a list that doesn't work in Rust due to its recursive
//nature.
// enum List {
// Cons(i32, List),
// Nil,
// }
enum List {
Cons(i32, Box<List>),
Nil,
}
use List::{Cons, Nil};
use std::ops::Deref;
//MyBox<T> is defined as a tuple struct with just one element, modeled after Box<T>.
struct MyBox<T>(T);
//Here we can see that the new function takes the data and just encapsulates it.
impl<T> MyBox<T> {
fn new(x: T) -> MyBox<T> {
MyBox(x)
}
}
//The Deref trait, provided by the standard library, requires implementing
// one method named deref that borrows self and returns a reference to
// the inner data.
impl<T> Deref for MyBox<T> {
type Target = T;
fn deref(&self) -> &T {
&self.0
}
}
struct CustomSmartPointer {
data: String,
}
//The second trait important to smart pointers is the Drop trait. It
//lets us customize what happens when a value goes out of scope.
//The compiler will automatically place these calls for us so that
//we don't have to worry about manual memory management unless we want to.
//The Drop trait requires us to implement one method named drop that takes
//a mutable reference to self.
impl Drop for CustomSmartPointer {
fn drop(&mut self) {
println!("Dropping CustomSmartPointer with data `{}`!", self.data);
}
}
//std::mem::drop is in the prelude so we don't need to import it.
fn main() {
    //Box allows us to point to data stored on the heap.
    //Boxes have several uses, such as when you want to transfer ownership
    //of data without it being copied,
    //or when you have a type whose size isn't known
    //at compile time and you want to use a value of that type in a context
    //that needs to know an exact size.
let b = Box::new(5);
println!("b = {}", b);
    //When a box pointer goes out of scope, both it and the heap data it's
    //pointing to are deallocated.
//Boxes can also be used to define recursive types.
//The below won't work because the compiler can't figure out
//how much space this structure will require to be held.
// let list = Cons(1, Cons(2, Cons(3, Nil)));
let list = Cons(1,
Box::new(Cons(2,
Box::new(Cons(3,
Box::new(Nil))))));
//The Box pointer has therefore broken the infinite recursion cycle,
//and the compiler can now figure out the size of Cons.
    //The * operator allows us to follow a reference to the value it
    //is pointing at. Box supports this through the
    //Deref trait.
let x = 5;
let y = Box::new(x);
assert_eq!(5, x);
assert_eq!(5, *y);
//We can implement our version of the Box pointer that allows
//us to dereference the data
let x = 5;
let y = MyBox::new(x);
assert_eq!(5, x);
assert_eq!(5, *y);
    //Deref coercion is a convenience that Rust performs on args of
    //functions and methods. It converts a reference to a type that implements
    //Deref into a ref to a type that Deref can convert the original type
    //to. This feature was added as a convenience to reduce the number
    //of & and * needed.
let m = MyBox::new(String::from("Rust"));
    //This deref coercion is all resolved at compile time so we don't pay a
    //performance penalty at runtime.
hello(&m);
// Rust does deref coercion when it finds types and trait
// implementations in three cases:
//
// From &T to &U when T: Deref<Target=U>.
// From &mut T to &mut U when T: DerefMut<Target=U>.
// From &mut T to &U when T: Deref<Target=U>.
    //The reverse of the last point can't occur because you can't
    //convert an immutable ref to a mutable ref: there's no
    //guarantee that there is only one immutable ref to the data.
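    //A small added illustration (not in the original example): the
    //&mut-to-& case above means a &mut MyBox<String> also coerces to
    //the &str that hello expects.
    let mut n = MyBox::new(String::from("coercion"));
    hello(&mut n);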
    //The commented example below lets us see how Rust
    //automatically manages the memory for us.
// let c = CustomSmartPointer { data: String::from("my stuff") };
// let d = CustomSmartPointer { data: String::from("other stuff") };
// println!("CustomSmartPointers created.");
    //Here we're going to see that we can drop this pointer like it's
//hot. In other words we're going to deallocate everything
//before our program is done because we need our space back.
let c = CustomSmartPointer { data: String::from("some data") };
println!("CustomSmartPointer created.");
drop(c);
println!("CustomSmartPointer dropped before the end of main.");
}
fn hello(name: &str) {
println!("Hello, {}!", name);
}
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-emscripten No support for threads
#![allow(unknown_features)]
#![feature(std_misc)]
/**
A somewhat reduced test case to expose some Valgrind issues.
This originally came from the word-count benchmark.
*/
pub fn map(filename: String, mut emit: map_reduce::putter) {
emit(filename, "1".to_string());
}
mod map_reduce {
use std::collections::HashMap;
use std::sync::mpsc::{channel, Sender};
use std::str;
use std::thread;
pub type putter<'a> = Box<FnMut(String, String) + 'a>;
pub type mapper = extern fn(String, putter);
enum ctrl_proto { find_reducer(Vec<u8>, Sender<isize>), mapper_done, }
fn start_mappers(ctrl: Sender<ctrl_proto>, inputs: Vec<String>) {
for i in &inputs {
let ctrl = ctrl.clone();
let i = i.clone();
thread::spawn(move|| map_task(ctrl.clone(), i.clone()) );
}
}
fn map_task(ctrl: Sender<ctrl_proto>, input: String) {
let mut intermediates = HashMap::new();
fn emit(im: &mut HashMap<String, isize>,
ctrl: Sender<ctrl_proto>, key: String,
_val: String) {
if im.contains_key(&key) {
return;
}
let (tx, rx) = channel();
println!("sending find_reducer");
ctrl.send(ctrl_proto::find_reducer(key.as_bytes().to_vec(), tx)).unwrap();
println!("receiving");
let c = rx.recv().unwrap();
println!("{}", c);
im.insert(key, c);
}
let ctrl_clone = ctrl.clone();
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
::map(input, Box::new(|a,b| emit(&mut intermediates, ctrl.clone(), a, b)));
ctrl_clone.send(ctrl_proto::mapper_done).unwrap();
}
pub fn map_reduce(inputs: Vec<String>) {
let (tx, rx) = channel();
// This thread becomes the master control thread. It spawns others
// to do the rest.
let mut reducers: HashMap<String, isize>;
reducers = HashMap::new();
start_mappers(tx, inputs.clone());
let mut num_mappers = inputs.len() as isize;
while num_mappers > 0 {
match rx.recv().unwrap() {
ctrl_proto::mapper_done => { num_mappers -= 1; }
ctrl_proto::find_reducer(k, cc) => {
let mut c;
match reducers.get(&str::from_utf8(&k).unwrap().to_string()) {
Some(&_c) => { c = _c; }
None => { c = 0; }
}
cc.send(c).unwrap();
}
}
}
}
}
pub fn main() {
map_reduce::map_reduce(
vec!["../src/test/run-pass/hashmap-memory.rs".to_string()]);
}
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
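// The `//~ ERROR` and `//~ NOTE` markers below are compiletest
// annotations: the test passes only if the compiler emits matching
// diagnostics on the annotated lines.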
fn test() {
let v: isize;
v = 2; //~ NOTE first assignment
v += 1; //~ ERROR cannot assign twice to immutable variable
//~| NOTE cannot assign twice to immutable
v.clone();
}
fn main() {
}
use std::net::SocketAddr;
use rsocket_rust::async_trait;
use rsocket_rust::{error::RSocketError, transport::Transport, Result};
use tokio::net::TcpStream;
use crate::{connection::TcpConnection, misc::parse_tcp_addr};
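/// How the transport obtains its connection: `Direct` wraps an
/// already-established socket, while `Lazy` stores an address and only
/// connects when `connect` is called.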
#[derive(Debug)]
enum Connector {
Direct(TcpStream),
Lazy(SocketAddr),
}
#[derive(Debug)]
pub struct TcpClientTransport {
connector: Connector,
}
#[async_trait]
impl Transport for TcpClientTransport {
type Conn = TcpConnection;
async fn connect(self) -> Result<TcpConnection> {
match self.connector {
Connector::Direct(socket) => Ok(TcpConnection::from(socket)),
Connector::Lazy(addr) => match TcpStream::connect(addr).await {
Ok(stream) => Ok(TcpConnection::from(stream)),
Err(e) => Err(RSocketError::IO(e).into()),
},
}
}
}
impl From<TcpStream> for TcpClientTransport {
fn from(socket: TcpStream) -> TcpClientTransport {
TcpClientTransport {
connector: Connector::Direct(socket),
}
}
}
impl From<SocketAddr> for TcpClientTransport {
fn from(addr: SocketAddr) -> TcpClientTransport {
TcpClientTransport {
connector: Connector::Lazy(addr),
}
}
}
impl From<String> for TcpClientTransport {
fn from(addr: String) -> Self {
let socket_addr: SocketAddr = parse_tcp_addr(addr)
.parse()
.expect("Invalid transport string!");
TcpClientTransport {
connector: Connector::Lazy(socket_addr),
}
}
}
impl From<&str> for TcpClientTransport {
fn from(addr: &str) -> TcpClientTransport {
let socket_addr: SocketAddr = parse_tcp_addr(addr)
.parse()
.expect("Invalid transport string!");
TcpClientTransport {
connector: Connector::Lazy(socket_addr),
}
}
}
//! BoosterParameters for controlling
//! [DART](https://xgboost.readthedocs.io/en/latest/tutorials/dart.html) boosters.
use std::default::Default;
use super::Interval;
/// Type of sampling algorithm.
#[derive(Clone)]
pub enum SampleType {
/// Dropped trees are selected uniformly.
Uniform,
/// Dropped trees are selected in proportion to weight.
Weighted,
}
impl ToString for SampleType {
fn to_string(&self) -> String {
match *self {
SampleType::Uniform => "uniform".to_owned(),
SampleType::Weighted => "weighted".to_owned(),
}
}
}
impl Default for SampleType {
fn default() -> Self { SampleType::Uniform }
}
/// Type of normalization algorithm.
#[derive(Clone)]
pub enum NormalizeType {
    /// New trees have the same weight as each of the dropped trees.
    ///
    /// * weight of new trees is 1 / (k + learning_rate)
    /// * dropped trees are scaled by a factor of k / (k + learning_rate)
Tree,
    /// New trees have the same weight as the sum of the dropped trees (forest).
    ///
    /// * weight of new trees is 1 / (1 + learning_rate)
    /// * dropped trees are scaled by a factor of 1 / (1 + learning_rate)
Forest,
}
impl ToString for NormalizeType {
fn to_string(&self) -> String {
match *self {
NormalizeType::Tree => "tree".to_owned(),
NormalizeType::Forest => "forest".to_owned(),
}
}
}
impl Default for NormalizeType {
fn default() -> Self { NormalizeType::Tree }
}
/// Additional parameters for Dart Booster.
#[derive(Builder, Clone)]
#[builder(build_fn(validate = "Self::validate"))]
#[builder(default)]
pub struct DartBoosterParameters {
/// Type of sampling algorithm.
sample_type: SampleType,
/// Type of normalization algorithm.
normalize_type: NormalizeType,
/// Dropout rate (a fraction of previous trees to drop during the dropout).
/// * range: [0.0, 1.0]
rate_drop: f32,
/// When this flag is enabled, at least one tree is always dropped during the dropout
/// (allows Binomial-plus-one or epsilon-dropout from the original DART paper).
one_drop: bool,
/// Probability of skipping the dropout procedure during a boosting iteration.
/// If a dropout is skipped, new trees are added in the same manner as gbtree.
/// Note that non-zero skip_drop has higher priority than rate_drop or one_drop.
/// * range: [0.0, 1.0]
skip_drop: f32,
}
impl Default for DartBoosterParameters {
fn default() -> Self {
DartBoosterParameters {
sample_type: SampleType::default(),
normalize_type: NormalizeType::default(),
rate_drop: 0.0,
one_drop: false,
skip_drop: 0.0,
}
}
}
impl DartBoosterParameters {
pub(crate) fn as_string_pairs(&self) -> Vec<(String, String)> {
let mut v = Vec::new();
v.push(("booster".to_owned(), "dart".to_owned()));
v.push(("sample_type".to_owned(), self.sample_type.to_string()));
v.push(("normalize_type".to_owned(), self.normalize_type.to_string()));
v.push(("rate_drop".to_owned(), self.rate_drop.to_string()));
v.push(("one_drop".to_owned(), (self.one_drop as u8).to_string()));
v.push(("skip_drop".to_owned(), self.skip_drop.to_string()));
v
}
}
impl DartBoosterParametersBuilder {
fn validate(&self) -> Result<(), String> {
Interval::new_closed_closed(0.0, 1.0).validate(&self.rate_drop, "rate_drop")?;
Interval::new_closed_closed(0.0, 1.0).validate(&self.skip_drop, "skip_drop")?;
Ok(())
}
}
// Copyright (c) 2017-present PyO3 Project and Contributors
use crate::conversion::FromPyObject;
use crate::conversion::{PyTryFrom, ToPyObject};
use crate::err::{PyErr, PyResult};
use crate::gil;
use crate::instance::PyNativeType;
use crate::object::PyObject;
use crate::types::PyAny;
use crate::AsPyPointer;
use crate::IntoPy;
use crate::Python;
use crate::{ffi, FromPy};
use std::borrow::Cow;
use std::ffi::CStr;
use std::os::raw::c_char;
use std::ptr::NonNull;
use std::str;
/// Represents a Python `string`.
///
/// This type is immutable.
#[repr(transparent)]
pub struct PyString(PyObject);
pyobject_native_type!(PyString, ffi::PyUnicode_Type, ffi::PyUnicode_Check);
impl PyString {
/// Creates a new Python string object.
///
/// Panics if out of memory.
pub fn new<'p>(py: Python<'p>, s: &str) -> &'p PyString {
let ptr = s.as_ptr() as *const c_char;
let len = s.len() as ffi::Py_ssize_t;
unsafe { py.from_owned_ptr(ffi::PyUnicode_FromStringAndSize(ptr, len)) }
}
pub fn from_object<'p>(src: &'p PyAny, encoding: &str, errors: &str) -> PyResult<&'p PyString> {
unsafe {
src.py()
.from_owned_ptr_or_err::<PyString>(ffi::PyUnicode_FromEncodedObject(
src.as_ptr(),
encoding.as_ptr() as *const c_char,
errors.as_ptr() as *const c_char,
))
}
}
/// Get the Python string as a byte slice.
///
/// Returns a `UnicodeEncodeError` if the input is not valid unicode
/// (containing unpaired surrogates).
#[inline]
pub fn as_bytes(&self) -> PyResult<&[u8]> {
unsafe {
let mut size: ffi::Py_ssize_t = 0;
let data = ffi::PyUnicode_AsUTF8AndSize(self.0.as_ptr(), &mut size) as *const u8;
if data.is_null() {
Err(PyErr::fetch(self.py()))
} else {
Ok(std::slice::from_raw_parts(data, size as usize))
}
}
}
/// Convert the `PyString` into a Rust string.
pub fn to_string(&self) -> PyResult<Cow<str>> {
let bytes = self.as_bytes()?;
let string = std::str::from_utf8(bytes)?;
Ok(Cow::Borrowed(string))
}
/// Convert the `PyString` into a Rust string.
///
    /// Unpaired surrogates and invalid UTF-8 sequences are
    /// replaced with U+FFFD REPLACEMENT CHARACTER.
pub fn to_string_lossy(&self) -> Cow<str> {
match self.to_string() {
Ok(s) => s,
Err(_) => {
unsafe {
let py_bytes = ffi::PyUnicode_AsEncodedString(
self.0.as_ptr(),
CStr::from_bytes_with_nul(b"utf-8\0").unwrap().as_ptr(),
CStr::from_bytes_with_nul(b"surrogatepass\0")
.unwrap()
.as_ptr(),
);
// Since we have a valid PyString and replace any surrogates, assume success.
debug_assert!(!py_bytes.is_null());
// ensure DECREF will be called
gil::register_pointer(NonNull::new(py_bytes).unwrap());
let buffer = ffi::PyBytes_AsString(py_bytes) as *const u8;
debug_assert!(!buffer.is_null());
let length = ffi::PyBytes_Size(py_bytes) as usize;
let bytes = std::slice::from_raw_parts(buffer, length);
String::from_utf8_lossy(bytes)
}
}
}
}
}
/// Converts Rust `str` to Python object.
/// See `PyString::new` for details on the conversion.
impl ToPyObject for str {
#[inline]
fn to_object(&self, py: Python) -> PyObject {
PyString::new(py, self).into()
}
}
impl<'a> IntoPy<PyObject> for &'a str {
#[inline]
fn into_py(self, py: Python) -> PyObject {
PyString::new(py, self).into()
}
}
/// Converts Rust `Cow<str>` to Python object.
/// See `PyString::new` for details on the conversion.
impl<'a> ToPyObject for Cow<'a, str> {
#[inline]
fn to_object(&self, py: Python) -> PyObject {
PyString::new(py, self).into()
}
}
/// Converts Rust `String` to Python object.
/// See `PyString::new` for details on the conversion.
impl ToPyObject for String {
#[inline]
fn to_object(&self, py: Python) -> PyObject {
PyString::new(py, self).into()
}
}
impl FromPy<String> for PyObject {
fn from_py(other: String, py: Python) -> Self {
PyString::new(py, &other).into()
}
}
impl<'a> IntoPy<PyObject> for &'a String {
#[inline]
fn into_py(self, py: Python) -> PyObject {
PyString::new(py, self).into()
}
}
/// Allows extracting strings from Python objects.
/// Accepts Python `str` and `unicode` objects.
impl<'source> crate::FromPyObject<'source> for Cow<'source, str> {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
<PyString as PyTryFrom>::try_from(ob)?.to_string()
}
}
/// Allows extracting strings from Python objects.
/// Accepts Python `str` and `unicode` objects.
impl<'a> crate::FromPyObject<'a> for &'a str {
fn extract(ob: &'a PyAny) -> PyResult<Self> {
let s: Cow<'a, str> = crate::FromPyObject::extract(ob)?;
match s {
Cow::Borrowed(r) => Ok(r),
Cow::Owned(r) => {
let r = ob.py().register_any(r);
Ok(r.as_str())
}
}
}
}
/// Allows extracting strings from Python objects.
/// Accepts Python `str` and `unicode` objects.
impl<'source> FromPyObject<'source> for String {
fn extract(obj: &'source PyAny) -> PyResult<Self> {
<PyString as PyTryFrom>::try_from(obj)?
.to_string()
.map(Cow::into_owned)
}
}
#[cfg(test)]
mod test {
use super::PyString;
use crate::instance::AsPyRef;
use crate::object::PyObject;
use crate::Python;
use crate::{FromPyObject, PyTryFrom, ToPyObject};
use std::borrow::Cow;
#[test]
fn test_non_bmp() {
let gil = Python::acquire_gil();
let py = gil.python();
let s = "\u{1F30F}";
let py_string = s.to_object(py);
assert_eq!(s, py_string.extract::<String>(py).unwrap());
}
#[test]
fn test_extract_str() {
let gil = Python::acquire_gil();
let py = gil.python();
let s = "Hello Python";
let py_string = s.to_object(py);
let s2: &str = FromPyObject::extract(py_string.as_ref(py).into()).unwrap();
assert_eq!(s, s2);
}
#[test]
fn test_as_bytes() {
let gil = Python::acquire_gil();
let py = gil.python();
let s = "ascii 🐈";
let obj: PyObject = PyString::new(py, s).into();
let py_string = <PyString as PyTryFrom>::try_from(obj.as_ref(py)).unwrap();
assert_eq!(s.as_bytes(), py_string.as_bytes().unwrap());
}
#[test]
fn test_as_bytes_surrogate() {
let gil = Python::acquire_gil();
let py = gil.python();
let obj: PyObject = py.eval(r#"'\ud800'"#, None, None).unwrap().into();
let py_string = <PyString as PyTryFrom>::try_from(obj.as_ref(py)).unwrap();
assert!(py_string.as_bytes().is_err());
}
#[test]
fn test_to_string_ascii() {
let gil = Python::acquire_gil();
let py = gil.python();
let s = "ascii";
let obj: PyObject = PyString::new(py, s).into();
let py_string = <PyString as PyTryFrom>::try_from(obj.as_ref(py)).unwrap();
assert!(py_string.to_string().is_ok());
assert_eq!(Cow::Borrowed(s), py_string.to_string().unwrap());
}
#[test]
fn test_to_string_unicode() {
let gil = Python::acquire_gil();
let py = gil.python();
let s = "哈哈🐈";
let obj: PyObject = PyString::new(py, s).into();
let py_string = <PyString as PyTryFrom>::try_from(obj.as_ref(py)).unwrap();
assert!(py_string.to_string().is_ok());
assert_eq!(Cow::Borrowed(s), py_string.to_string().unwrap());
}
#[test]
fn test_to_string_lossy() {
let gil = Python::acquire_gil();
let py = gil.python();
let obj: PyObject = py
.eval(r#"'🐈 Hello \ud800World'"#, None, None)
.unwrap()
.into();
let py_string = <PyString as PyTryFrom>::try_from(obj.as_ref(py)).unwrap();
assert_eq!(py_string.to_string_lossy(), "🐈 Hello ���World");
}
}
use bincode;
use failure::Fail;
use crate::subset;
use crate::threshold_decrypt;
/// Honey badger error variants.
#[derive(Debug, Fail)]
pub enum Error {
/// Failed to serialize contribution.
#[fail(display = "Error serializing contribution: {}", _0)]
ProposeBincode(bincode::ErrorKind),
/// Failed to instantiate `Subset`.
#[fail(display = "Failed to instantiate Subset: {}", _0)]
CreateSubset(subset::Error),
/// Failed to input contribution to `Subset`.
#[fail(display = "Failed to input contribution to Subset: {}", _0)]
InputSubset(subset::Error),
/// Failed to handle `Subset` message.
#[fail(display = "Failed to handle Subset message: {}", _0)]
HandleSubsetMessage(subset::Error),
/// Failed to decrypt a contribution.
#[fail(display = "Threshold decryption error: {}", _0)]
ThresholdDecrypt(threshold_decrypt::Error),
/// Unknown sender
#[fail(display = "Unknown sender")]
UnknownSender,
}
/// The result of `HoneyBadger` handling an input or a message.
pub type Result<T> = ::std::result::Result<T, Error>;
// Copyright 2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Utility stream for yielding slots in a loop.
//!
//! This is used instead of `tokio_timer::Interval` because it was unreliable.
use std::time::{Instant, Duration};
use std::marker::PhantomData;
use tokio::timer::Delay;
use futures::prelude::*;
use futures::try_ready;
use log::warn;
use inherents::{InherentDataProviders, InherentData};
use consensus_common::{Error, ErrorKind};
use crate::SlotCompatible;
/// Returns current duration since unix epoch.
pub fn duration_now() -> Option<Duration> {
use std::time::SystemTime;
let now = SystemTime::now();
now.duration_since(SystemTime::UNIX_EPOCH).map_err(|e| {
warn!("Current time {:?} is before unix epoch. Something is wrong: {:?}", now, e);
}).ok()
}
/// Returns the duration until the next slot, based on current duration since the unix epoch.
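/// Worked example (illustrative): with `slot_duration = 6` and
/// `now = 13.2s`, `remaining_full_secs` is `6 - (13 % 6) - 1 = 4` and
/// `remaining_nanos` is `1_000_000_000 - 200_000_000`, so the stream
/// wakes `4.8s` later, exactly at the 18-second slot boundary.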
pub fn time_until_next(now: Duration, slot_duration: u64) -> Duration {
let remaining_full_secs = slot_duration - (now.as_secs() % slot_duration) - 1;
let remaining_nanos = 1_000_000_000 - now.subsec_nanos();
Duration::new(remaining_full_secs, remaining_nanos)
}
/// Information about a slot.
pub struct SlotInfo {
/// The slot number.
pub number: u64,
/// Current timestamp.
pub timestamp: u64,
/// The instant at which the slot ends.
pub ends_at: Instant,
/// The inherent data.
pub inherent_data: InherentData,
/// Slot duration.
pub duration: u64,
}
impl SlotInfo {
/// Yields the remaining duration in the slot.
pub fn remaining_duration(&self) -> Duration {
let now = Instant::now();
if now < self.ends_at {
self.ends_at.duration_since(now)
} else {
Duration::from_secs(0)
}
}
}
/// A stream that returns every time there is a new slot.
pub struct Slots<SC> {
last_slot: u64,
slot_duration: u64,
inner_delay: Option<Delay>,
inherent_data_providers: InherentDataProviders,
_marker: PhantomData<SC>,
}
impl<SC> Slots<SC> {
/// Create a new `Slots` stream.
pub fn new(slot_duration: u64, inherent_data_providers: InherentDataProviders) -> Self {
Slots {
last_slot: 0,
slot_duration,
inner_delay: None,
inherent_data_providers,
_marker: PhantomData,
}
}
}
impl<SC: SlotCompatible> Stream for Slots<SC> {
type Item = SlotInfo;
type Error = Error;
fn poll(&mut self) -> Poll<Option<SlotInfo>, Self::Error> {
let slot_duration = self.slot_duration;
self.inner_delay = match self.inner_delay.take() {
None => {
// schedule wait.
let wait_until = match duration_now() {
None => return Ok(Async::Ready(None)),
Some(now) => Instant::now() + time_until_next(now, slot_duration),
};
Some(Delay::new(wait_until))
}
Some(d) => Some(d),
};
if let Some(ref mut inner_delay) = self.inner_delay {
try_ready!(inner_delay.poll().map_err(|e| Error::from(ErrorKind::FaultyTimer(e))));
}
// timeout has fired.
let inherent_data = self.inherent_data_providers.create_inherent_data()
.map_err(crate::inherent_to_common_error)?;
let (timestamp, slot_num) = SC::extract_timestamp_and_slot(&inherent_data)?;
// reschedule delay for next slot.
let ends_at = Instant::now() + time_until_next(Duration::from_secs(timestamp), slot_duration);
self.inner_delay = Some(Delay::new(ends_at));
// never yield the same slot twice.
if slot_num > self.last_slot {
self.last_slot = slot_num;
Ok(
Async::Ready(
Some(SlotInfo {
number: slot_num,
duration: self.slot_duration,
timestamp,
ends_at,
inherent_data,
})
)
)
} else {
// re-poll until we get a new slot.
self.poll()
}
}
}
/// # 1. Two Sum
///
/// Given an array of integers, return indices of the two numbers such that they add up to a specific target.
///
/// You may assume that each input would have exactly one solution, and you may not use the same element twice.
///
/// # Example:
///
/// Given nums = [2, 7, 11, 15], target = 9,
///
/// Because nums[0] + nums[1] = 2 + 7 = 9,
/// return [0, 1].
///
pub trait TwoSum {
fn two_sum(nums: &[i32], target: i32) -> Option<(usize, usize)>;
}
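/// Brute force: check every pair of indices; O(n^2) time, O(1) space.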
pub struct Solution1;
impl TwoSum for Solution1 {
fn two_sum(nums: &[i32], target: i32) -> Option<(usize, usize)> {
let len = nums.len();
for i in 0..len {
for j in 1..len {
if (nums[i] == target - nums[j]) && i != j {
return Some((i, j));
}
}
}
None
}
}
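/// Two-pass hash map: first build a value-to-index map, then scan each
/// element for its complement; O(n) time, O(n) space.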
pub struct Solution2;
impl TwoSum for Solution2 {
fn two_sum(nums: &[i32], target: i32) -> Option<(usize, usize)> {
let mut map = ::std::collections::HashMap::with_capacity(nums.len());
for (i, v) in nums.iter().enumerate() {
map.insert(v, i);
}
for (i, v) in nums.iter().enumerate() {
let k = target - v;
match map.get(&k) {
Some(j) if i != *j => {
return Some((i, *j));
},
_ => {
}
}
}
None
}
}
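/// One-pass hash map: look up the complement before inserting the current
/// value, so the map only ever holds indices seen so far.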
pub struct Solution3;
impl TwoSum for Solution3 {
fn two_sum(nums: &[i32], target: i32) -> Option<(usize, usize)> {
let mut map = ::std::collections::HashMap::<i32, usize>::with_capacity(nums.len());
for (i, &v) in nums.iter().enumerate() {
let k = target - v;
match map.get(&k) {
Some(&j) if j != i => {
return Some(if j > i {(i, j)} else {(j, i)});
},
_ => {
}
}
map.insert(v, i);
}
None
}
}
#[cfg(test)]
mod test {
use super::TwoSum;
use test::Bencher;
use super::Solution1;
#[test]
fn test_solution1() {
assert_eq!(Solution1::two_sum(&[2, 3, 7, 11], 9), Some((0, 2)));
}
#[bench]
fn bench_solution1(b: &mut Bencher) {
b.iter(|| Solution1::two_sum(&[2, 3, 7, 11], 9));
}
use super::Solution2;
#[test]
fn test_solution2() {
assert_eq!(Solution2::two_sum(&[2, 3, 7, 11], 9), Some((0, 2)));
}
#[bench]
fn bench_solution2(b: &mut Bencher) {
b.iter(|| Solution2::two_sum(&[2, 3, 7, 11], 9));
}
use super::Solution3;
#[test]
fn test_solution3() {
assert_eq!(Solution3::two_sum(&[2, 3, 7, 11], 9), Some((0, 2)));
}
#[bench]
fn bench_solution3(b: &mut Bencher) {
b.iter(|| Solution3::two_sum(&[2, 3, 7, 11], 9));
}
}
#[doc = "Register `INTENCLR` reader"]
pub struct R(crate::R<INTENCLR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<INTENCLR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<INTENCLR_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<INTENCLR_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `INTENCLR` writer"]
pub struct W(crate::W<INTENCLR_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<INTENCLR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<INTENCLR_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<INTENCLR_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Write '1' to disable interrupt for event HFCLKSTARTED\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum HFCLKSTARTED_A {
#[doc = "0: Read: Disabled"]
DISABLED = 0,
#[doc = "1: Read: Enabled"]
ENABLED = 1,
}
impl From<HFCLKSTARTED_A> for bool {
#[inline(always)]
fn from(variant: HFCLKSTARTED_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `HFCLKSTARTED` reader - Write '1' to disable interrupt for event HFCLKSTARTED"]
pub struct HFCLKSTARTED_R(crate::FieldReader<bool, HFCLKSTARTED_A>);
impl HFCLKSTARTED_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
HFCLKSTARTED_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> HFCLKSTARTED_A {
match self.bits {
false => HFCLKSTARTED_A::DISABLED,
true => HFCLKSTARTED_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
**self == HFCLKSTARTED_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == HFCLKSTARTED_A::ENABLED
}
}
impl core::ops::Deref for HFCLKSTARTED_R {
type Target = crate::FieldReader<bool, HFCLKSTARTED_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Write '1' to disable interrupt for event HFCLKSTARTED\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum HFCLKSTARTED_AW {
#[doc = "1: Disable"]
CLEAR = 1,
}
impl From<HFCLKSTARTED_AW> for bool {
#[inline(always)]
fn from(variant: HFCLKSTARTED_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `HFCLKSTARTED` writer - Write '1' to disable interrupt for event HFCLKSTARTED"]
pub struct HFCLKSTARTED_W<'a> {
w: &'a mut W,
}
impl<'a> HFCLKSTARTED_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: HFCLKSTARTED_AW) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Disable"]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(HFCLKSTARTED_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
#[doc = "Write '1' to disable interrupt for event LFCLKSTARTED\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum LFCLKSTARTED_A {
#[doc = "0: Read: Disabled"]
DISABLED = 0,
#[doc = "1: Read: Enabled"]
ENABLED = 1,
}
impl From<LFCLKSTARTED_A> for bool {
#[inline(always)]
fn from(variant: LFCLKSTARTED_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `LFCLKSTARTED` reader - Write '1' to disable interrupt for event LFCLKSTARTED"]
pub struct LFCLKSTARTED_R(crate::FieldReader<bool, LFCLKSTARTED_A>);
impl LFCLKSTARTED_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
LFCLKSTARTED_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> LFCLKSTARTED_A {
match self.bits {
false => LFCLKSTARTED_A::DISABLED,
true => LFCLKSTARTED_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
**self == LFCLKSTARTED_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == LFCLKSTARTED_A::ENABLED
}
}
impl core::ops::Deref for LFCLKSTARTED_R {
type Target = crate::FieldReader<bool, LFCLKSTARTED_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Write '1' to disable interrupt for event LFCLKSTARTED\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum LFCLKSTARTED_AW {
#[doc = "1: Disable"]
CLEAR = 1,
}
impl From<LFCLKSTARTED_AW> for bool {
#[inline(always)]
fn from(variant: LFCLKSTARTED_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `LFCLKSTARTED` writer - Write '1' to disable interrupt for event LFCLKSTARTED"]
pub struct LFCLKSTARTED_W<'a> {
w: &'a mut W,
}
impl<'a> LFCLKSTARTED_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: LFCLKSTARTED_AW) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Disable"]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(LFCLKSTARTED_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1);
self.w
}
}
#[doc = "Write '1' to disable interrupt for event DONE\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DONE_A {
#[doc = "0: Read: Disabled"]
DISABLED = 0,
#[doc = "1: Read: Enabled"]
ENABLED = 1,
}
impl From<DONE_A> for bool {
#[inline(always)]
fn from(variant: DONE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `DONE` reader - Write '1' to disable interrupt for event DONE"]
pub struct DONE_R(crate::FieldReader<bool, DONE_A>);
impl DONE_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
DONE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> DONE_A {
match self.bits {
false => DONE_A::DISABLED,
true => DONE_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
**self == DONE_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == DONE_A::ENABLED
}
}
impl core::ops::Deref for DONE_R {
type Target = crate::FieldReader<bool, DONE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Write '1' to disable interrupt for event DONE\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DONE_AW {
#[doc = "1: Disable"]
CLEAR = 1,
}
impl From<DONE_AW> for bool {
#[inline(always)]
fn from(variant: DONE_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `DONE` writer - Write '1' to disable interrupt for event DONE"]
pub struct DONE_W<'a> {
w: &'a mut W,
}
impl<'a> DONE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: DONE_AW) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Disable"]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(DONE_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7);
self.w
}
}
impl R {
#[doc = "Bit 0 - Write '1' to disable interrupt for event HFCLKSTARTED"]
#[inline(always)]
pub fn hfclkstarted(&self) -> HFCLKSTARTED_R {
HFCLKSTARTED_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Write '1' to disable interrupt for event LFCLKSTARTED"]
#[inline(always)]
pub fn lfclkstarted(&self) -> LFCLKSTARTED_R {
LFCLKSTARTED_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 7 - Write '1' to disable interrupt for event DONE"]
#[inline(always)]
pub fn done(&self) -> DONE_R {
DONE_R::new(((self.bits >> 7) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Write '1' to disable interrupt for event HFCLKSTARTED"]
#[inline(always)]
pub fn hfclkstarted(&mut self) -> HFCLKSTARTED_W {
HFCLKSTARTED_W { w: self }
}
#[doc = "Bit 1 - Write '1' to disable interrupt for event LFCLKSTARTED"]
#[inline(always)]
pub fn lfclkstarted(&mut self) -> LFCLKSTARTED_W {
LFCLKSTARTED_W { w: self }
}
#[doc = "Bit 7 - Write '1' to disable interrupt for event DONE"]
#[inline(always)]
pub fn done(&mut self) -> DONE_W {
DONE_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Disable interrupt\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intenclr](index.html) module"]
pub struct INTENCLR_SPEC;
impl crate::RegisterSpec for INTENCLR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [intenclr::R](R) reader structure"]
impl crate::Readable for INTENCLR_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [intenclr::W](W) writer structure"]
impl crate::Writable for INTENCLR_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets INTENCLR to value 0"]
impl crate::Resettable for INTENCLR_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
// TODO(#43)
#![allow(clippy::missing_errors_doc)]
#![allow(clippy::missing_panics_doc)]
// Public modules
pub mod cli;
mod encoding;
pub use self::encoding::Encode;
mod features;
pub use self::features::{
Ext,
FeatureSet
};
// TODO - clean these up
#![allow(unused_imports)]
#![allow(non_camel_case_types)]
use core::marker::PhantomData;
/// Input mode (type state)
pub struct Input<MODE> {
_mode: PhantomData<MODE>,
}
/// Floating input (type state)
pub struct Floating;
/// Pulled down input (type state)
pub struct PullDown;
/// Pulled up input (type state)
pub struct PullUp;
/// Output mode (type state)
pub struct Output<MODE> {
_mode: PhantomData<MODE>,
}
/// Extension trait to split a GPIO peripheral into independent pins and registers
pub trait GpioExt {
    /// The parts to split the GPIO into
    type Parts;
/// Splits the GPIO block into independent pins and registers
fn split(
self,
// apb2: &mut APB2
) -> Self::Parts;
}
/// Push pull output (type state)
pub struct PushPull;
/// Open drain output (type state)
pub struct OpenDrain;
// /// Alternate function
// pub struct Alternate<MODE> {
// _mode: PhantomData<MODE>,
// }
/// Represents a digital input or output level
pub enum Level {
Low,
High,
}
// ===============================================================
// Implement Generic Pins for this port, which allows you to use
// other peripherals without having to be completely rust-generic
// across all of the possible pins
// ===============================================================
/// Generic $PX pin
pub struct Pin<MODE> {
pub pin: u8,
#[cfg(feature = "52840")]
pub port: bool,
_mode: PhantomData<MODE>,
}
use crate::target::P0;
#[cfg(feature = "52840")]
use crate::target::{ P1 };
use crate::hal::digital::{OutputPin, StatefulOutputPin, InputPin};
impl<MODE> Pin<MODE> {
/// Convert the pin to be a floating input
pub fn into_floating_input(self) -> Pin<Input<Floating>> {
unsafe {
&(*{
#[cfg(any(feature = "52810", feature = "52832"))]
{ P0::ptr() }
#[cfg(feature = "52840")]
{ if !self.port { P0::ptr() } else { P1::ptr() } }
}).pin_cnf[self.pin as usize]
}
.write(|w| {
w.dir().input()
.input().connect()
.pull().disabled()
.drive().s0s1()
.sense().disabled()
});
Pin {
_mode: PhantomData,
#[cfg(feature = "52840")]
port: self.port,
pin: self.pin
}
}
pub fn into_pullup_input(self) -> Pin<Input<PullUp>> {
unsafe {
&(*{
#[cfg(any(feature = "52810", feature = "52832"))]
{ P0::ptr() }
#[cfg(feature = "52840")]
{ if !self.port { P0::ptr() } else { P1::ptr() } }
}).pin_cnf[self.pin as usize]
}
.write(|w| {
w.dir().input()
.input().connect()
.pull().pullup()
.drive().s0s1()
.sense().disabled()
});
Pin {
_mode: PhantomData,
#[cfg(feature = "52840")]
port: self.port,
pin: self.pin
}
}
pub fn into_pulldown_input(self) -> Pin<Input<PullDown>> {
unsafe {
&(*{
#[cfg(any(feature = "52810", feature = "52832"))]
{ P0::ptr() }
#[cfg(feature = "52840")]
{ if !self.port { P0::ptr() } else { P1::ptr() } }
}).pin_cnf[self.pin as usize]
}
.write(|w| {
w.dir().input()
.input().connect()
.pull().pulldown()
.drive().s0s1()
.sense().disabled()
});
Pin {
_mode: PhantomData,
#[cfg(feature = "52840")]
port: self.port,
pin: self.pin
}
}
/// Convert the pin to be a push-pull output with normal drive
pub fn into_push_pull_output(self, initial_output: Level)
-> Pin<Output<PushPull>>
{
let mut pin = Pin {
_mode: PhantomData,
#[cfg(feature = "52840")]
port: self.port,
pin: self.pin
};
match initial_output {
Level::Low => pin.set_low(),
Level::High => pin.set_high(),
}
unsafe {
&(*{
#[cfg(any(feature = "52810", feature = "52832"))]
{ P0::ptr() }
#[cfg(feature = "52840")]
{ if !self.port { P0::ptr() } else { P1::ptr() } }
}).pin_cnf[self.pin as usize]
}
.write(|w| {
w.dir().output()
.input().connect() // AJM - hack for SPI
.pull().disabled()
.drive().s0s1()
.sense().disabled()
});
pin
}
/// Convert the pin to be an open-drain output
///
/// This method currently does not support configuring an
/// internal pull-up or pull-down resistor.
pub fn into_open_drain_output(self,
config: OpenDrainConfig,
initial_output: Level,
)
-> Pin<Output<OpenDrain>>
{
let mut pin = Pin {
_mode: PhantomData,
#[cfg(feature = "52840")]
port: self.port,
pin: self.pin
};
match initial_output {
Level::Low => pin.set_low(),
Level::High => pin.set_high(),
}
// This is safe, as we restrict our access to the dedicated
// register for this pin.
let pin_cnf = unsafe {
&(*{
#[cfg(any(feature = "52810", feature = "52832"))]
{ P0::ptr() }
#[cfg(feature = "52840")]
{ if !self.port { P0::ptr() } else { P1::ptr() } }
}).pin_cnf[self.pin as usize]
};
pin_cnf.write(|w| {
w
.dir().output()
.input().disconnect()
.pull().disabled()
.drive().variant(config.variant())
.sense().disabled()
});
pin
}
}
impl<MODE> InputPin for Pin<Input<MODE>> {
fn is_high(&self) -> bool {
!self.is_low()
}
fn is_low(&self) -> bool {
unsafe { (
(*{
#[cfg(any(feature = "52810", feature = "52832"))]
{ P0::ptr() }
#[cfg(feature = "52840")]
{ if !self.port { P0::ptr() } else { P1::ptr() } }
}).in_.read().bits() & (1 << self.pin)
) == 0 }
}
}
impl<MODE> OutputPin for Pin<Output<MODE>> {
/// Set the output as high
fn set_high(&mut self) {
// NOTE(unsafe) atomic write to a stateless register - TODO(AJM) verify?
// TODO - I wish I could do something like `.pins$i()`...
unsafe {
(*{
#[cfg(any(feature = "52810", feature = "52832"))]
{ P0::ptr() }
#[cfg(feature = "52840")]
{ if !self.port { P0::ptr() } else { P1::ptr() } }
}).outset.write(|w| w.bits(1u32 << self.pin));
}
}
/// Set the output as low
fn set_low(&mut self) {
// NOTE(unsafe) atomic write to a stateless register - TODO(AJM) verify?
// TODO - I wish I could do something like `.pins$i()`...
unsafe {
(*{
#[cfg(any(feature = "52810", feature = "52832"))]
{ P0::ptr() }
#[cfg(feature = "52840")]
{ if !self.port { P0::ptr() } else { P1::ptr() } }
}).outclr.write(|w| w.bits(1u32 << self.pin));
}
}
}
impl<MODE> StatefulOutputPin for Pin<Output<MODE>> {
/// Is the output pin set as high?
fn is_set_high(&self) -> bool {
!self.is_set_low()
}
/// Is the output pin set as low?
fn is_set_low(&self) -> bool {
// NOTE(unsafe) atomic read with no side effects - TODO(AJM) verify?
// TODO - I wish I could do something like `.pins$i()`...
unsafe { (
(*{
#[cfg(any(feature = "52810", feature = "52832"))]
{ P0::ptr() }
#[cfg(feature = "52840")]
{ if !self.port { P0::ptr() } else { P1::ptr() } }
}).out.read().bits() & (1 << self.pin)
) == 0 }
}
}
/// Pin configuration for open-drain mode
pub enum OpenDrainConfig {
Disconnect0Standard1,
Disconnect0HighDrive1,
Standard0Disconnect1,
HighDrive0Disconnect1,
}
use crate::target::p0::{
pin_cnf,
PIN_CNF,
};
impl OpenDrainConfig {
fn variant(self) -> pin_cnf::DRIVEW {
use self::OpenDrainConfig::*;
match self {
Disconnect0Standard1 => pin_cnf::DRIVEW::D0S1,
Disconnect0HighDrive1 => pin_cnf::DRIVEW::D0H1,
Standard0Disconnect1 => pin_cnf::DRIVEW::S0D1,
HighDrive0Disconnect1 => pin_cnf::DRIVEW::H0D1,
}
}
}
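// This macro generates a `$px` module containing one zero-sized struct per
// pin plus the `Parts`/`GpioExt` machinery to split the port. `$port_value`
// is the port flag a pin carries once degraded into a generic `Pin` on the
// two-port nRF52840.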
macro_rules! gpio {
(
$PX:ident, $pxsvd:ident, $px:ident, $port_value:expr, [
$($PXi:ident: ($pxi:ident, $i:expr, $MODE:ty),)+
]
) => {
/// GPIO
pub mod $px {
use super::{
Pin,
// Alternate,
Floating,
GpioExt,
Input,
Level,
OpenDrain,
OpenDrainConfig,
Output,
PullDown,
PullUp,
PushPull,
PhantomData,
};
use crate::target;
use crate::target::$PX;
use crate::hal::digital::{OutputPin, StatefulOutputPin, InputPin};
// ===============================================================
// This chunk allows you to obtain an nrf52-hal gpio from the
// upstream nrf52 gpio definitions by defining a trait
// ===============================================================
/// GPIO parts
pub struct Parts {
$(
/// Pin
pub $pxi: $PXi<$MODE>,
)+
}
impl GpioExt for $PX {
type Parts = Parts;
fn split(self) -> Parts {
Parts {
$(
$pxi: $PXi {
_mode: PhantomData,
},
)+
}
}
}
// ===============================================================
// Implement each of the typed pins usable through the nrf52-hal
// defined interface
// ===============================================================
$(
pub struct $PXi<MODE> {
_mode: PhantomData<MODE>,
}
impl<MODE> $PXi<MODE> {
/// Convert the pin to be a floating input
pub fn into_floating_input(self) -> $PXi<Input<Floating>> {
unsafe { &(*$PX::ptr()).pin_cnf[$i] }.write(|w| {
w.dir().input()
.input().connect()
.pull().disabled()
.drive().s0s1()
.sense().disabled()
});
$PXi {
_mode: PhantomData,
}
}
pub fn into_pulldown_input(self) -> $PXi<Input<PullDown>> {
unsafe { &(*$PX::ptr()).pin_cnf[$i] }.write(|w| {
w.dir().input()
.input().connect()
.pull().pulldown()
.drive().s0s1()
.sense().disabled()
});
$PXi {
_mode: PhantomData,
}
}
pub fn into_pullup_input(self) -> $PXi<Input<PullUp>> {
unsafe { &(*$PX::ptr()).pin_cnf[$i] }.write(|w| {
w.dir().input()
.input().connect()
.pull().pullup()
.drive().s0s1()
.sense().disabled()
});
$PXi {
_mode: PhantomData,
}
}
                /// Convert the pin to be a push-pull output with normal drive
pub fn into_push_pull_output(self, initial_output: Level)
-> $PXi<Output<PushPull>>
{
let mut pin = $PXi {
_mode: PhantomData,
};
match initial_output {
Level::Low => pin.set_low(),
Level::High => pin.set_high(),
}
unsafe { &(*$PX::ptr()).pin_cnf[$i] }.write(|w| {
w.dir().output()
.input().disconnect()
.pull().disabled()
.drive().s0s1()
.sense().disabled()
});
pin
}
/// Convert the pin to be an open-drain output
///
/// This method currently does not support configuring an
/// internal pull-up or pull-down resistor.
pub fn into_open_drain_output(self,
config: OpenDrainConfig,
initial_output: Level,
)
-> $PXi<Output<OpenDrain>>
{
let mut pin = $PXi {
_mode: PhantomData,
};
match initial_output {
Level::Low => pin.set_low(),
Level::High => pin.set_high(),
}
// This is safe, as we restrict our access to the
// dedicated register for this pin.
let pin_cnf = unsafe {
&(*$PX::ptr()).pin_cnf[$i]
};
pin_cnf.write(|w| {
w
.dir().output()
.input().disconnect()
.pull().disabled()
.drive().variant(config.variant())
.sense().disabled()
});
pin
}
/// Degrade to a generic pin struct, which can be used with peripherals
pub fn degrade(self) -> Pin<MODE> {
Pin {
_mode: PhantomData,
#[cfg(feature = "52840")]
port: $port_value,
pin: $i
}
}
}
impl<MODE> InputPin for $PXi<Input<MODE>> {
fn is_high(&self) -> bool {
!self.is_low()
}
fn is_low(&self) -> bool {
unsafe { ((*$PX::ptr()).in_.read().bits() & (1 << $i)) == 0 }
}
}
impl<MODE> OutputPin for $PXi<Output<MODE>> {
/// Set the output as high
fn set_high(&mut self) {
// NOTE(unsafe) atomic write to a stateless register - TODO(AJM) verify?
// TODO - I wish I could do something like `.pins$i()`...
unsafe { (*$PX::ptr()).outset.write(|w| w.bits(1u32 << $i)); }
}
/// Set the output as low
fn set_low(&mut self) {
// NOTE(unsafe) atomic write to a stateless register - TODO(AJM) verify?
// TODO - I wish I could do something like `.pins$i()`...
unsafe { (*$PX::ptr()).outclr.write(|w| w.bits(1u32 << $i)); }
}
}
impl<MODE> StatefulOutputPin for $PXi<Output<MODE>> {
/// Is the output pin set as high?
fn is_set_high(&self) -> bool {
!self.is_set_low()
}
/// Is the output pin set as low?
fn is_set_low(&self) -> bool {
// NOTE(unsafe) atomic read with no side effects - TODO(AJM) verify?
// TODO - I wish I could do something like `.pins$i()`...
unsafe { ((*$PX::ptr()).out.read().bits() & (1 << $i)) == 0 }
}
}
)+
}
}
}
// ===========================================================================
// Definition of all the items used by the macros above.
//
// For now, it is a little repetitive, especially as the nrf52 only has one
// 32-bit GPIO port (P0)
// ===========================================================================
gpio!(P0, p0, p0, false, [
P0_00: (p0_00, 0, Input<Floating>),
P0_01: (p0_01, 1, Input<Floating>),
P0_02: (p0_02, 2, Input<Floating>),
P0_03: (p0_03, 3, Input<Floating>),
P0_04: (p0_04, 4, Input<Floating>),
P0_05: (p0_05, 5, Input<Floating>),
P0_06: (p0_06, 6, Input<Floating>),
P0_07: (p0_07, 7, Input<Floating>),
P0_08: (p0_08, 8, Input<Floating>),
P0_09: (p0_09, 9, Input<Floating>),
P0_10: (p0_10, 10, Input<Floating>),
P0_11: (p0_11, 11, Input<Floating>),
P0_12: (p0_12, 12, Input<Floating>),
P0_13: (p0_13, 13, Input<Floating>),
P0_14: (p0_14, 14, Input<Floating>),
P0_15: (p0_15, 15, Input<Floating>),
P0_16: (p0_16, 16, Input<Floating>),
P0_17: (p0_17, 17, Input<Floating>),
P0_18: (p0_18, 18, Input<Floating>),
P0_19: (p0_19, 19, Input<Floating>),
P0_20: (p0_20, 20, Input<Floating>),
P0_21: (p0_21, 21, Input<Floating>),
P0_22: (p0_22, 22, Input<Floating>),
P0_23: (p0_23, 23, Input<Floating>),
P0_24: (p0_24, 24, Input<Floating>),
P0_25: (p0_25, 25, Input<Floating>),
P0_26: (p0_26, 26, Input<Floating>),
P0_27: (p0_27, 27, Input<Floating>),
P0_28: (p0_28, 28, Input<Floating>),
P0_29: (p0_29, 29, Input<Floating>),
P0_30: (p0_30, 30, Input<Floating>),
P0_31: (p0_31, 31, Input<Floating>),
]);
// The p1 types are present in the p0 module generated from the
// svd, but we want to export them in a p1 module from this crate.
#[cfg(feature = "52840")]
gpio!(P1, p0, p1, true, [
P1_00: (p1_00, 0, Input<Floating>),
P1_01: (p1_01, 1, Input<Floating>),
P1_02: (p1_02, 2, Input<Floating>),
P1_03: (p1_03, 3, Input<Floating>),
P1_04: (p1_04, 4, Input<Floating>),
P1_05: (p1_05, 5, Input<Floating>),
P1_06: (p1_06, 6, Input<Floating>),
P1_07: (p1_07, 7, Input<Floating>),
P1_08: (p1_08, 8, Input<Floating>),
P1_09: (p1_09, 9, Input<Floating>),
P1_10: (p1_10, 10, Input<Floating>),
P1_11: (p1_11, 11, Input<Floating>),
P1_12: (p1_12, 12, Input<Floating>),
P1_13: (p1_13, 13, Input<Floating>),
P1_14: (p1_14, 14, Input<Floating>),
P1_15: (p1_15, 15, Input<Floating>),
]);
#[doc = "Register `HBN_PIR_INTERVAL` reader"]
pub struct R(crate::R<HBN_PIR_INTERVAL_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<HBN_PIR_INTERVAL_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<HBN_PIR_INTERVAL_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<HBN_PIR_INTERVAL_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `HBN_PIR_INTERVAL` writer"]
pub struct W(crate::W<HBN_PIR_INTERVAL_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<HBN_PIR_INTERVAL_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<HBN_PIR_INTERVAL_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<HBN_PIR_INTERVAL_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `pir_interval` reader - "]
pub struct PIR_INTERVAL_R(crate::FieldReader<u16, u16>);
impl PIR_INTERVAL_R {
pub(crate) fn new(bits: u16) -> Self {
PIR_INTERVAL_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for PIR_INTERVAL_R {
type Target = crate::FieldReader<u16, u16>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `pir_interval` writer - "]
pub struct PIR_INTERVAL_W<'a> {
w: &'a mut W,
}
impl<'a> PIR_INTERVAL_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0x0fff) | (value as u32 & 0x0fff);
self.w
}
}
impl R {
#[doc = "Bits 0:11"]
#[inline(always)]
pub fn pir_interval(&self) -> PIR_INTERVAL_R {
PIR_INTERVAL_R::new((self.bits & 0x0fff) as u16)
}
}
impl W {
#[doc = "Bits 0:11"]
#[inline(always)]
pub fn pir_interval(&mut self) -> PIR_INTERVAL_W {
PIR_INTERVAL_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "HBN_PIR_INTERVAL.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [hbn_pir_interval](index.html) module"]
pub struct HBN_PIR_INTERVAL_SPEC;
impl crate::RegisterSpec for HBN_PIR_INTERVAL_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [hbn_pir_interval::R](R) reader structure"]
impl crate::Readable for HBN_PIR_INTERVAL_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [hbn_pir_interval::W](W) writer structure"]
impl crate::Writable for HBN_PIR_INTERVAL_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets HBN_PIR_INTERVAL to value 0"]
impl crate::Resettable for HBN_PIR_INTERVAL_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
use std::marker::PhantomData;
use std::sync::Arc;
use bytes::{Bytes, BytesMut};
use futures::future::ready;
use futures::stream::{SplitSink, SplitStream};
use futures::{Sink, SinkExt, Stream, StreamExt};
use orion::aead::streaming::{StreamOpener, StreamSealer};
use orion::kdf::SecretKey;
use serde::de::DeserializeOwned;
use serde::Serialize;
use tako::transfer::auth::{do_authentication, open_message, seal_message};
use tokio::net::TcpStream;
use tokio_util::codec::{Framed, LengthDelimitedCodec};
use crate::common::error::error;
use crate::common::serverdir::AccessRecord;
use crate::transfer::messages::{FromClientMessage, ToClientMessage};
use crate::transfer::protocol::make_protocol_builder;
type Codec = Framed<TcpStream, LengthDelimitedCodec>;
const COMM_PROTOCOL: u32 = 0;
pub struct HqConnection<ReceiveMsg, SendMsg> {
writer: SplitSink<Codec, Bytes>,
reader: SplitStream<Codec>,
sealer: Option<StreamSealer>,
opener: Option<StreamOpener>,
_r: PhantomData<ReceiveMsg>,
_s: PhantomData<SendMsg>,
}
impl<R: DeserializeOwned, S: Serialize> HqConnection<R, S> {
pub async fn send(&mut self, item: S) -> crate::Result<()> {
let data = serialize_message(item, &mut self.sealer)?;
self.writer.send(data).await?;
Ok(())
}
pub async fn receive(&mut self) -> Option<crate::Result<R>> {
self.reader.next().await.map(|msg| {
msg.map_err(|e| e.into())
.and_then(|m| deserialize_message(Ok(m), &mut self.opener))
})
}
pub async fn send_and_receive(&mut self, item: S) -> crate::Result<R> {
self.send(item).await?;
match self.receive().await {
Some(msg) => msg,
None => error("Expected response was not received".into()),
}
}
pub fn split(
self,
) -> (
impl Sink<S, Error = crate::Error>,
impl Stream<Item = crate::Result<R>>,
) {
let HqConnection {
reader,
writer,
mut sealer,
mut opener,
..
} = self;
let sink = writer.with(move |msg| ready(serialize_message(msg, &mut sealer)));
let stream = reader.map(move |message| deserialize_message(message, &mut opener));
(sink, stream)
}
async fn init(socket: TcpStream, server: bool, key: Arc<SecretKey>) -> crate::Result<Self> {
let connection = make_protocol_builder().new_framed(socket);
let (mut tx, mut rx) = connection.split();
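        // Both sides of the handshake agree on a pair of role strings;
        // relative to the server, a client simply swaps the two.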
let mut my_role = "hq-server".to_string();
let mut peer_role = "hq-client".to_string();
if !server {
std::mem::swap(&mut my_role, &mut peer_role);
}
let (sealer, opener) = do_authentication(
COMM_PROTOCOL,
my_role,
peer_role,
Some(key),
&mut tx,
&mut rx,
)
.await?;
Ok(Self {
writer: tx,
reader: rx,
sealer,
opener,
_r: Default::default(),
_s: Default::default(),
})
}
}
pub type ClientConnection = HqConnection<ToClientMessage, FromClientMessage>;
pub type ServerConnection = HqConnection<FromClientMessage, ToClientMessage>;
/// Client -> server connection
impl ClientConnection {
pub async fn connect_to_server(record: &AccessRecord) -> crate::Result<ClientConnection> {
let address = format!("{}:{}", record.host(), record.server_port());
let connection = TcpStream::connect(address).await?;
let key = record.hq_secret_key().clone();
HqConnection::init(connection, false, key).await
}
}
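// A minimal sketch of a client round trip over the connection defined above;
// `FromClientMessage::Stats` is an illustrative variant name, not one
// guaranteed by this crate.
#[allow(dead_code)]
async fn example_round_trip(record: &AccessRecord) -> crate::Result<()> {
    let mut connection = ClientConnection::connect_to_server(record).await?;
    // One authenticated request/response exchange.
    let _response = connection
        .send_and_receive(FromClientMessage::Stats)
        .await?;
    Ok(())
}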
/// Server -> client connection
impl ServerConnection {
pub async fn accept_client(
socket: TcpStream,
key: Arc<SecretKey>,
) -> crate::Result<ServerConnection> {
HqConnection::init(socket, true, key).await
}
}
fn serialize_message<S: Serialize>(
    item: S,
    sealer: &mut Option<StreamSealer>,
) -> crate::Result<Bytes> {
    let data = tako::transfer::auth::serialize(&item)?;
    Ok(seal_message(sealer, data.into()))
}
fn deserialize_message<R: DeserializeOwned>(
    message: Result<BytesMut, std::io::Error>,
    opener: &mut Option<StreamOpener>,
) -> crate::Result<R> {
    let message = message?;
    let item = open_message(opener, &message)?;
    Ok(item)
}
| 30.013605 | 96 | 0.614914 |
640076c9613d482600580f3b3f848b105eee2615 | 859 | table! {
events (id) {
id -> Integer,
location -> Integer,
name -> Text,
description -> Nullable<Text>,
start_time -> Timestamp,
end_time -> Timestamp,
timezone -> Text,
uuid -> Binary,
created -> Timestamp,
updated -> Timestamp,
}
}
table! {
locations (id) {
id -> Integer,
name -> Text,
description -> Nullable<Text>,
address -> Nullable<Text>,
city -> Nullable<Text>,
region -> Nullable<Text>,
postal -> Nullable<Text>,
country -> Nullable<Text>,
lat -> Double,
lng -> Double,
uuid -> Binary,
created -> Timestamp,
updated -> Timestamp,
}
}
joinable!(events -> locations (location));
allow_tables_to_appear_in_same_query!(
events,
locations,
);
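// A minimal sketch of querying this schema with Diesel's 1.x API; the SQLite
// backend and the function itself are illustrative assumptions. The
// `inner_join` is only legal because of the `joinable!` declaration above.
use diesel::prelude::*;

fn event_names_with_locations(
    conn: &diesel::sqlite::SqliteConnection,
) -> QueryResult<Vec<(String, String)>> {
    // Pair each event's name with the name of its location.
    events::table
        .inner_join(locations::table)
        .select((events::name, locations::name))
        .load(conn)
}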
| 21.475 | 42 | 0.519208 |
039380f06b581e4f4482a61a0c8ad6b80900b236 | 23,062 | extern crate xmltree;
use self::xmltree::Element;
use crate::ExpectedType;
use crate::other_helpers::*;
use crate::xml_helpers::*;
use std::collections::HashSet;
use std::fs::File;
use std::io::{BufReader, Write};
use std::path::Path;
pub fn generate_tables(generated_dir: &Path) {
let element = load_xml();
let mut fun = String::new();
fun.push_str("
// The contents of this file are automatically generated and should not be modified directly. See the `build` directory.
extern crate itertools;
use crate::{
CodePair,
CodePairValue,
Color,
Drawing,
DrawingItem,
DrawingItemMut,
DxfError,
DxfResult,
ExtensionGroup,
LineWeight,
Point,
Vector,
XData,
};
use crate::code_pair_put_back::CodePairPutBack;
use crate::code_pair_writer::CodePairWriter;
use crate::helper_functions::*;
use crate::extension_data;
use crate::x_data;
use crate::enums::*;
use crate::enum_primitive::FromPrimitive;
use std::io::{Read, Write};
".trim_start());
fun.push_str("\n");
generate_table_items(&mut fun, &element);
generate_table_reader(&mut fun, &element);
generate_table_writer(&mut fun, &element);
    let mut file = File::create(generated_dir.join("tables.rs")).unwrap();
    file.write_all(fun.as_bytes()).unwrap();
}
fn generate_table_items(fun: &mut String, element: &Element) {
for table in &element.children {
let mut seen_fields = HashSet::new();
let table_item = &table.children[0];
fun.push_str("#[derive(Debug)]\n");
fun.push_str("#[cfg_attr(feature = \"serialize\", derive(Serialize, Deserialize))]\n");
fun.push_str(&format!("pub struct {name} {{\n", name = name(&table_item)));
fun.push_str(" pub name: String,\n");
fun.push_str(" pub handle: u32,\n");
fun.push_str(" #[doc(hidden)]\n");
fun.push_str(" pub __owner_handle: u32,\n");
fun.push_str(" pub extension_data_groups: Vec<ExtensionGroup>,\n");
fun.push_str(" pub x_data: Vec<XData>,\n");
for field in &table_item.children {
let name = if field.name == "Pointer" {
format!("__{}_handle", name(&field))
} else {
name(&field)
};
if !seen_fields.contains(&name) {
seen_fields.insert(name.clone());
let mut typ = if field.name == "Pointer" {
String::from("u32")
} else {
attr(&field, "Type")
};
if allow_multiples(&field) {
typ = format!("Vec<{}>", typ);
}
let is_private = name.starts_with("_");
if is_private {
fun.push_str(" #[doc(hidden)]\n");
}
fun.push_str(&format!(" pub {name}: {typ},\n", name = name, typ = typ));
}
}
fun.push_str("}\n");
fun.push_str("\n");
seen_fields.clear();
fun.push_str(&format!(
"impl Default for {name} {{\n",
name = name(&table_item)
));
fun.push_str(" fn default() -> Self {\n");
fun.push_str(&format!(" {name} {{\n", name = name(&table_item)));
fun.push_str(" name: String::new(),\n");
fun.push_str(" handle: 0,\n");
fun.push_str(" __owner_handle: 0,\n");
fun.push_str(" extension_data_groups: vec![],\n");
fun.push_str(" x_data: vec![],\n");
for field in &table_item.children {
let name = if field.name == "Pointer" {
format!("__{}_handle", name(&field))
} else {
name(&field)
};
if !seen_fields.contains(&name) {
seen_fields.insert(name.clone());
let default_value = match (&*field.name, allow_multiples(&field)) {
("Pointer", true) => String::from("vec![]"),
("Pointer", false) => String::from("0"),
(_, _) => attr(&field, "DefaultValue"),
};
fun.push_str(&format!(
" {field}: {default_value},\n",
field = name,
default_value = default_value
));
}
}
fun.push_str(" }\n");
fun.push_str(" }\n");
fun.push_str("}\n");
fun.push_str("\n");
fun.push_str(&format!("impl {name} {{\n", name = name(&table_item)));
fun.push_str(
" pub fn get_owner<'a>(&self, drawing: &'a Drawing) -> Option<DrawingItem<'a>> {\n",
);
fun.push_str(" drawing.get_item_by_handle(self.__owner_handle)\n");
fun.push_str(" }\n");
fun.push_str(" pub fn set_owner<'a>(&mut self, item: &'a mut DrawingItemMut, drawing: &'a mut Drawing) {\n");
fun.push_str(" self.__owner_handle = drawing.assign_and_get_handle(item);\n");
fun.push_str(" }\n");
fun.push_str("}\n");
fun.push_str("\n");
}
}
fn generate_table_reader(fun: &mut String, element: &Element) {
fun.push_str("pub(crate) fn read_specific_table<I>(drawing: &mut Drawing, iter: &mut CodePairPutBack<I>) -> DxfResult<()>\n");
fun.push_str(" where I: Read {\n");
fun.push_str("\n");
fun.push_str(" match iter.next() {\n");
fun.push_str(" Some(Ok(pair)) => {\n");
fun.push_str(" if pair.code != 2 {\n");
fun.push_str(" return Err(DxfError::ExpectedTableType(pair.offset));\n");
fun.push_str(" }\n");
fun.push_str("\n");
fun.push_str(" match &*pair.assert_string()? {\n");
for table in &element.children {
fun.push_str(&format!(
" \"{table_name}\" => read_{collection}(drawing, iter)?,\n",
table_name = attr(&table, "TypeString"),
collection = attr(&table, "Collection")
));
}
fun.push_str(" _ => Drawing::swallow_table(iter)?,\n");
fun.push_str(" }\n");
fun.push_str("\n");
fun.push_str(" match iter.next() {\n");
fun.push_str(" Some(Ok(CodePair { code: 0, value: CodePairValue::Str(ref s), .. })) if s == \"ENDTAB\" => (),\n");
fun.push_str(" Some(Ok(pair)) => return Err(DxfError::UnexpectedCodePair(pair, String::from(\"expected 0/ENDTAB\"))),\n");
fun.push_str(" Some(Err(e)) => return Err(e),\n");
fun.push_str(" None => return Err(DxfError::UnexpectedEndOfInput),\n");
fun.push_str(" }\n");
fun.push_str(" },\n");
fun.push_str(" Some(Err(e)) => return Err(e),\n");
fun.push_str(" None => return Err(DxfError::UnexpectedEndOfInput),\n");
fun.push_str(" }\n");
fun.push_str("\n");
fun.push_str(" Ok(())\n");
fun.push_str("}\n");
fun.push_str("\n");
for table in &element.children {
let table_item = &table.children[0];
let collection = attr(&table, "Collection");
let (item_type, _) = collection.split_at(collection.len() - 1); // remove the 's' suffix
fun.push_str(&format!("fn read_{collection}<I>(drawing: &mut Drawing, iter: &mut CodePairPutBack<I>) -> DxfResult<()>\n", collection=attr(&table, "Collection")));
fun.push_str(" where I: Read {\n");
fun.push_str("\n");
fun.push_str(" loop {\n");
fun.push_str(" match iter.next() {\n");
fun.push_str(" Some(Ok(pair)) => {\n");
fun.push_str(" if pair.code == 0 {\n");
fun.push_str(&format!(
" if pair.assert_string()? != \"{table_type}\" {{\n",
table_type = attr(&table, "TypeString")
));
fun.push_str(" iter.put_back(Ok(pair));\n");
fun.push_str(" break;\n");
fun.push_str(" }\n");
fun.push_str("\n");
fun.push_str(&format!(
" let mut item = {typ}::default();\n",
typ = attr(&table_item, "Name")
));
fun.push_str(" loop {\n");
fun.push_str(" match iter.next() {\n");
fun.push_str(
" Some(Ok(pair @ CodePair { code: 0, .. })) => {\n",
);
fun.push_str(" iter.put_back(Ok(pair));\n");
fun.push_str(" break;\n");
fun.push_str(" },\n");
fun.push_str(" Some(Ok(pair)) => {\n");
fun.push_str(" match pair.code {\n");
fun.push_str(
" 2 => item.name = pair.assert_string()?,\n",
);
fun.push_str(" 5 => item.handle = pair.as_handle()?,\n");
fun.push_str(
" extension_data::EXTENSION_DATA_GROUP => {\n",
);
fun.push_str(" let group = ExtensionGroup::read_group(pair.assert_string()?, iter, pair.offset)?;\n");
fun.push_str(
" item.extension_data_groups.push(group);\n",
);
fun.push_str(" },\n");
fun.push_str(" x_data::XDATA_APPLICATIONNAME => {\n");
fun.push_str(" let x = XData::read_item(pair.assert_string()?, iter)?;\n");
fun.push_str(" item.x_data.push(x);\n");
fun.push_str(" },\n");
fun.push_str(
" 330 => item.__owner_handle = pair.as_handle()?,\n",
);
for field in &table_item.children {
if generate_reader(&field) {
for (i, &cd) in codes(&field).iter().enumerate() {
let reader = get_field_reader(&field);
let codes = codes(&field);
let write_cmd = match codes.len() {
1 => {
let read_fun = if allow_multiples(&field) {
format!(".push({})", reader)
} else {
format!(" = {}", reader)
};
let normalized_field_name = if field.name == "Pointer" {
format!("__{}_handle", name(&field))
} else {
name(&field)
};
format!(
"item.{field}{read_fun}",
field = normalized_field_name,
read_fun = read_fun
)
}
_ => {
let suffix = match i {
0 => "x",
1 => "y",
2 => "z",
_ => panic!("impossible"),
};
format!(
"item.{field}.{suffix} = {reader}",
field = name(&field),
suffix = suffix,
reader = reader
)
}
};
fun.push_str(&format!(
" {code} => {{ {cmd}; }},\n",
code = cd,
cmd = write_cmd
));
}
}
}
fun.push_str(" _ => (), // unsupported code\n");
fun.push_str(" }\n");
fun.push_str(" },\n");
fun.push_str(" Some(Err(e)) => return Err(e),\n");
fun.push_str(
" None => return Err(DxfError::UnexpectedEndOfInput),\n",
);
fun.push_str(" }\n");
fun.push_str(" }\n");
fun.push_str("\n");
fun.push_str(" if item.handle == 0 {\n");
fun.push_str(&format!(
" drawing.add_{item_type}(item);\n",
item_type = item_type
));
fun.push_str(" }\n");
fun.push_str(" else {\n");
fun.push_str(&format!(
" drawing.add_{item_type}_no_handle_set(item);\n",
item_type = item_type
));
fun.push_str(" }\n");
fun.push_str(" }\n");
fun.push_str(" else {\n");
fun.push_str(" // do nothing, probably the table's handle or flags\n");
fun.push_str(" }\n");
fun.push_str(" },\n");
fun.push_str(" Some(Err(e)) => return Err(e),\n");
fun.push_str(" None => return Err(DxfError::UnexpectedEndOfInput),\n");
fun.push_str(" }\n");
fun.push_str(" }\n");
fun.push_str("\n");
fun.push_str(" Ok(())\n");
fun.push_str("}\n");
fun.push_str("\n");
}
}
fn generate_table_writer(fun: &mut String, element: &Element) {
fun.push_str("pub(crate) fn write_tables<T>(drawing: &Drawing, write_handles: bool, writer: &mut CodePairWriter<T>) -> DxfResult<()>\n");
fun.push_str(" where T: Write + ?Sized {\n");
fun.push_str("\n");
for table in &element.children {
fun.push_str(&format!(
" write_{collection}(drawing, write_handles, writer)?;\n",
collection = attr(&table, "Collection")
));
}
fun.push_str(" Ok(())\n");
fun.push_str("}\n");
fun.push_str("\n");
for table in &element.children {
let table_item = &table.children[0];
fun.push_str("#[allow(clippy::cognitive_complexity)] // long function, no good way to simplify this\n");
fun.push_str(&format!("fn write_{collection}<T>(drawing: &Drawing, write_handles: bool, writer: &mut CodePairWriter<T>) -> DxfResult<()>\n", collection=attr(&table, "Collection")));
fun.push_str(" where T: Write + ?Sized {\n");
fun.push_str("\n");
fun.push_str(&format!(
" if !drawing.{collection}().any(|_| true) {{ // is empty\n",
collection = attr(&table, "Collection")
));
fun.push_str(" return Ok(()) // nothing to write\n");
fun.push_str(" }\n");
fun.push_str("\n");
fun.push_str(" writer.write_code_pair(&CodePair::new_str(0, \"TABLE\"))?;\n");
fun.push_str(&format!(
" writer.write_code_pair(&CodePair::new_str(2, \"{type_string}\"))?;\n",
type_string = attr(&table, "TypeString")
));
// TODO: assign and write table handles
// fun.push_str(" if write_handles {\n");
// fun.push_str(" writer.write_code_pair(&CodePair::new_str(5, \"0\"))?;\n");
// fun.push_str(" }\n");
// fun.push_str("\n");
let item_type = name(&table_item);
fun.push_str(
" writer.write_code_pair(&CodePair::new_str(100, \"AcDbSymbolTable\"))?;\n",
);
fun.push_str(" writer.write_code_pair(&CodePair::new_i16(70, 0))?;\n");
fun.push_str(&format!(
" for item in drawing.{collection}() {{\n",
collection = attr(&table, "Collection")
));
fun.push_str(&format!(
" writer.write_code_pair(&CodePair::new_str(0, \"{type_string}\"))?;\n",
type_string = attr(&table, "TypeString")
));
fun.push_str(" if write_handles {\n");
fun.push_str(&format!(" writer.write_code_pair(&CodePair::new_string(5, &as_handle(DrawingItem::{item_type}(&item).get_handle())))?;\n",
item_type=item_type));
fun.push_str(" }\n");
fun.push_str("\n");
fun.push_str(" if drawing.header.version >= AcadVersion::R14 {\n");
fun.push_str(" for group in &item.extension_data_groups {\n");
fun.push_str(" group.write(writer)?;\n");
fun.push_str(" }\n");
fun.push_str(" }\n");
fun.push_str("\n");
fun.push_str(" writer.write_code_pair(&CodePair::new_str(100, \"AcDbSymbolTableRecord\"))?;\n");
fun.push_str(&format!(
" writer.write_code_pair(&CodePair::new_str(100, \"{class_name}\"))?;\n",
class_name = attr(&table_item, "ClassName")
));
fun.push_str(" writer.write_code_pair(&CodePair::new_string(2, &item.name))?;\n");
fun.push_str(" writer.write_code_pair(&CodePair::new_i16(70, 0))?;\n"); // TODO: flags
for field in &table_item.children {
if generate_writer(&field) {
let mut predicates = vec![];
if !min_version(&field).is_empty() {
predicates.push(format!(
"drawing.header.version >= AcadVersion::{}",
min_version(&field)
));
}
if !max_version(&field).is_empty() {
predicates.push(format!(
"drawing.header.version <= AcadVersion::{}",
max_version(&field)
));
}
if !write_condition(&field).is_empty() {
predicates.push(write_condition(&field));
}
if disable_writing_default(&field) {
predicates.push(format!(
"item.{field} != {default_value}",
field = name(&field),
default_value = default_value(&field)
));
}
let indent = if predicates.len() == 0 { "" } else { " " };
                if !predicates.is_empty() {
fun.push_str(&format!(
" if {predicate} {{\n",
predicate = predicates.join(" && ")
));
}
if allow_multiples(&field) {
let code = code(&field);
if field.name == "Pointer" {
fun.push_str(&format!(
"{indent} for x in &item.__{field}_handle {{\n",
indent = indent,
field = name(&field)
));
fun.push_str(&format!("{indent} writer.write_code_pair(&CodePair::new_string({code}, &as_handle(*x)))?;\n",
indent=indent, code=code));
} else {
let typ = ExpectedType::get_expected_type(code).unwrap();
let typ = get_code_pair_type(typ);
let deref = if typ == "string" { "" } else { "*" };
fun.push_str(&format!(
"{indent} for x in &item.{field} {{\n",
indent = indent,
field = name(&field)
));
fun.push_str(&format!("{indent} writer.write_code_pair(&CodePair::new_{typ}({code}, {deref}x))?;\n",
indent=indent, typ=typ, code=code, deref=deref));
}
fun.push_str(&format!("{indent} }}\n", indent = indent));
} else {
let codes = codes(&field);
if codes.len() == 1 {
let code = codes[0];
if field.name == "Pointer" {
fun.push_str(&format!("{indent} writer.write_code_pair(&CodePair::new_string({code}, &as_handle(item.__{field}_handle)))?;\n",
indent=indent, code=code, field=name(&field)));
} else {
let typ = ExpectedType::get_expected_type(code).unwrap();
let typ = get_code_pair_type(typ);
let value = format!("item.{}", name(&field));
let write_converter = if attr(&field, "WriteConverter").is_empty() {
String::from("{}")
} else {
attr(&field, "WriteConverter")
};
let value = write_converter.replace("{}", &value);
fun.push_str(&format!("{indent} writer.write_code_pair(&CodePair::new_{typ}({code}, {value}))?;\n",
indent=indent, typ=typ, code=code, value=value));
}
} else {
for (i, code) in codes.iter().enumerate() {
let suffix = match i {
0 => "x",
1 => "y",
2 => "z",
_ => panic!("impossible"),
};
fun.push_str(&format!("{indent} writer.write_code_pair(&CodePair::new_f64({code}, item.{field}.{suffix}))?;\n",
indent=indent, code=code, field=name(&field), suffix=suffix));
}
}
}
                if !predicates.is_empty() {
fun.push_str(" }\n");
}
}
}
fun.push_str(" for x in &item.x_data {\n");
fun.push_str(" x.write(drawing.header.version, writer)?;\n");
fun.push_str(" }\n");
fun.push_str(" }\n");
fun.push_str("\n");
fun.push_str(" writer.write_code_pair(&CodePair::new_str(0, \"ENDTAB\"))?;\n");
fun.push_str(" Ok(())\n");
fun.push_str("}\n");
fun.push_str("\n");
}
}
fn load_xml() -> Element {
let file = File::open("spec/TableSpec.xml").unwrap();
let file = BufReader::new(file);
Element::parse(file).unwrap()
}
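// A minimal sketch of wiring this generator into a Cargo build script; the
// `OUT_DIR`-based destination is an assumption -- the real build may instead
// target a checked-in `generated` directory.
//
//     // build.rs
//     fn main() {
//         let out_dir = std::path::PathBuf::from(std::env::var("OUT_DIR").unwrap());
//         generate_tables(&out_dir);
//     }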
| 45.308448 | 189 | 0.439121 |
d954eb838cb8a5fed42a31a11f5c1f64f916aefd | 36,256 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use attributes;
use llvm;
use llvm_util;
use rustc::dep_graph::DepGraphSafe;
use rustc::hir;
use debuginfo;
use monomorphize::Instance;
use value::Value;
use monomorphize::partitioning::CodegenUnit;
use type_::Type;
use type_of::PointeeInfo;
use rustc_codegen_ssa::traits::*;
use libc::c_uint;
use rustc_data_structures::base_n;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc::mir::mono::Stats;
use rustc::session::config::{self, DebugInfo};
use rustc::session::Session;
use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout, VariantIdx};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::util::nodemap::FxHashMap;
use rustc_target::spec::{HasTargetSpec, Target};
use rustc_codegen_ssa::callee::resolve_and_get_fn;
use rustc_codegen_ssa::base::wants_msvc_seh;
use callee::get_fn;
use std::ffi::CStr;
use std::cell::{Cell, RefCell};
use std::iter;
use std::str;
use std::sync::Arc;
use syntax::symbol::LocalInternedString;
use abi::Abi;
/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
/// `llvm::Context` so that several compilation units may be optimized in parallel.
/// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
pub struct CodegenCx<'ll, 'tcx: 'll> {
pub tcx: TyCtxt<'ll, 'tcx, 'tcx>,
pub check_overflow: bool,
pub use_dll_storage_attrs: bool,
pub tls_model: llvm::ThreadLocalMode,
pub llmod: &'ll llvm::Module,
pub llcx: &'ll llvm::Context,
pub stats: RefCell<Stats>,
pub codegen_unit: Arc<CodegenUnit<'tcx>>,
/// Cache instances of monomorphic and polymorphic items
pub instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
/// Cache generated vtables
pub vtables: RefCell<FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), &'ll Value>>,
/// Cache of constant strings,
pub const_cstr_cache: RefCell<FxHashMap<LocalInternedString, &'ll Value>>,
/// Reverse-direction for const ptrs cast from globals.
/// Key is a Value holding a *T,
/// Val is a Value holding a *[T].
///
/// Needed because LLVM loses pointer->pointee association
/// when we ptrcast, and we have to ptrcast during codegen
/// of a [T] const because we form a slice, a (*T,usize) pair, not
/// a pointer to an LLVM array type. Similar for trait objects.
pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
/// Cache of emitted const globals (value -> global)
pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
/// List of globals for static variables which need to be passed to the
/// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
/// (We have to make sure we don't invalidate any Values referring
/// to constants.)
pub statics_to_rauw: RefCell<Vec<(&'ll Value, &'ll Value)>>,
/// Statics that will be placed in the llvm.used variable
/// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details
pub used_statics: RefCell<Vec<&'ll Value>>,
pub lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), &'ll Type>>,
pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,
pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
pub isize_ty: &'ll Type,
pub dbg_cx: Option<debuginfo::CrateDebugContext<'ll, 'tcx>>,
eh_personality: Cell<Option<&'ll Value>>,
eh_unwind_resume: Cell<Option<&'ll Value>>,
pub rust_try_fn: Cell<Option<&'ll Value>>,
intrinsics: RefCell<FxHashMap<&'static str, &'ll Value>>,
/// A counter that is used for generating local symbol names
local_gen_sym_counter: Cell<usize>,
}
impl<'ll, 'tcx> DepGraphSafe for CodegenCx<'ll, 'tcx> {}
pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode {
let reloc_model_arg = match sess.opts.cg.relocation_model {
Some(ref s) => &s[..],
None => &sess.target.target.options.relocation_model[..],
};
match ::back::write::RELOC_MODEL_ARGS.iter().find(
|&&arg| arg.0 == reloc_model_arg) {
Some(x) => x.1,
_ => {
sess.err(&format!("{:?} is not a valid relocation mode",
reloc_model_arg));
sess.abort_if_errors();
bug!();
}
}
}
fn get_tls_model(sess: &Session) -> llvm::ThreadLocalMode {
let tls_model_arg = match sess.opts.debugging_opts.tls_model {
Some(ref s) => &s[..],
None => &sess.target.target.options.tls_model[..],
};
match ::back::write::TLS_MODEL_ARGS.iter().find(
|&&arg| arg.0 == tls_model_arg) {
Some(x) => x.1,
_ => {
sess.err(&format!("{:?} is not a valid TLS model",
tls_model_arg));
sess.abort_if_errors();
bug!();
}
}
}
fn is_any_library(sess: &Session) -> bool {
sess.crate_types.borrow().iter().any(|ty| {
*ty != config::CrateType::Executable
})
}
pub fn is_pie_binary(sess: &Session) -> bool {
!is_any_library(sess) && get_reloc_model(sess) == llvm::RelocMode::PIC
}
pub unsafe fn create_module(
sess: &Session,
llcx: &'ll llvm::Context,
mod_name: &str,
) -> &'ll llvm::Module {
let mod_name = SmallCStr::new(mod_name);
let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
// Ensure the data-layout values hardcoded remain the defaults.
if sess.target.target.options.is_builtin {
let tm = ::back::write::create_target_machine(sess, false);
llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
llvm::LLVMRustDisposeTargetMachine(tm);
let data_layout = llvm::LLVMGetDataLayout(llmod);
        let data_layout = str::from_utf8(CStr::from_ptr(data_layout).to_bytes())
            .expect("got a non-UTF8 data-layout from LLVM");
// Unfortunately LLVM target specs change over time, and right now we
// don't have proper support to work with any more than one
// `data_layout` than the one that is in the rust-lang/rust repo. If
// this compiler is configured against a custom LLVM, we may have a
// differing data layout, even though we should update our own to use
// that one.
//
// As an interim hack, if CFG_LLVM_ROOT is not an empty string then we
// disable this check entirely as we may be configured with something
// that has a different target layout.
//
// Unsure if this will actually cause breakage when rustc is configured
// as such.
//
// FIXME(#34960)
let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
let custom_llvm_used = cfg_llvm_root.trim() != "";
if !custom_llvm_used && sess.target.target.data_layout != data_layout {
bug!("data-layout for builtin `{}` target, `{}`, \
differs from LLVM default, `{}`",
sess.target.target.llvm_target,
sess.target.target.data_layout,
data_layout);
}
}
let data_layout = SmallCStr::new(&sess.target.target.data_layout);
llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
let llvm_target = SmallCStr::new(&sess.target.target.llvm_target);
llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
if is_pie_binary(sess) {
llvm::LLVMRustSetModulePIELevel(llmod);
}
// If skipping the PLT is enabled, we need to add some module metadata
// to ensure intrinsic calls don't use it.
if !sess.needs_plt() {
let avoid_plt = "RtLibUseGOT\0".as_ptr() as *const _;
llvm::LLVMRustAddModuleFlag(llmod, avoid_plt, 1);
}
llmod
}
impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
crate fn new(tcx: TyCtxt<'ll, 'tcx, 'tcx>,
codegen_unit: Arc<CodegenUnit<'tcx>>,
llvm_module: &'ll ::ModuleLlvm)
-> Self {
// An interesting part of Windows which MSVC forces our hand on (and
// apparently MinGW didn't) is the usage of `dllimport` and `dllexport`
// attributes in LLVM IR as well as native dependencies (in C these
// correspond to `__declspec(dllimport)`).
//
// Whenever a dynamic library is built by MSVC it must have its public
// interface specified by functions tagged with `dllexport` or otherwise
// they're not available to be linked against. This poses a few problems
// for the compiler, some of which are somewhat fundamental, but we use
// the `use_dll_storage_attrs` variable below to attach the `dllexport`
        // attribute to all LLVM functions that are exported (e.g., they're
        // already tagged with external linkage). This is suboptimal for a few
// reasons:
//
// * If an object file will never be included in a dynamic library,
// there's no need to attach the dllexport attribute. Most object
// files in Rust are not destined to become part of a dll as binaries
// are statically linked by default.
// * If the compiler is emitting both an rlib and a dylib, the same
// source object file is currently used but with MSVC this may be less
// feasible. The compiler may be able to get around this, but it may
// involve some invasive changes to deal with this.
//
// The flipside of this situation is that whenever you link to a dll and
// you import a function from it, the import should be tagged with
// `dllimport`. At this time, however, the compiler does not emit
// `dllimport` for any declarations other than constants (where it is
// required), which is again suboptimal for even more reasons!
//
// * Calling a function imported from another dll without using
// `dllimport` causes the linker/compiler to have extra overhead (one
// `jmp` instruction on x86) when calling the function.
// * The same object file may be used in different circumstances, so a
// function may be imported from a dll if the object is linked into a
// dll, but it may be just linked against if linked into an rlib.
// * The compiler has no knowledge about whether native functions should
// be tagged dllimport or not.
//
// For now the compiler takes the perf hit (I do not have any numbers to
// this effect) by marking very little as `dllimport` and praying the
// linker will take care of everything. Fixing this problem will likely
// require adding a few attributes to Rust itself (feature gated at the
// start) and then strongly recommending static linkage on MSVC!
let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_msvc;
let check_overflow = tcx.sess.overflow_checks();
let tls_model = get_tls_model(&tcx.sess);
let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());
let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
let dctx = debuginfo::CrateDebugContext::new(llmod);
debuginfo::metadata::compile_unit_metadata(tcx,
&codegen_unit.name().as_str(),
&dctx);
Some(dctx)
} else {
None
};
let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits());
CodegenCx {
tcx,
check_overflow,
use_dll_storage_attrs,
tls_model,
llmod,
llcx,
stats: RefCell::new(Stats::default()),
codegen_unit,
instances: Default::default(),
vtables: Default::default(),
const_cstr_cache: Default::default(),
const_unsized: Default::default(),
const_globals: Default::default(),
statics_to_rauw: RefCell::new(Vec::new()),
used_statics: RefCell::new(Vec::new()),
lltypes: Default::default(),
scalar_lltypes: Default::default(),
pointee_infos: Default::default(),
isize_ty,
dbg_cx,
eh_personality: Cell::new(None),
eh_unwind_resume: Cell::new(None),
rust_try_fn: Cell::new(None),
intrinsics: Default::default(),
local_gen_sym_counter: Cell::new(0),
}
}
}
impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>,
ty::PolyExistentialTraitRef<'tcx>), &'ll Value>>
{
&self.vtables
}
fn instances(&self) -> &RefCell<FxHashMap<Instance<'tcx>, &'ll Value>> {
&self.instances
}
fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
        get_fn(self, instance)
}
fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value {
llvm::get_param(llfn, index)
}
fn eh_personality(&self) -> &'ll Value {
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
// within it, then we just need to codegen that. Otherwise, we're
// building an rlib which will depend on some upstream implementation of
// this function, so we just codegen a generic reference to it. We don't
// specify any of the types for the function, we just make it a symbol
// that LLVM can later use.
//
// Note that MSVC is a little special here in that we don't use the
// `eh_personality` lang item at all. Currently LLVM has support for
// both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
// *name of the personality function* to decide what kind of unwind side
// tables/landing pads to emit. It looks like Dwarf is used by default,
// injecting a dependency on the `_Unwind_Resume` symbol for resuming
// an "exception", but for MSVC we want to force SEH. This means that we
// can't actually have the personality function be our standard
// `rust_eh_personality` function, but rather we wired it up to the
// CRT's custom personality function, which forces LLVM to consider
// landing pads as "landing pads for SEH".
if let Some(llpersonality) = self.eh_personality.get() {
return llpersonality
}
let tcx = self.tcx;
let llfn = match tcx.lang_items().eh_personality() {
Some(def_id) if !wants_msvc_seh(self.sess()) => {
resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]))
}
_ => {
let name = if wants_msvc_seh(self.sess()) {
"__CxxFrameHandler3"
} else {
"rust_eh_personality"
};
let fty = self.type_variadic_func(&[], self.type_i32());
self.declare_cfn(name, fty)
}
};
attributes::apply_target_cpu_attr(self, llfn);
self.eh_personality.set(Some(llfn));
llfn
}
// Returns a Value of the "eh_unwind_resume" lang item if one is defined,
// otherwise declares it as an external function.
fn eh_unwind_resume(&self) -> &'ll Value {
use attributes;
let unwresume = &self.eh_unwind_resume;
if let Some(llfn) = unwresume.get() {
return llfn;
}
let tcx = self.tcx;
assert!(self.sess().target.target.options.custom_unwind_resume);
if let Some(def_id) = tcx.lang_items().eh_unwind_resume() {
let llfn = resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]));
unwresume.set(Some(llfn));
return llfn;
}
let sig = ty::Binder::bind(tcx.mk_fn_sig(
iter::once(tcx.mk_mut_ptr(tcx.types.u8)),
tcx.types.never,
false,
hir::Unsafety::Unsafe,
Abi::C
));
let llfn = self.declare_fn("rust_eh_unwind_resume", sig);
attributes::unwind(llfn, true);
attributes::apply_target_cpu_attr(self, llfn);
unwresume.set(Some(llfn));
llfn
}
fn sess(&self) -> &Session {
&self.tcx.sess
}
fn check_overflow(&self) -> bool {
self.check_overflow
}
fn stats(&self) -> &RefCell<Stats> {
&self.stats
}
fn consume_stats(self) -> RefCell<Stats> {
self.stats
}
fn codegen_unit(&self) -> &Arc<CodegenUnit<'tcx>> {
&self.codegen_unit
}
fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
&self.statics_to_rauw
}
fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
&self.used_statics
}
fn set_frame_pointer_elimination(&self, llfn: &'ll Value) {
attributes::set_frame_pointer_elimination(self, llfn)
}
fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
attributes::apply_target_cpu_attr(self, llfn)
}
fn closure_env_needs_indirect_debuginfo(&self) -> bool {
llvm_util::get_major_version() < 6
}
fn create_used_variable(&self) {
let name = const_cstr!("llvm.used");
let section = const_cstr!("llvm.metadata");
let array = self.const_array(
&self.type_ptr_to(self.type_i8()),
&*self.used_statics.borrow()
);
unsafe {
let g = llvm::LLVMAddGlobal(self.llmod,
self.val_ty(array),
name.as_ptr());
llvm::LLVMSetInitializer(g, array);
llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
llvm::LLVMSetSection(g, section.as_ptr());
}
}
}
impl IntrinsicDeclarationMethods<'tcx> for CodegenCx<'b, 'tcx> {
fn get_intrinsic(&self, key: &str) -> &'b Value {
if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
return v;
}
self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
}
fn declare_intrinsic(
&self,
key: &str
) -> Option<&'b Value> {
macro_rules! ifn {
($name:expr, fn() -> $ret:expr) => (
if key == $name {
let f = self.declare_cfn($name, self.type_func(&[], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
}
);
($name:expr, fn(...) -> $ret:expr) => (
if key == $name {
let f = self.declare_cfn($name, self.type_variadic_func(&[], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
}
);
($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
if key == $name {
let f = self.declare_cfn($name, self.type_func(&[$($arg),*], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
}
);
}
macro_rules! mk_struct {
($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
}
let i8p = self.type_i8p();
let void = self.type_void();
let i1 = self.type_i1();
let t_i8 = self.type_i8();
let t_i16 = self.type_i16();
let t_i32 = self.type_i32();
let t_i64 = self.type_i64();
let t_i128 = self.type_i128();
let t_f32 = self.type_f32();
let t_f64 = self.type_f64();
let t_v2f32 = self.type_vector(t_f32, 2);
let t_v4f32 = self.type_vector(t_f32, 4);
let t_v8f32 = self.type_vector(t_f32, 8);
let t_v16f32 = self.type_vector(t_f32, 16);
let t_v2f64 = self.type_vector(t_f64, 2);
let t_v4f64 = self.type_vector(t_f64, 4);
let t_v8f64 = self.type_vector(t_f64, 8);
ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void);
ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void);
ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void);
ifn!("llvm.trap", fn() -> void);
ifn!("llvm.debugtrap", fn() -> void);
ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32);
ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32);
ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32);
ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32);
ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64);
ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64);
ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64);
ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32);
ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32);
ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32);
ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32);
ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64);
ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64);
ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64);
ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32);
ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32);
ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32);
ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32);
ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64);
ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64);
ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64);
ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);
ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8);
ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);
ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8);
ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);
ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);
ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);
ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void);
ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void);
ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
ifn!("llvm.localescape", fn(...) -> void);
ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
ifn!("llvm.assume", fn(i1) -> void);
ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
// variadic intrinsics
ifn!("llvm.va_start", fn(i8p) -> void);
ifn!("llvm.va_end", fn(i8p) -> void);
ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
if self.sess().opts.debuginfo != DebugInfo::None {
ifn!("llvm.dbg.declare", fn(self.type_metadata(), self.type_metadata()) -> void);
ifn!("llvm.dbg.value", fn(self.type_metadata(), t_i64, self.type_metadata()) -> void);
}
return None;
}
}
impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
/// Generate a new symbol name with the given prefix. This symbol name must
/// only be used for definitions with `internal` or `private` linkage.
pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
let idx = self.local_gen_sym_counter.get();
self.local_gen_sym_counter.set(idx + 1);
// Include a '.' character, so there can be no accidental conflicts with
// user defined names
let mut name = String::with_capacity(prefix.len() + 6);
name.push_str(prefix);
name.push_str(".");
base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
name
}
}
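// A small sketch of the naming scheme implemented above: the counter is
// rendered in base 62 after the prefix and a '.', so successive calls yield
// e.g.
//
//     let a = cx.generate_local_symbol_name("str"); // "str.0"
//     let b = cx.generate_local_symbol_name("str"); // "str.1"
//
// Because '.' cannot appear in user-written identifiers, these names can
// never collide with user-defined symbols.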
impl ty::layout::HasDataLayout for CodegenCx<'ll, 'tcx> {
fn data_layout(&self) -> &ty::layout::TargetDataLayout {
&self.tcx.data_layout
}
}
impl HasTargetSpec for CodegenCx<'ll, 'tcx> {
fn target_spec(&self) -> &Target {
&self.tcx.sess.target.target
}
}
impl ty::layout::HasTyCtxt<'tcx> for CodegenCx<'ll, 'tcx> {
fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.tcx
}
}
impl LayoutOf for CodegenCx<'ll, 'tcx> {
type Ty = Ty<'tcx>;
type TyLayout = TyLayout<'tcx>;
fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty))
.unwrap_or_else(|e| if let LayoutError::SizeOverflow(_) = e {
self.sess().fatal(&e.to_string())
} else {
bug!("failed to get layout for `{}`: {}", ty, e)
})
}
}
| 42.855792 | 98 | 0.586027 |
4875ef6d3caa55c95dc3950a4f3cfcc392071895 | 9,876 | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::to_bytes;
#[deriving(Eq)]
pub enum Abi {
// NB: This ordering MUST match the AbiDatas array below.
// (This is ensured by the test indices_are_correct().)
// Single platform ABIs come first (`for_arch()` relies on this)
Cdecl,
Stdcall,
Fastcall,
Aapcs,
// Multiplatform ABIs second
Rust,
C,
RustIntrinsic,
}
#[deriving(Eq)]
pub enum Architecture {
// NB. You cannot change the ordering of these
// constants without adjusting IntelBits below.
// (This is ensured by the test indices_are_correct().)
X86,
X86_64,
Arm,
Mips
}
static IntelBits: u32 = (1 << (X86 as uint)) | (1 << (X86_64 as uint));
static ArmBits: u32 = (1 << (Arm as uint));
struct AbiData {
abi: Abi,
// Name of this ABI as we like it called.
name: &'static str,
// Is it specific to a platform? If so, which one? Also, what is
// the name that LLVM gives it (in case we disagree)
abi_arch: AbiArchitecture
}
enum AbiArchitecture {
RustArch, // Not a real ABI (e.g., intrinsic)
AllArch, // An ABI that specifies cross-platform defaults (e.g., "C")
Archs(u32) // Multiple architectures (bitset)
}
#[deriving(Clone, Eq, Encodable, Decodable)]
pub struct AbiSet {
priv bits: u32 // each bit represents one of the abis below
}
static AbiDatas: &'static [AbiData] = &[
// Platform-specific ABIs
AbiData {abi: Cdecl, name: "cdecl", abi_arch: Archs(IntelBits)},
AbiData {abi: Stdcall, name: "stdcall", abi_arch: Archs(IntelBits)},
AbiData {abi: Fastcall, name:"fastcall", abi_arch: Archs(IntelBits)},
AbiData {abi: Aapcs, name: "aapcs", abi_arch: Archs(ArmBits)},
// Cross-platform ABIs
//
// NB: Do not adjust this ordering without
// adjusting the indices below.
AbiData {abi: Rust, name: "Rust", abi_arch: RustArch},
AbiData {abi: C, name: "C", abi_arch: AllArch},
AbiData {abi: RustIntrinsic, name: "rust-intrinsic", abi_arch: RustArch},
];
fn each_abi(op: &fn(abi: Abi) -> bool) -> bool {
/*!
*
* Iterates through each of the defined ABIs.
*/
AbiDatas.iter().advance(|abi_data| op(abi_data.abi))
}
pub fn lookup(name: &str) -> Option<Abi> {
/*!
*
* Returns the ABI with the given name (if any).
*/
let mut res = None;
do each_abi |abi| {
if name == abi.data().name {
res = Some(abi);
false
} else {
true
}
};
res
}
pub fn all_names() -> ~[&'static str] {
AbiDatas.map(|d| d.name)
}
impl Abi {
#[inline]
pub fn index(&self) -> uint {
*self as uint
}
#[inline]
pub fn data(&self) -> &'static AbiData {
&AbiDatas[self.index()]
}
pub fn name(&self) -> &'static str {
self.data().name
}
}
impl Architecture {
fn bit(&self) -> u32 {
1 << (*self as u32)
}
}
impl AbiSet {
pub fn from(abi: Abi) -> AbiSet {
AbiSet { bits: (1 << abi.index()) }
}
#[inline]
pub fn Rust() -> AbiSet {
AbiSet::from(Rust)
}
#[inline]
pub fn C() -> AbiSet {
AbiSet::from(C)
}
#[inline]
pub fn Intrinsic() -> AbiSet {
AbiSet::from(RustIntrinsic)
}
pub fn default() -> AbiSet {
AbiSet::C()
}
pub fn empty() -> AbiSet {
AbiSet { bits: 0 }
}
#[inline]
pub fn is_rust(&self) -> bool {
self.bits == 1 << Rust.index()
}
#[inline]
pub fn is_c(&self) -> bool {
self.bits == 1 << C.index()
}
#[inline]
pub fn is_intrinsic(&self) -> bool {
self.bits == 1 << RustIntrinsic.index()
}
pub fn contains(&self, abi: Abi) -> bool {
(self.bits & (1 << abi.index())) != 0
}
pub fn subset_of(&self, other_abi_set: AbiSet) -> bool {
(self.bits & other_abi_set.bits) == self.bits
}
pub fn add(&mut self, abi: Abi) {
self.bits |= (1 << abi.index());
}
pub fn each(&self, op: &fn(abi: Abi) -> bool) -> bool {
each_abi(|abi| !self.contains(abi) || op(abi))
}
pub fn is_empty(&self) -> bool {
self.bits == 0
}
pub fn for_arch(&self, arch: Architecture) -> Option<Abi> {
// NB---Single platform ABIs come first
let mut res = None;
do self.each |abi| {
let data = abi.data();
match data.abi_arch {
Archs(a) if (a & arch.bit()) != 0 => { res = Some(abi); false }
Archs(_) => { true }
RustArch | AllArch => { res = Some(abi); false }
}
};
res
}
pub fn check_valid(&self) -> Option<(Abi, Abi)> {
let mut abis = ~[];
do self.each |abi| { abis.push(abi); true };
for (i, abi) in abis.iter().enumerate() {
let data = abi.data();
for other_abi in abis.slice(0, i).iter() {
let other_data = other_abi.data();
debug2!("abis=({:?},{:?}) datas=({:?},{:?})",
abi, data.abi_arch,
other_abi, other_data.abi_arch);
match (&data.abi_arch, &other_data.abi_arch) {
(&AllArch, &AllArch) => {
// Two cross-architecture ABIs
return Some((*abi, *other_abi));
}
(_, &RustArch) |
(&RustArch, _) => {
// Cannot combine Rust or Rust-Intrinsic with
// anything else.
return Some((*abi, *other_abi));
}
(&Archs(is), &Archs(js)) if (is & js) != 0 => {
// Two ABIs for same architecture
return Some((*abi, *other_abi));
}
_ => {}
}
}
}
return None;
}
}
impl to_bytes::IterBytes for Abi {
fn iter_bytes(&self, lsb0: bool, f: to_bytes::Cb) -> bool {
self.index().iter_bytes(lsb0, f)
}
}
impl to_bytes::IterBytes for AbiSet {
fn iter_bytes(&self, lsb0: bool, f: to_bytes::Cb) -> bool {
self.bits.iter_bytes(lsb0, f)
}
}
impl ToStr for Abi {
fn to_str(&self) -> ~str {
self.data().name.to_str()
}
}
impl ToStr for AbiSet {
fn to_str(&self) -> ~str {
let mut strs = ~[];
do self.each |abi| {
strs.push(abi.data().name);
true
};
format!("\"{}\"", strs.connect(" "))
}
}
#[test]
fn lookup_Rust() {
let abi = lookup("Rust");
assert!(abi.is_some() && abi.unwrap().data().name == "Rust");
}
#[test]
fn lookup_cdecl() {
let abi = lookup("cdecl");
assert!(abi.is_some() && abi.unwrap().data().name == "cdecl");
}
#[test]
fn lookup_baz() {
let abi = lookup("baz");
assert!(abi.is_none());
}
#[cfg(test)]
fn cannot_combine(n: Abi, m: Abi) {
let mut set = AbiSet::empty();
set.add(n);
set.add(m);
match set.check_valid() {
Some((a, b)) => {
assert!((n == a && m == b) ||
(m == a && n == b));
}
None => {
fail2!("Invalid match not detected");
}
}
}
#[cfg(test)]
fn can_combine(n: Abi, m: Abi) {
let mut set = AbiSet::empty();
set.add(n);
set.add(m);
match set.check_valid() {
Some((_, _)) => {
fail2!("Valid match declared invalid");
}
None => {}
}
}
#[test]
fn cannot_combine_cdecl_and_stdcall() {
cannot_combine(Cdecl, Stdcall);
}
#[test]
fn cannot_combine_c_and_rust() {
cannot_combine(C, Rust);
}
#[test]
fn cannot_combine_rust_and_cdecl() {
cannot_combine(Rust, Cdecl);
}
#[test]
fn cannot_combine_rust_intrinsic_and_cdecl() {
cannot_combine(RustIntrinsic, Cdecl);
}
#[test]
fn can_combine_c_and_stdcall() {
can_combine(C, Stdcall);
}
#[test]
fn can_combine_aapcs_and_stdcall() {
can_combine(Aapcs, Stdcall);
}
#[test]
fn abi_to_str_stdcall_aaps() {
let mut set = AbiSet::empty();
set.add(Aapcs);
set.add(Stdcall);
assert!(set.to_str() == ~"\"stdcall aapcs\"");
}
#[test]
fn abi_to_str_c_aaps() {
let mut set = AbiSet::empty();
set.add(Aapcs);
set.add(C);
debug2!("set = {}", set.to_str());
assert!(set.to_str() == ~"\"aapcs C\"");
}
#[test]
fn abi_to_str_rust() {
let mut set = AbiSet::empty();
set.add(Rust);
debug2!("set = {}", set.to_str());
assert!(set.to_str() == ~"\"Rust\"");
}
#[test]
fn indices_are_correct() {
for (i, abi_data) in AbiDatas.iter().enumerate() {
assert!(i == abi_data.abi.index());
}
let bits = 1 << (X86 as u32);
let bits = bits | 1 << (X86_64 as u32);
assert!(IntelBits == bits);
let bits = 1 << (Arm as u32);
assert!(ArmBits == bits);
}
#[cfg(test)]
fn check_arch(abis: &[Abi], arch: Architecture, expect: Option<Abi>) {
let mut set = AbiSet::empty();
for &abi in abis.iter() {
set.add(abi);
}
let r = set.for_arch(arch);
assert!(r == expect);
}
#[test]
fn pick_multiplatform() {
check_arch([C, Cdecl], X86, Some(Cdecl));
check_arch([C, Cdecl], X86_64, Some(Cdecl));
check_arch([C, Cdecl], Arm, Some(C));
}
#[test]
fn pick_uniplatform() {
check_arch([Stdcall], X86, Some(Stdcall));
check_arch([Stdcall], Arm, None);
}
| 23.626794 | 79 | 0.534427 |
e4b3de80698e6573eb055c965b3ce5492c7ae995 | 4,469 | use std::sync::Arc;
use arc_swap::{ArcSwap, Guard};
use crossbeam_utils::atomic::AtomicCell;
use dashmap::mapref::entry::Entry;
use crate::{
hash::FxDashMap,
runtime::local_state::{QueryInputs, QueryRevisions},
DatabaseKeyIndex, Event, EventKind, Revision, Runtime,
};
use super::DerivedKeyIndex;
pub(super) struct MemoMap<V> {
map: FxDashMap<DerivedKeyIndex, ArcSwap<Memo<V>>>,
}
impl<V> Default for MemoMap<V> {
fn default() -> Self {
Self {
map: Default::default(),
}
}
}
impl<V> MemoMap<V> {
    /// Inserts the memo for the given key; (atomically) overwrites any previously existing memo.
pub(super) fn insert(&self, key: DerivedKeyIndex, memo: Memo<V>) {
self.map.insert(key, ArcSwap::from(Arc::new(memo)));
}
/// Evicts the existing memo for the given key, replacing it
/// with an equivalent memo that has no value. If the memo
/// has untracked inputs, this has no effect.
pub(super) fn evict(&self, key: DerivedKeyIndex) {
// Nit: this function embodies a touch more "business logic"
// than I would like (specifically the check about "query-input::untracked"),
// but I can't see a clean way to encapsulate it otherwise. I suppose
// it could take a closure, but it seems silly.
match self.map.entry(key) {
Entry::Vacant(_) => return,
Entry::Occupied(entry) => {
let memo = entry.get().load();
// Careful: we can't evict memos with untracked inputs
// as their values cannot be reconstructed.
if let QueryInputs::Untracked = memo.revisions.inputs {
return;
}
let memo_evicted = Arc::new(Memo::new(
None::<V>,
memo.verified_at.load(),
memo.revisions.clone(),
));
entry.get().store(memo_evicted);
}
}
}
/// Loads the current memo for `key_index`. This does not hold any sort of
/// lock on the `memo_map` once it returns, so this memo could immediately
/// become outdated if other threads store into the `memo_map`.
pub(super) fn get(&self, key: DerivedKeyIndex) -> Option<Guard<Arc<Memo<V>>>> {
self.map.get(&key).map(|v| v.load())
}
/// Iterates over the entries in the map. This holds a read lock while iteration continues.
pub(super) fn iter(&self) -> impl Iterator<Item = (DerivedKeyIndex, Arc<Memo<V>>)> + '_ {
self.map
.iter()
.map(move |r| (*r.key(), r.value().load_full()))
}
/// Clears the memo of all entries.
pub(super) fn clear(&self) {
self.map.clear()
}
}
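// Usage sketch (illustrative, not from the original source; `key`, `value`,
// and `revisions` are assumed to come from the surrounding query system, so
// this is not runnable as-is):
//
//     let memos: MemoMap<u32> = MemoMap::default();
//     memos.insert(key, Memo::new(Some(value), revision_now, revisions));
//     let snapshot = memos.get(key); // may be outdated immediately, see `get`
//     memos.evict(key);              // keeps revision metadata, drops the value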
#[derive(Debug)]
pub(super) struct Memo<V> {
/// The result of the query, if we decide to memoize it.
pub(super) value: Option<V>,
/// Last revision when this memo was verified; this begins
/// as the current revision.
pub(super) verified_at: AtomicCell<Revision>,
/// Revision information
pub(super) revisions: QueryRevisions,
}
impl<V> Memo<V> {
pub(super) fn new(value: Option<V>, revision_now: Revision, revisions: QueryRevisions) -> Self {
Memo {
value,
verified_at: AtomicCell::new(revision_now),
revisions,
}
}
/// True if this memo is known not to have changed based on its durability.
pub(super) fn check_durability(&self, runtime: &Runtime) -> bool {
let last_changed = runtime.last_changed_revision(self.revisions.durability);
let verified_at = self.verified_at.load();
log::debug!(
"check_durability(last_changed={:?} <= verified_at={:?}) = {:?}",
last_changed,
self.verified_at,
last_changed <= verified_at,
);
last_changed <= verified_at
}
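    // Worked example (illustrative): if the durability bucket of this memo's
    // revisions last changed in revision R3 and the memo was verified at R5,
    // then `R3 <= R5` holds and the memo is known unchanged without
    // re-verifying its inputs.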
/// Mark memo as having been verified in the `revision_now`, which should
/// be the current revision.
pub(super) fn mark_as_verified(
&self,
db: &dyn crate::Database,
runtime: &crate::Runtime,
database_key_index: DatabaseKeyIndex,
) {
db.salsa_event(Event {
runtime_id: runtime.id(),
kind: EventKind::DidValidateMemoizedValue {
database_key: database_key_index,
},
});
self.verified_at.store(runtime.current_revision());
}
}
| 33.103704 | 100 | 0.591855 |
f7a4a71e5cfda4909700cd455c5b762d6a3965cd | 12,630 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::result::Result::{Ok, Err};
#[test]
fn test_binary_search() {
let b: [i32; 0] = [];
assert_eq!(b.binary_search(&5), Err(0));
let b = [4];
assert_eq!(b.binary_search(&3), Err(0));
assert_eq!(b.binary_search(&4), Ok(0));
assert_eq!(b.binary_search(&5), Err(1));
let b = [1, 2, 4, 6, 8, 9];
assert_eq!(b.binary_search(&5), Err(3));
assert_eq!(b.binary_search(&6), Ok(3));
assert_eq!(b.binary_search(&7), Err(4));
assert_eq!(b.binary_search(&8), Ok(4));
let b = [1, 2, 4, 5, 6, 8];
assert_eq!(b.binary_search(&9), Err(6));
let b = [1, 2, 4, 6, 7, 8, 9];
assert_eq!(b.binary_search(&6), Ok(3));
assert_eq!(b.binary_search(&5), Err(3));
assert_eq!(b.binary_search(&8), Ok(5));
let b = [1, 2, 4, 5, 6, 8, 9];
assert_eq!(b.binary_search(&7), Err(5));
assert_eq!(b.binary_search(&0), Err(0));
let b = [1, 3, 3, 3, 7];
assert_eq!(b.binary_search(&0), Err(0));
assert_eq!(b.binary_search(&1), Ok(0));
assert_eq!(b.binary_search(&2), Err(1));
assert!(match b.binary_search(&3) { Ok(1...3) => true, _ => false });
assert!(match b.binary_search(&3) { Ok(1...3) => true, _ => false });
assert_eq!(b.binary_search(&4), Err(4));
assert_eq!(b.binary_search(&5), Err(4));
assert_eq!(b.binary_search(&6), Err(4));
assert_eq!(b.binary_search(&7), Ok(4));
assert_eq!(b.binary_search(&8), Err(5));
}
#[test]
// Test implementation-specific behavior when finding equivalent elements.
// It is ok to break this test, but when you do, a crater run is highly advisable.
fn test_binary_search_implementation_details() {
let b = [1, 1, 2, 2, 3, 3, 3];
assert_eq!(b.binary_search(&1), Ok(1));
assert_eq!(b.binary_search(&2), Ok(3));
assert_eq!(b.binary_search(&3), Ok(6));
let b = [1, 1, 1, 1, 1, 3, 3, 3, 3];
assert_eq!(b.binary_search(&1), Ok(4));
assert_eq!(b.binary_search(&3), Ok(8));
let b = [1, 1, 1, 1, 3, 3, 3, 3, 3];
assert_eq!(b.binary_search(&1), Ok(3));
assert_eq!(b.binary_search(&3), Ok(8));
}
#[test]
fn test_iterator_nth() {
let v: &[_] = &[0, 1, 2, 3, 4];
for i in 0..v.len() {
assert_eq!(v.iter().nth(i).unwrap(), &v[i]);
}
assert_eq!(v.iter().nth(v.len()), None);
let mut iter = v.iter();
assert_eq!(iter.nth(2).unwrap(), &v[2]);
assert_eq!(iter.nth(1).unwrap(), &v[4]);
}
#[test]
fn test_iterator_last() {
let v: &[_] = &[0, 1, 2, 3, 4];
assert_eq!(v.iter().last().unwrap(), &4);
assert_eq!(v[..1].iter().last().unwrap(), &0);
}
#[test]
fn test_iterator_count() {
let v: &[_] = &[0, 1, 2, 3, 4];
assert_eq!(v.iter().count(), 5);
let mut iter2 = v.iter();
iter2.next();
iter2.next();
assert_eq!(iter2.count(), 3);
}
#[test]
fn test_chunks_count() {
let v: &[i32] = &[0, 1, 2, 3, 4, 5];
let c = v.chunks(3);
assert_eq!(c.count(), 2);
let v2: &[i32] = &[0, 1, 2, 3, 4];
let c2 = v2.chunks(2);
assert_eq!(c2.count(), 3);
let v3: &[i32] = &[];
let c3 = v3.chunks(2);
assert_eq!(c3.count(), 0);
}
#[test]
fn test_chunks_nth() {
let v: &[i32] = &[0, 1, 2, 3, 4, 5];
let mut c = v.chunks(2);
assert_eq!(c.nth(1).unwrap(), &[2, 3]);
assert_eq!(c.next().unwrap(), &[4, 5]);
let v2: &[i32] = &[0, 1, 2, 3, 4];
let mut c2 = v2.chunks(3);
assert_eq!(c2.nth(1).unwrap(), &[3, 4]);
assert_eq!(c2.next(), None);
}
#[test]
fn test_chunks_last() {
let v: &[i32] = &[0, 1, 2, 3, 4, 5];
let c = v.chunks(2);
assert_eq!(c.last().unwrap()[1], 5);
let v2: &[i32] = &[0, 1, 2, 3, 4];
let c2 = v2.chunks(2);
assert_eq!(c2.last().unwrap()[0], 4);
}
#[test]
fn test_chunks_zip() {
let v1: &[i32] = &[0, 1, 2, 3, 4];
let v2: &[i32] = &[6, 7, 8, 9, 10];
let res = v1.chunks(2)
.zip(v2.chunks(2))
.map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
.collect::<Vec<_>>();
assert_eq!(res, vec![14, 22, 14]);
}
#[test]
fn test_chunks_mut_count() {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
let c = v.chunks_mut(3);
assert_eq!(c.count(), 2);
let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
let c2 = v2.chunks_mut(2);
assert_eq!(c2.count(), 3);
let v3: &mut [i32] = &mut [];
let c3 = v3.chunks_mut(2);
assert_eq!(c3.count(), 0);
}
#[test]
fn test_chunks_mut_nth() {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
let mut c = v.chunks_mut(2);
assert_eq!(c.nth(1).unwrap(), &[2, 3]);
assert_eq!(c.next().unwrap(), &[4, 5]);
let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
let mut c2 = v2.chunks_mut(3);
assert_eq!(c2.nth(1).unwrap(), &[3, 4]);
assert_eq!(c2.next(), None);
}
#[test]
fn test_chunks_mut_last() {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
let c = v.chunks_mut(2);
assert_eq!(c.last().unwrap(), &[4, 5]);
let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
let c2 = v2.chunks_mut(2);
assert_eq!(c2.last().unwrap(), &[4]);
}
#[test]
fn test_chunks_mut_zip() {
let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
let v2: &[i32] = &[6, 7, 8, 9, 10];
for (a, b) in v1.chunks_mut(2).zip(v2.chunks(2)) {
let sum = b.iter().sum::<i32>();
for v in a {
*v += sum;
}
}
assert_eq!(v1, [13, 14, 19, 20, 14]);
}
#[test]
fn test_exact_chunks_count() {
let v: &[i32] = &[0, 1, 2, 3, 4, 5];
let c = v.exact_chunks(3);
assert_eq!(c.count(), 2);
let v2: &[i32] = &[0, 1, 2, 3, 4];
let c2 = v2.exact_chunks(2);
assert_eq!(c2.count(), 2);
let v3: &[i32] = &[];
let c3 = v3.exact_chunks(2);
assert_eq!(c3.count(), 0);
}
#[test]
fn test_exact_chunks_nth() {
let v: &[i32] = &[0, 1, 2, 3, 4, 5];
let mut c = v.exact_chunks(2);
assert_eq!(c.nth(1).unwrap(), &[2, 3]);
assert_eq!(c.next().unwrap(), &[4, 5]);
let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
let mut c2 = v2.exact_chunks(3);
assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
assert_eq!(c2.next(), None);
}
#[test]
fn test_exact_chunks_last() {
let v: &[i32] = &[0, 1, 2, 3, 4, 5];
let c = v.exact_chunks(2);
assert_eq!(c.last().unwrap(), &[4, 5]);
let v2: &[i32] = &[0, 1, 2, 3, 4];
let c2 = v2.exact_chunks(2);
assert_eq!(c2.last().unwrap(), &[2, 3]);
}
#[test]
fn test_exact_chunks_zip() {
let v1: &[i32] = &[0, 1, 2, 3, 4];
let v2: &[i32] = &[6, 7, 8, 9, 10];
let res = v1.exact_chunks(2)
.zip(v2.exact_chunks(2))
.map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
.collect::<Vec<_>>();
assert_eq!(res, vec![14, 22]);
}
#[test]
fn test_exact_chunks_mut_count() {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
let c = v.exact_chunks_mut(3);
assert_eq!(c.count(), 2);
let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
let c2 = v2.exact_chunks_mut(2);
assert_eq!(c2.count(), 2);
let v3: &mut [i32] = &mut [];
let c3 = v3.exact_chunks_mut(2);
assert_eq!(c3.count(), 0);
}
#[test]
fn test_exact_chunks_mut_nth() {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
let mut c = v.exact_chunks_mut(2);
assert_eq!(c.nth(1).unwrap(), &[2, 3]);
assert_eq!(c.next().unwrap(), &[4, 5]);
let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
let mut c2 = v2.exact_chunks_mut(3);
assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
assert_eq!(c2.next(), None);
}
#[test]
fn test_exact_chunks_mut_last() {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
let c = v.exact_chunks_mut(2);
assert_eq!(c.last().unwrap(), &[4, 5]);
let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
let c2 = v2.exact_chunks_mut(2);
assert_eq!(c2.last().unwrap(), &[2, 3]);
}
#[test]
fn test_exact_chunks_mut_zip() {
let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
let v2: &[i32] = &[6, 7, 8, 9, 10];
for (a, b) in v1.exact_chunks_mut(2).zip(v2.exact_chunks(2)) {
let sum = b.iter().sum::<i32>();
for v in a {
*v += sum;
}
}
assert_eq!(v1, [13, 14, 19, 20, 4]);
}
#[test]
fn test_windows_count() {
let v: &[i32] = &[0, 1, 2, 3, 4, 5];
let c = v.windows(3);
assert_eq!(c.count(), 4);
let v2: &[i32] = &[0, 1, 2, 3, 4];
let c2 = v2.windows(6);
assert_eq!(c2.count(), 0);
let v3: &[i32] = &[];
let c3 = v3.windows(2);
assert_eq!(c3.count(), 0);
}
#[test]
fn test_windows_nth() {
let v: &[i32] = &[0, 1, 2, 3, 4, 5];
let mut c = v.windows(2);
assert_eq!(c.nth(2).unwrap()[1], 3);
assert_eq!(c.next().unwrap()[0], 3);
let v2: &[i32] = &[0, 1, 2, 3, 4];
let mut c2 = v2.windows(4);
assert_eq!(c2.nth(1).unwrap()[1], 2);
assert_eq!(c2.next(), None);
}
#[test]
fn test_windows_last() {
let v: &[i32] = &[0, 1, 2, 3, 4, 5];
let c = v.windows(2);
assert_eq!(c.last().unwrap()[1], 5);
let v2: &[i32] = &[0, 1, 2, 3, 4];
let c2 = v2.windows(2);
assert_eq!(c2.last().unwrap()[0], 3);
}
#[test]
fn test_windows_zip() {
let v1: &[i32] = &[0, 1, 2, 3, 4];
let v2: &[i32] = &[6, 7, 8, 9, 10];
let res = v1.windows(2)
.zip(v2.windows(2))
.map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
.collect::<Vec<_>>();
assert_eq!(res, [14, 18, 22, 26]);
}
#[test]
fn get_range() {
let v: &[i32] = &[0, 1, 2, 3, 4, 5];
assert_eq!(v.get(..), Some(&[0, 1, 2, 3, 4, 5][..]));
assert_eq!(v.get(..2), Some(&[0, 1][..]));
assert_eq!(v.get(2..), Some(&[2, 3, 4, 5][..]));
assert_eq!(v.get(1..4), Some(&[1, 2, 3][..]));
assert_eq!(v.get(7..), None);
assert_eq!(v.get(7..10), None);
}
#[test]
fn get_mut_range() {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
assert_eq!(v.get_mut(..), Some(&mut [0, 1, 2, 3, 4, 5][..]));
assert_eq!(v.get_mut(..2), Some(&mut [0, 1][..]));
assert_eq!(v.get_mut(2..), Some(&mut [2, 3, 4, 5][..]));
assert_eq!(v.get_mut(1..4), Some(&mut [1, 2, 3][..]));
assert_eq!(v.get_mut(7..), None);
assert_eq!(v.get_mut(7..10), None);
}
#[test]
fn get_unchecked_range() {
unsafe {
let v: &[i32] = &[0, 1, 2, 3, 4, 5];
assert_eq!(v.get_unchecked(..), &[0, 1, 2, 3, 4, 5][..]);
assert_eq!(v.get_unchecked(..2), &[0, 1][..]);
assert_eq!(v.get_unchecked(2..), &[2, 3, 4, 5][..]);
assert_eq!(v.get_unchecked(1..4), &[1, 2, 3][..]);
}
}
#[test]
fn get_unchecked_mut_range() {
unsafe {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
assert_eq!(v.get_unchecked_mut(..), &mut [0, 1, 2, 3, 4, 5][..]);
assert_eq!(v.get_unchecked_mut(..2), &mut [0, 1][..]);
        assert_eq!(v.get_unchecked_mut(2..), &mut [2, 3, 4, 5][..]);
assert_eq!(v.get_unchecked_mut(1..4), &mut [1, 2, 3][..]);
}
}
#[test]
fn test_find_rfind() {
let v = [0, 1, 2, 3, 4, 5];
let mut iter = v.iter();
let mut i = v.len();
while let Some(&elt) = iter.rfind(|_| true) {
i -= 1;
assert_eq!(elt, v[i]);
}
assert_eq!(i, 0);
assert_eq!(v.iter().rfind(|&&x| x <= 3), Some(&3));
}
#[test]
fn test_iter_folds() {
let a = [1, 2, 3, 4, 5]; // len>4 so the unroll is used
assert_eq!(a.iter().fold(0, |acc, &x| 2*acc + x), 57);
assert_eq!(a.iter().rfold(0, |acc, &x| 2*acc + x), 129);
let fold = |acc: i32, &x| acc.checked_mul(2)?.checked_add(x);
assert_eq!(a.iter().try_fold(0, &fold), Some(57));
assert_eq!(a.iter().try_rfold(0, &fold), Some(129));
// short-circuiting try_fold, through other methods
let a = [0, 1, 2, 3, 5, 5, 5, 7, 8, 9];
let mut iter = a.iter();
assert_eq!(iter.position(|&x| x == 3), Some(3));
assert_eq!(iter.rfind(|&&x| x == 5), Some(&5));
assert_eq!(iter.len(), 2);
}
#[test]
fn test_rotate_left() {
const N: usize = 600;
let a: &mut [_] = &mut [0; N];
for i in 0..N {
a[i] = i;
}
a.rotate_left(42);
let k = N - 42;
for i in 0..N {
assert_eq!(a[(i + k) % N], i);
}
}
#[test]
fn test_rotate_right() {
const N: usize = 600;
let a: &mut [_] = &mut [0; N];
for i in 0..N {
a[i] = i;
}
a.rotate_right(42);
for i in 0..N {
assert_eq!(a[(i + 42) % N], i);
}
}
| 27.16129 | 80 | 0.512668 |
649a899be7e6a7dd12171521a2ef53afb1a2c5d0 | 2,338 | use crate::config::Keybind;
use crate::models::TagId;
use crate::models::Window;
use crate::models::WindowHandle;
use crate::models::WindowState;
use crate::utils::xkeysym_lookup::Button;
use serde::{Deserialize, Serialize};
/// These are responses from the Window manager.
/// The display server should act on these actions.
#[allow(clippy::large_enum_variant)]
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum DisplayAction {
/// Nicely ask a window if it would please close at its convenience.
KillWindow(WindowHandle),
/// Get triggered after a new window is discovered and WE are
/// managing it.
AddedWindow(WindowHandle, bool, bool),
/// Makes sure the mouse is over a given window.
MoveMouseOver(WindowHandle, bool),
/// Makes sure the mouse is over a given point.
MoveMouseOverPoint((i32, i32)),
/// Change a windows state.
SetState(WindowHandle, bool, WindowState),
/// Sets the "z-index" order of the windows
/// first in the array is top most
SetWindowOrder(Vec<WindowHandle>, Vec<WindowHandle>),
/// Raises a given window.
MoveToTop(WindowHandle),
    /// Tell the DS we no longer care about this window and to perform
    /// any related cleanup.
DestroyedWindow(WindowHandle),
/// Tell a window that it is to become focused.
WindowTakeFocus {
window: Window,
previous_window: Option<Window>,
},
/// Remove focus on any visible window by focusing the root window.
Unfocus(Option<WindowHandle>, bool),
/// To the window under the cursor to take the focus.
FocusWindowUnderCursor,
ReplayClick(WindowHandle, Button),
/// Tell the DM we are ready to resize this window.
ReadyToResizeWindow(WindowHandle),
/// Tell the DM we are ready to move this window.
ReadyToMoveWindow(WindowHandle),
/// Used to let the WM know of the current displayed tag changes.
SetCurrentTags(Vec<TagId>),
/// Used to let the WM know of the tag for a given window.
SetWindowTags(WindowHandle, Vec<TagId>),
    /// Tell the DM to return to normal mode if it is not already in it
    /// (i.e. when it is resizing or moving a window).
NormalMode,
/// SoftReload keygrabs, needed when keyboard changes.
ReloadKeyGrabs(Vec<Keybind>),
/// Configure a xlib window.
ConfigureXlibWindow(Window),
}
| 30.363636 | 72 | 0.691617 |
cc7199608939797504aeb9f0005b2885689c4ee0 | 3,701 | use super::Collide;
use crate::math::{Vec2, clamp01};
use crate::shapes::{Circle, Polygon};
use crate::world::{Body, Transform};
use crate::collision::Contact;
impl Circle {
/// Returns the face for which the penetration of the circle is least, the penetration and the corresponding
/// support point on the circle.
fn least_penetration_support_point(&self, self_transform: &Transform,
other: &Polygon, other_transform: &Transform) -> (usize, f32, Vec2) {
use std::f32::INFINITY;
let mut face_index = 0usize;
let mut min_pen = INFINITY;
let mut min_support = Vec2::ZERO;
let self_local_pos = other_transform.local_pos(&self_transform.position);
for i in 0..other.vert_count() {
// Vertex and normal describing ith face of other
let normal = other.normals[i];
let vertex = other.vertices[i];
// Point on self furthest below the face
let support = self_local_pos - normal * self.radius;
// Penetration wrt this face is negative of the distance of support from this face
let penetration = -normal.dot(&(support - vertex));
if penetration < min_pen {
min_pen = penetration;
face_index = i;
min_support = support;
}
}
(face_index, min_pen, min_support)
}
}
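// Worked example (illustrative, not from the original source): a circle of
// radius 1.0 centred at (0.0, 1.5) tested against a face with normal
// (0.0, 1.0) and vertex (0.0, 1.0). The support point is
// (0.0, 1.5) - (0.0, 1.0) * 1.0 = (0.0, 0.5), and the penetration is
// -(0.0, 1.0) . ((0.0, 0.5) - (0.0, 1.0)) = 0.5, i.e. the circle dips half a
// unit below that face.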
impl Collide<Polygon> for Circle {
fn collide(&self, self_body: &Body, other: &Polygon, other_body: &Body) -> Option<Vec<Contact>> {
let self_transform = &self_body.transform;
let other_transform = &other_body.transform;
let (face_idx, mut penetration, support) =
self.least_penetration_support_point(self_transform, other, other_transform);
if penetration < 0.0 {
return None;
}
let face = other.face(face_idx);
// The contact point is the clamped projection of support on face
let face_vec = face.b - face.a;
let t = (support - face.a).dot(&face_vec) / face_vec.sqr_len();
let corner_contact = t < 0.0 || t > 1.0;
let t = clamp01(t);
let contact_point = face.a + face_vec * t;
let contact_point = other_transform.world_pos(&contact_point);
let rel_contact_point = contact_point - self_transform.position;
let contact_dist_sqr = rel_contact_point.sqr_len();
if contact_dist_sqr > self.radius * self.radius {
return None;
}
if corner_contact {
penetration = self.radius - contact_dist_sqr.sqrt()
}
let normal = if corner_contact {
rel_contact_point / (self.radius - penetration)
} else {
-other_transform.world_dir(&face.normal)
};
let contact = Contact::new(contact_point, penetration, normal);
Some(vec![contact])
}
}
impl Collide<Circle> for Polygon {
fn collide(&self, self_body: &Body, other: &Circle, other_body: &Body) -> Option<Vec<Contact>> {
        // Delegate to the circle-vs-polygon case, then flip the normals,
        // since the normal must always point from self to other.
        other.collide(other_body, self, self_body).map(|mut contacts| {
            for contact in contacts.iter_mut() {
                contact.normal = -contact.normal;
            }
            contacts
        })
}
} | 34.268519 | 112 | 0.562551 |
e87829d2da45eaf82f53b0184ef20d2e67ca769f | 3,066 | use crate::{
ast::*,
parser::{combinator::*, exchange::*, token::*},
};
use nom::{branch::alt, Parser};
/// data_section = `DATA` \[ `(` [parameter_list] `)` \] `;` [entity_instance_list] `ENDSEC;` .
pub fn data_section(input: &str) -> ParseResult<DataSection> {
tuple_((
tag_("DATA"),
opt_(tuple_((char_('('), parameter_list, char_(')')))),
char_(';'),
entity_instance_list,
tag_("ENDSEC;"),
))
.map(|(_start, meta, _semicolon, entities, _end)| DataSection {
meta: meta
.map(|(_open, params, _close)| params)
.unwrap_or_default(),
entities,
})
.parse(input)
}
/// entity_instance_list = { [entity_instance] } .
pub fn entity_instance_list(input: &str) -> ParseResult<Vec<EntityInstance>> {
many0_(entity_instance).parse(input)
}
/// entity_instance = [simple_entity_instance] | [complex_entity_instance] .
pub fn entity_instance(input: &str) -> ParseResult<EntityInstance> {
alt((simple_entity_instance, complex_entity_instance)).parse(input)
}
/// simple_entity_instance = [entity_instance_name] `=` [simple_record] `;` .
pub fn simple_entity_instance(input: &str) -> ParseResult<EntityInstance> {
tuple_((entity_instance_name, char_('='), simple_record, char_(';')))
.map(|(id, _eq, record, _semicolon)| EntityInstance::Simple { id, record })
.parse(input)
}
/// complex_entity_instance = [entity_instance_name] `=` [subsuper_record] `;` .
pub fn complex_entity_instance(input: &str) -> ParseResult<EntityInstance> {
tuple_((
entity_instance_name,
char_('='),
subsuper_record,
char_(';'),
))
.map(|(id, _eq, subsuper, _semicolon)| EntityInstance::Complex { id, subsuper })
.parse(input)
}
/// simple_record = [keyword] `(` \[ [parameter_list] \] `)` .
pub fn simple_record(input: &str) -> ParseResult<Record> {
tuple_((keyword, char_('('), opt_(parameter_list), char_(')')))
.map(|(name, _open, parameters, _close)| Record {
name,
parameters: parameters.unwrap_or_default(),
})
.parse(input)
}
/// simple_record_list = [simple_record] { [simple_record] } .
pub fn simple_record_list(input: &str) -> ParseResult<Vec<Record>> {
many0_(simple_record).parse(input)
}
/// subsuper_record = `(` [simple_record_list] `)` .
pub fn subsuper_record(input: &str) -> ParseResult<Vec<Record>> {
tuple_((char_('('), simple_record_list, char_(')')))
.map(|(_open, records, _close)| records)
.parse(input)
}
#[cfg(test)]
mod tests {
use nom::Finish;
#[test]
    fn simple_record1() {
let (res, record) = super::simple_record("A(1, 2.0)").finish().unwrap();
dbg!(record);
assert_eq!(res, "");
}
#[test]
    fn simple_record2() {
let (res, record) = super::simple_record(
"LENGTH_MEASURE_WITH_UNIT( LENGTH_MEASURE( 1.00000000000000 ), #359 )",
)
.finish()
.unwrap();
dbg!(record);
assert_eq!(res, "");
}
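    // A hedged extra check (not part of the original suite): a subsuper
    // record holding two simple records should parse into a two-element
    // list. The literal below is an assumed-valid complex entity encoding.
    #[test]
    fn subsuper_record1() {
        let (res, records) = super::subsuper_record("(A(1) B(2.0))")
            .finish()
            .unwrap();
        assert_eq!(res, "");
        assert_eq!(records.len(), 2);
    }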
}
| 31.285714 | 95 | 0.605349 |
cc169f7deb1011fd8980c76e19b686aa38e527a9 | 9,804 | //! Example program for testing rendering with skribo.
use std::env;
use std::fs::File;
use std::io::Write;
use std::ops::Range;
use euclid::{Point2D, Size2D};
use font_kit::canvas::{Canvas, Format, RasterizationOptions};
use font_kit::family_name::FamilyName;
use font_kit::hinting::HintingOptions;
use font_kit::properties::Properties;
use font_kit::loader::FontTransform;
use font_kit::source::SystemSource;
use skribo::{
layout, layout_run, make_layout, FontCollection, FontFamily, FontRef, Layout, LayoutSession,
TextStyle,
};
#[cfg(target_family = "windows")]
const DEVANAGARI_FONT_POSTSCRIPT_NAME: &str = "NirmalaUI";
#[cfg(target_os = "macos")]
const DEVANAGARI_FONT_POSTSCRIPT_NAME: &str = "DevanagariUI";
#[cfg(target_os = "linux")]
const DEVANAGARI_FONT_POSTSCRIPT_NAME: &str = "NotoSerifDevanagari";
struct SimpleSurface {
width: usize,
height: usize,
pixels: Vec<u8>,
}
fn composite(a: u8, b: u8) -> u8 {
let y = ((255 - a) as u16) * ((255 - b) as u16);
let y = (y + (y >> 8) + 0x80) >> 8; // fast approx to round(y / 255)
255 - (y as u8)
}
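// A minimal sanity check for `composite` (an added sketch, not part of the
// original example): zero coverage is an identity and full coverage
// saturates, matching the fast round(y / 255) approximation above.
#[cfg(test)]
mod composite_tests {
    #[test]
    fn saturation_and_identity() {
        use super::composite;
        assert_eq!(composite(0, 0), 0); // empty over empty stays empty
        assert_eq!(composite(0, 77), 77); // zero source leaves dst unchanged
        assert_eq!(composite(77, 0), 77); // symmetric: zero dst keeps source
        assert_eq!(composite(255, 10), 255); // full coverage saturates
    }
}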
// A simple drawing surface, just because it's easier to implement such things
// directly than pull in dependencies for it.
impl SimpleSurface {
fn new(width: usize, height: usize) -> SimpleSurface {
let pixels = vec![0; width * height];
SimpleSurface {
width,
height,
pixels,
}
}
fn paint_from_canvas(&mut self, canvas: &Canvas, x: i32, y: i32) {
let (cw, ch) = (canvas.size.width as i32, canvas.size.height as i32);
let (w, h) = (self.width as i32, self.height as i32);
let y = y - ch;
let xmin = 0.max(-x);
let xmax = cw.min(w - x);
let ymin = 0.max(-y);
let ymax = ch.min(h - y);
for yy in ymin..(ymax.max(ymin)) {
for xx in xmin..(xmax.max(xmin)) {
let pix = canvas.pixels[(cw * yy + xx) as usize];
let dst_ix = ((y + yy) * w + x + xx) as usize;
self.pixels[dst_ix] = composite(self.pixels[dst_ix], pix);
}
}
}
fn write_pgm(&self, filename: &str) -> Result<(), std::io::Error> {
let mut f = File::create(filename)?;
write!(f, "P5\n{} {}\n255\n", self.width, self.height)?;
        f.write_all(&self.pixels)?;
Ok(())
}
fn paint_layout(&mut self, layout: &Layout, x: i32, y: i32) {
for glyph in &layout.glyphs {
let glyph_id = glyph.glyph_id;
let glyph_x = (glyph.offset.x as i32) + x;
let glyph_y = (glyph.offset.y as i32) + y;
let bounds = glyph
.font
.font
.raster_bounds(
glyph_id,
layout.size,
&FontTransform::identity(),
&Point2D::zero(),
HintingOptions::None,
RasterizationOptions::GrayscaleAa,
)
.unwrap();
println!(
"glyph {}, bounds {:?}, {},{}",
glyph_id, bounds, glyph_x, glyph_y
);
if !bounds.is_empty() {
let origin_adj = bounds.origin.to_f32();
let neg_origin = Point2D::new(-origin_adj.x, -origin_adj.y);
let mut canvas = Canvas::new(
                    // Not sure why we need to add the extra pixel of height, probably a rounding issue.
// In any case, seems to get the job done (with CoreText rendering, anyway).
&Size2D::new(bounds.size.width as u32, 1 + bounds.size.height as u32),
Format::A8,
);
glyph
.font
.font
.rasterize_glyph(
&mut canvas,
glyph_id,
// TODO(font-kit): this is missing anamorphic and skew features
layout.size,
&FontTransform::identity(),
&neg_origin,
HintingOptions::None,
RasterizationOptions::GrayscaleAa,
)
.unwrap();
self.paint_from_canvas(
&canvas,
glyph_x + bounds.origin.x,
glyph_y - bounds.origin.y,
);
}
}
}
fn paint_layout_session<S: AsRef<str>>(
&mut self,
layout: &mut LayoutSession<S>,
x: i32,
y: i32,
range: Range<usize>,
) {
for run in layout.iter_substr(range) {
let font = run.font();
let size = 32.0; // TODO: probably should get this from run
println!("run, font = {:?}", font);
for glyph in run.glyphs() {
let glyph_id = glyph.glyph_id;
let glyph_x = (glyph.offset.x as i32) + x;
let glyph_y = (glyph.offset.y as i32) + y;
let bounds = font
.font
.raster_bounds(
glyph_id,
size,
&FontTransform::identity(),
&Point2D::zero(),
HintingOptions::None,
RasterizationOptions::GrayscaleAa,
)
.unwrap();
println!(
"glyph {}, bounds {:?}, {},{}",
glyph_id, bounds, glyph_x, glyph_y
);
if !bounds.is_empty() {
let origin_adj = bounds.origin.to_f32();
let neg_origin = Point2D::new(-origin_adj.x, -origin_adj.y);
let mut canvas = Canvas::new(
                        // Not sure why we need to add the extra pixel of height, probably a rounding issue.
// In any case, seems to get the job done (with CoreText rendering, anyway).
&Size2D::new(bounds.size.width as u32, 1 + bounds.size.height as u32),
Format::A8,
);
font.font
.rasterize_glyph(
&mut canvas,
glyph_id,
// TODO(font-kit): this is missing anamorphic and skew features
size,
&FontTransform::identity(),
&neg_origin,
HintingOptions::None,
RasterizationOptions::GrayscaleAa,
)
.unwrap();
self.paint_from_canvas(
&canvas,
glyph_x + bounds.origin.x,
glyph_y - bounds.origin.y,
);
}
println!("glyph {} @ {:?}", glyph.glyph_id, glyph.offset);
}
}
}
}
fn make_collection() -> FontCollection {
let mut collection = FontCollection::new();
let source = SystemSource::new();
let font = source
.select_best_match(&[FamilyName::SansSerif], &Properties::new())
.unwrap()
.load()
.unwrap();
collection.add_family(FontFamily::new_from_font(font));
let font = source
.select_by_postscript_name(DEVANAGARI_FONT_POSTSCRIPT_NAME)
.expect("failed to select Devanagari font")
.load()
.unwrap();
collection.add_family(FontFamily::new_from_font(font));
collection
}
fn main() {
let font = SystemSource::new()
.select_best_match(&[FamilyName::SansSerif], &Properties::new())
.unwrap()
.load()
.unwrap();
let data = font.copy_font_data();
println!("font data: {:?} bytes", data.map(|d| d.len()));
let style = TextStyle { size: 32.0 };
let glyph_id = font.glyph_for_char('O').unwrap();
println!("glyph id = {}", glyph_id);
println!(
"glyph typo bounds: {:?}",
font.typographic_bounds(glyph_id).unwrap()
);
println!(
"glyph raster bounds: {:?}",
font.raster_bounds(
glyph_id,
32.0,
&FontTransform::identity(),
&Point2D::zero(),
HintingOptions::None,
RasterizationOptions::GrayscaleAa
)
);
let mut canvas = Canvas::new(&Size2D::new(32, 32), Format::A8);
font.rasterize_glyph(
&mut canvas,
glyph_id,
// TODO(font-kit): this is missing anamorphic and skew features
style.size,
&FontTransform::identity(),
&Point2D::zero(),
HintingOptions::None,
RasterizationOptions::GrayscaleAa,
)
.unwrap();
// TODO(font-kit): FreeType is top-aligned, CoreText is bottom-aligned, and FT seems to ignore origin
font.rasterize_glyph(
&mut canvas,
glyph_id,
style.size,
&FontTransform::identity(),
&Point2D::new(16.0, 16.0),
HintingOptions::None,
RasterizationOptions::GrayscaleAa,
)
.unwrap();
    let mut args = env::args();
args.next();
let text = args
.next()
.unwrap_or("Hello हिन्दी".to_string());
//let layout = make_layout(&style, &font, &text);
let collection = make_collection();
/*
let layout = layout(&style, &collection, &text);
println!("{:?}", layout);
*/
let mut layout = LayoutSession::create(&text, &style, &collection);
let mut surface = SimpleSurface::new(200, 50);
surface.paint_layout_session(&mut layout, 0, 35, 0..text.len());
surface.write_pgm("out.pgm").unwrap();
}
| 35.014286 | 109 | 0.500612 |
8f990bc2a5b1f331129abcb0d8919560ee8719dd | 4,160 | use crate::error::NftVoterError;
use crate::{id, state::*};
use anchor_lang::prelude::*;
use anchor_lang::Accounts;
use itertools::Itertools;
use spl_governance_tools::account::create_and_serialize_account_signed;
/// Casts NFT vote. The NFTs used for voting are tracked using NftVoteRecord accounts
/// This instruction updates VoterWeightRecord which is valid for the current Slot and the target Proposal only
/// and hence the instruction has to be executed inside the same transaction as spl-gov.CastVote
///
/// CastNftVote instruction and NftVoteRecord are not directional. They don't record vote choice (ex Yes/No)
/// VoteChoice is recorded by spl-gov in VoteRecord and this CastNftVote only tracks voting NFTs
///
#[derive(Accounts)]
#[instruction(proposal: Pubkey)]
pub struct CastNftVote<'info> {
/// The NFT voting registrar
pub registrar: Account<'info, Registrar>,
#[account(
mut,
constraint = voter_weight_record.realm == registrar.realm
@ NftVoterError::InvalidVoterWeightRecordRealm,
constraint = voter_weight_record.governing_token_mint == registrar.governing_token_mint
@ NftVoterError::InvalidVoterWeightRecordMint,
)]
pub voter_weight_record: Account<'info, VoterWeightRecord>,
/// The token owner who casts the vote
#[account(
address = voter_weight_record.governing_token_owner @ NftVoterError::InvalidTokenOwnerForVoterWeightRecord
)]
pub governing_token_owner: Signer<'info>,
/// The account which pays for the transaction
#[account(mut)]
pub payer: Signer<'info>,
pub system_program: Program<'info, System>,
}
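// Illustrative layout of the instruction's remaining accounts (an assumption
// drawn from the `tuples()` iteration in `cast_nft_vote` below): voting NFTs
// are passed as repeating triples, e.g. for two NFTs:
//
//     [nft_token_account_1, nft_metadata_1, nft_vote_record_1,
//      nft_token_account_2, nft_metadata_2, nft_vote_record_2]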
/// Casts vote with the NFT
pub fn cast_nft_vote<'a, 'b, 'c, 'info>(
ctx: Context<'a, 'b, 'c, 'info, CastNftVote<'info>>,
proposal: Pubkey,
) -> Result<()> {
let registrar = &ctx.accounts.registrar;
let governing_token_owner = &ctx.accounts.governing_token_owner.key();
let mut voter_weight = 0u64;
// Ensure all voting nfts in the batch are unique
let mut unique_nft_mints = vec![];
let rent = Rent::get()?;
for (nft_info, nft_metadata_info, nft_vote_record_info) in
ctx.remaining_accounts.iter().tuples()
{
let (nft_vote_weight, nft_mint) = resolve_nft_vote_weight_and_mint(
registrar,
governing_token_owner,
nft_info,
nft_metadata_info,
&mut unique_nft_mints,
)?;
voter_weight = voter_weight.checked_add(nft_vote_weight as u64).unwrap();
// Create NFT vote record to ensure the same NFT hasn't been already used for voting
// Note: The correct PDA of the NftVoteRecord is validated in create_and_serialize_account_signed
// It ensures the NftVoteRecord is for ('nft-vote-record',proposal,nft_mint) seeds
require!(
nft_vote_record_info.data_is_empty(),
NftVoterError::NftAlreadyVoted
);
let nft_vote_record = NftVoteRecord {
account_discriminator: NftVoteRecord::ACCOUNT_DISCRIMINATOR,
proposal,
nft_mint,
governing_token_owner: *governing_token_owner,
reserved: [0; 8],
};
// Anchor doesn't natively support dynamic account creation using remaining_accounts
        // and we have to create the account manually
create_and_serialize_account_signed(
&ctx.accounts.payer.to_account_info(),
nft_vote_record_info,
&nft_vote_record,
&get_nft_vote_record_seeds(&proposal, &nft_mint),
&id(),
&ctx.accounts.system_program.to_account_info(),
&rent,
)?;
}
let voter_weight_record = &mut ctx.accounts.voter_weight_record;
voter_weight_record.voter_weight = voter_weight;
// The record is only valid as of the current slot
voter_weight_record.voter_weight_expiry = Some(Clock::get()?.slot);
// The record is only valid for casting vote on the given Proposal
voter_weight_record.weight_action = Some(VoterWeightAction::CastVote);
voter_weight_record.weight_action_target = Some(proposal);
Ok(())
}
| 36.491228 | 114 | 0.688462 |
1a184203da238e0e8ffff4e16e93b74c65b8209e | 157,876 | #[allow(deprecated)]
use {
crate::{allocator_bump::BpfAllocator, BpfError},
solana_program_runtime::{
ic_logger_msg, ic_msg,
invoke_context::{visit_each_account_once, ComputeMeter, InvokeContext},
stable_log,
timings::ExecuteTimings,
},
solana_rbpf::{
aligned_memory::AlignedMemory,
ebpf,
error::EbpfError,
memory_region::{AccessType, MemoryMapping},
question_mark,
vm::{EbpfVm, SyscallObject, SyscallRegistry},
},
solana_sdk::{
account::{ReadableAccount, WritableAccount},
account_info::AccountInfo,
blake3, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable,
entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, SUCCESS},
feature_set::{
add_get_processed_sibling_instruction_syscall, blake3_syscall_enabled,
check_physical_overlapping, check_slice_translation_size, disable_fees_sysvar,
do_support_realloc, executables_incur_cpi_data_cost, fixed_memcpy_nonoverlapping_check,
libsecp256k1_0_5_upgrade_enabled, limit_secp256k1_recovery_id,
prevent_calling_precompiles_as_programs, return_data_syscall_enabled,
secp256k1_recover_syscall_enabled, sol_log_data_syscall_enabled,
syscall_saturated_math, update_syscall_base_costs, zk_token_sdk_enabled,
},
hash::{Hasher, HASH_BYTES},
instruction::{
AccountMeta, Instruction, InstructionError, ProcessedSiblingInstruction,
TRANSACTION_LEVEL_STACK_HEIGHT,
},
keccak, native_loader,
precompiles::is_precompile,
program::MAX_RETURN_DATA,
program_stubs::is_nonoverlapping,
pubkey::{Pubkey, PubkeyError, MAX_SEEDS, MAX_SEED_LEN},
secp256k1_recover::{
Secp256k1RecoverError, SECP256K1_PUBLIC_KEY_LENGTH, SECP256K1_SIGNATURE_LENGTH,
},
sysvar::{Sysvar, SysvarId},
transaction_context::InstructionAccount,
},
std::{
alloc::Layout,
cell::{Ref, RefCell, RefMut},
mem::{align_of, size_of},
rc::Rc,
slice::from_raw_parts_mut,
str::{from_utf8, Utf8Error},
sync::Arc,
},
thiserror::Error as ThisError,
};
/// Maximum signers
pub const MAX_SIGNERS: usize = 16;
/// Error definitions
#[derive(Debug, ThisError, PartialEq)]
pub enum SyscallError {
#[error("{0}: {1:?}")]
InvalidString(Utf8Error, Vec<u8>),
#[error("BPF program panicked")]
Abort,
#[error("BPF program Panicked in {0} at {1}:{2}")]
Panic(String, u64, u64),
#[error("Cannot borrow invoke context")]
InvokeContextBorrowFailed,
#[error("Malformed signer seed: {0}: {1:?}")]
MalformedSignerSeed(Utf8Error, Vec<u8>),
#[error("Could not create program address with signer seeds: {0}")]
BadSeeds(PubkeyError),
#[error("Program {0} not supported by inner instructions")]
ProgramNotSupported(Pubkey),
#[error("{0}")]
InstructionError(InstructionError),
#[error("Unaligned pointer")]
UnalignedPointer,
#[error("Too many signers")]
TooManySigners,
#[error("Instruction passed to inner instruction is too large ({0} > {1})")]
InstructionTooLarge(usize, usize),
#[error("Too many accounts passed to inner instruction")]
TooManyAccounts,
#[error("Overlapping copy")]
CopyOverlapping,
#[error("Return data too large ({0} > {1})")]
ReturnDataTooLarge(u64, u64),
#[error("Hashing too many sequences")]
TooManySlices,
#[error("InvalidLength")]
InvalidLength,
}
impl From<SyscallError> for EbpfError<BpfError> {
fn from(error: SyscallError) -> Self {
EbpfError::UserError(error.into())
}
}
trait SyscallConsume {
fn consume(&mut self, amount: u64) -> Result<(), EbpfError<BpfError>>;
}
impl SyscallConsume for Rc<RefCell<ComputeMeter>> {
fn consume(&mut self, amount: u64) -> Result<(), EbpfError<BpfError>> {
self.try_borrow_mut()
.map_err(|_| SyscallError::InvokeContextBorrowFailed)?
.consume(amount)
.map_err(SyscallError::InstructionError)?;
Ok(())
}
}
macro_rules! register_feature_gated_syscall {
($syscall_registry:expr, $is_feature_active:expr, $name:expr, $init:expr, $call:expr $(,)?) => {
if $is_feature_active {
$syscall_registry.register_syscall_by_name($name, $init, $call)
} else {
Ok(())
}
};
}
pub fn register_syscalls(
invoke_context: &mut InvokeContext,
) -> Result<SyscallRegistry, EbpfError<BpfError>> {
let secp256k1_recover_syscall_enabled = invoke_context
.feature_set
.is_active(&secp256k1_recover_syscall_enabled::id());
let blake3_syscall_enabled = invoke_context
.feature_set
.is_active(&blake3_syscall_enabled::id());
let zk_token_sdk_enabled = invoke_context
.feature_set
.is_active(&zk_token_sdk_enabled::id());
let disable_fees_sysvar = invoke_context
.feature_set
.is_active(&disable_fees_sysvar::id());
let return_data_syscall_enabled = invoke_context
.feature_set
.is_active(&return_data_syscall_enabled::id());
let sol_log_data_syscall_enabled = invoke_context
.feature_set
.is_active(&sol_log_data_syscall_enabled::id());
let add_get_processed_sibling_instruction_syscall = invoke_context
.feature_set
.is_active(&add_get_processed_sibling_instruction_syscall::id());
let mut syscall_registry = SyscallRegistry::default();
// Abort
syscall_registry.register_syscall_by_name(b"abort", SyscallAbort::init, SyscallAbort::call)?;
// Panic
syscall_registry.register_syscall_by_name(
b"sol_panic_",
SyscallPanic::init,
SyscallPanic::call,
)?;
// Logging
syscall_registry.register_syscall_by_name(b"sol_log_", SyscallLog::init, SyscallLog::call)?;
syscall_registry.register_syscall_by_name(
b"sol_log_64_",
SyscallLogU64::init,
SyscallLogU64::call,
)?;
syscall_registry.register_syscall_by_name(
b"sol_log_compute_units_",
SyscallLogBpfComputeUnits::init,
SyscallLogBpfComputeUnits::call,
)?;
syscall_registry.register_syscall_by_name(
b"sol_log_pubkey",
SyscallLogPubkey::init,
SyscallLogPubkey::call,
)?;
// Program defined addresses (PDA)
syscall_registry.register_syscall_by_name(
b"sol_create_program_address",
SyscallCreateProgramAddress::init,
SyscallCreateProgramAddress::call,
)?;
syscall_registry.register_syscall_by_name(
b"sol_try_find_program_address",
SyscallTryFindProgramAddress::init,
SyscallTryFindProgramAddress::call,
)?;
// Sha256
syscall_registry.register_syscall_by_name(
b"sol_sha256",
SyscallSha256::init,
SyscallSha256::call,
)?;
// Keccak256
syscall_registry.register_syscall_by_name(
b"sol_keccak256",
SyscallKeccak256::init,
SyscallKeccak256::call,
)?;
// Secp256k1 Recover
register_feature_gated_syscall!(
syscall_registry,
secp256k1_recover_syscall_enabled,
b"sol_secp256k1_recover",
SyscallSecp256k1Recover::init,
SyscallSecp256k1Recover::call,
)?;
// Blake3
register_feature_gated_syscall!(
syscall_registry,
blake3_syscall_enabled,
b"sol_blake3",
SyscallBlake3::init,
SyscallBlake3::call,
)?;
// ZK Token
register_feature_gated_syscall!(
syscall_registry,
zk_token_sdk_enabled,
b"sol_zk_token_elgamal_op",
SyscallZkTokenElgamalOp::init,
SyscallZkTokenElgamalOp::call,
)?;
register_feature_gated_syscall!(
syscall_registry,
zk_token_sdk_enabled,
b"sol_zk_token_elgamal_op_with_lo_hi",
SyscallZkTokenElgamalOpWithLoHi::init,
SyscallZkTokenElgamalOpWithLoHi::call,
)?;
register_feature_gated_syscall!(
syscall_registry,
zk_token_sdk_enabled,
b"sol_zk_token_elgamal_op_with_scalar",
SyscallZkTokenElgamalOpWithScalar::init,
SyscallZkTokenElgamalOpWithScalar::call,
)?;
// Sysvars
syscall_registry.register_syscall_by_name(
b"sol_get_clock_sysvar",
SyscallGetClockSysvar::init,
SyscallGetClockSysvar::call,
)?;
syscall_registry.register_syscall_by_name(
b"sol_get_epoch_schedule_sysvar",
SyscallGetEpochScheduleSysvar::init,
SyscallGetEpochScheduleSysvar::call,
)?;
register_feature_gated_syscall!(
syscall_registry,
!disable_fees_sysvar,
b"sol_get_fees_sysvar",
SyscallGetFeesSysvar::init,
SyscallGetFeesSysvar::call,
)?;
syscall_registry.register_syscall_by_name(
b"sol_get_rent_sysvar",
SyscallGetRentSysvar::init,
SyscallGetRentSysvar::call,
)?;
// Memory ops
syscall_registry.register_syscall_by_name(
b"sol_memcpy_",
SyscallMemcpy::init,
SyscallMemcpy::call,
)?;
syscall_registry.register_syscall_by_name(
b"sol_memmove_",
SyscallMemmove::init,
SyscallMemmove::call,
)?;
syscall_registry.register_syscall_by_name(
b"sol_memcmp_",
SyscallMemcmp::init,
SyscallMemcmp::call,
)?;
syscall_registry.register_syscall_by_name(
b"sol_memset_",
SyscallMemset::init,
SyscallMemset::call,
)?;
// Cross-program invocation
syscall_registry.register_syscall_by_name(
b"sol_invoke_signed_c",
SyscallInvokeSignedC::init,
SyscallInvokeSignedC::call,
)?;
syscall_registry.register_syscall_by_name(
b"sol_invoke_signed_rust",
SyscallInvokeSignedRust::init,
SyscallInvokeSignedRust::call,
)?;
// Memory allocator
syscall_registry.register_syscall_by_name(
b"sol_alloc_free_",
SyscallAllocFree::init,
SyscallAllocFree::call,
)?;
// Return data
register_feature_gated_syscall!(
syscall_registry,
return_data_syscall_enabled,
b"sol_set_return_data",
SyscallSetReturnData::init,
SyscallSetReturnData::call,
)?;
register_feature_gated_syscall!(
syscall_registry,
return_data_syscall_enabled,
b"sol_get_return_data",
SyscallGetReturnData::init,
SyscallGetReturnData::call,
)?;
// Log data
register_feature_gated_syscall!(
syscall_registry,
sol_log_data_syscall_enabled,
b"sol_log_data",
SyscallLogData::init,
SyscallLogData::call,
)?;
// Processed sibling instructions
register_feature_gated_syscall!(
syscall_registry,
add_get_processed_sibling_instruction_syscall,
b"sol_get_processed_sibling_instruction",
SyscallGetProcessedSiblingInstruction::init,
SyscallGetProcessedSiblingInstruction::call,
)?;
// Stack height
register_feature_gated_syscall!(
syscall_registry,
add_get_processed_sibling_instruction_syscall,
b"sol_get_stack_height",
SyscallGetStackHeight::init,
SyscallGetStackHeight::call,
)?;
Ok(syscall_registry)
}
pub fn bind_syscall_context_objects<'a, 'b>(
vm: &mut EbpfVm<'a, BpfError, crate::ThisInstructionMeter>,
invoke_context: &'a mut InvokeContext<'b>,
heap: AlignedMemory,
) -> Result<(), EbpfError<BpfError>> {
invoke_context.set_check_aligned(
bpf_loader_deprecated::id()
!= invoke_context
.transaction_context
.get_current_instruction_context()
.and_then(|instruction_context| {
instruction_context
.try_borrow_program_account(invoke_context.transaction_context)
})
.map(|program_account| *program_account.get_owner())
.map_err(SyscallError::InstructionError)?,
);
invoke_context.set_check_size(
invoke_context
.feature_set
.is_active(&check_slice_translation_size::id()),
);
invoke_context
.set_allocator(Rc::new(RefCell::new(BpfAllocator::new(
heap,
ebpf::MM_HEAP_START,
))))
.map_err(SyscallError::InstructionError)?;
let invoke_context = Rc::new(RefCell::new(invoke_context));
vm.bind_syscall_context_objects(invoke_context)?;
Ok(())
}
fn translate(
memory_mapping: &MemoryMapping,
access_type: AccessType,
vm_addr: u64,
len: u64,
) -> Result<u64, EbpfError<BpfError>> {
memory_mapping.map::<BpfError>(access_type, vm_addr, len)
}
fn translate_type_inner<'a, T>(
memory_mapping: &MemoryMapping,
access_type: AccessType,
vm_addr: u64,
check_aligned: bool,
) -> Result<&'a mut T, EbpfError<BpfError>> {
let host_addr = translate(memory_mapping, access_type, vm_addr, size_of::<T>() as u64)?;
if check_aligned && (host_addr as *mut T as usize).wrapping_rem(align_of::<T>()) != 0 {
return Err(SyscallError::UnalignedPointer.into());
}
Ok(unsafe { &mut *(host_addr as *mut T) })
}
fn translate_type_mut<'a, T>(
memory_mapping: &MemoryMapping,
vm_addr: u64,
check_aligned: bool,
) -> Result<&'a mut T, EbpfError<BpfError>> {
translate_type_inner::<T>(memory_mapping, AccessType::Store, vm_addr, check_aligned)
}
fn translate_type<'a, T>(
memory_mapping: &MemoryMapping,
vm_addr: u64,
check_aligned: bool,
) -> Result<&'a T, EbpfError<BpfError>> {
translate_type_inner::<T>(memory_mapping, AccessType::Load, vm_addr, check_aligned)
.map(|value| &*value)
}
fn translate_slice_inner<'a, T>(
memory_mapping: &MemoryMapping,
access_type: AccessType,
vm_addr: u64,
len: u64,
check_aligned: bool,
check_size: bool,
) -> Result<&'a mut [T], EbpfError<BpfError>> {
if len == 0 {
return Ok(&mut []);
}
let total_size = len.saturating_mul(size_of::<T>() as u64);
    if check_size && isize::try_from(total_size).is_err() {
return Err(SyscallError::InvalidLength.into());
}
let host_addr = translate(memory_mapping, access_type, vm_addr, total_size)?;
if check_aligned && (host_addr as *mut T as usize).wrapping_rem(align_of::<T>()) != 0 {
return Err(SyscallError::UnalignedPointer.into());
}
Ok(unsafe { from_raw_parts_mut(host_addr as *mut T, len as usize) })
}
fn translate_slice_mut<'a, T>(
memory_mapping: &MemoryMapping,
vm_addr: u64,
len: u64,
check_aligned: bool,
check_size: bool,
) -> Result<&'a mut [T], EbpfError<BpfError>> {
translate_slice_inner::<T>(
memory_mapping,
AccessType::Store,
vm_addr,
len,
check_aligned,
check_size,
)
}
fn translate_slice<'a, T>(
memory_mapping: &MemoryMapping,
vm_addr: u64,
len: u64,
check_aligned: bool,
check_size: bool,
) -> Result<&'a [T], EbpfError<BpfError>> {
translate_slice_inner::<T>(
memory_mapping,
AccessType::Load,
vm_addr,
len,
check_aligned,
check_size,
)
.map(|value| &*value)
}
/// Take a virtual pointer to a string (points to BPF VM memory space), translate it,
/// and pass it to a user-defined work function.
fn translate_string_and_do(
memory_mapping: &MemoryMapping,
addr: u64,
len: u64,
check_aligned: bool,
check_size: bool,
work: &mut dyn FnMut(&str) -> Result<u64, EbpfError<BpfError>>,
) -> Result<u64, EbpfError<BpfError>> {
let buf = translate_slice::<u8>(memory_mapping, addr, len, check_aligned, check_size)?;
let i = match buf.iter().position(|byte| *byte == 0) {
Some(i) => i,
None => len as usize,
};
let msg = buf.get(..i).ok_or(SyscallError::InvalidLength)?;
match from_utf8(msg) {
Ok(message) => work(message),
Err(err) => Err(SyscallError::InvalidString(err, msg.to_vec()).into()),
}
}
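// Behaviour note (an observation of the logic above, not upstream docs): the
// translated bytes are cut at the first NUL byte if one exists, otherwise the
// full `len` is used. E.g. for the buffer b"abc\0def" with len 7 the work
// closure sees "abc"; for b"abc" with len 3 it also sees "abc".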
type SyscallContext<'a, 'b> = Rc<RefCell<&'a mut InvokeContext<'b>>>;
macro_rules! declare_syscall {
($(#[$attr:meta])* $name:ident, $call:item) => {
$(#[$attr])*
pub struct $name<'a, 'b> {
invoke_context: SyscallContext<'a, 'b>,
}
impl<'a, 'b> $name<'a, 'b> {
pub fn init(
invoke_context: SyscallContext<'a, 'b>,
) -> Box<(dyn SyscallObject<BpfError> + 'a)> {
Box::new(Self { invoke_context })
}
}
impl<'a, 'b> SyscallObject<BpfError> for $name<'a, 'b> {
$call
}
};
}
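// Usage sketch (illustrative; `SyscallFoo` is a hypothetical name, not part
// of the real syscall set): `declare_syscall!(SyscallFoo, fn call(..) {..})`
// emits a `SyscallFoo` struct wrapping the shared `SyscallContext`, an
// `init` constructor that boxes it as a `dyn SyscallObject<BpfError>`, and a
// `SyscallObject` impl whose `call` body is the one supplied. The resulting
// `init`/`call` pair is what `register_syscalls` above registers by name.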
declare_syscall!(
/// Abort syscall functions, called when the BPF program calls `abort()`
/// LLVM will insert calls to `abort()` if it detects an untenable situation,
/// `abort()` is not intended to be called explicitly by the program.
/// Causes the BPF program to be halted immediately
SyscallAbort,
fn call(
&mut self,
_arg1: u64,
_arg2: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
_memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let _ = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
*result = Err(SyscallError::Abort.into());
}
);
declare_syscall!(
    /// Panic syscall function, called when the BPF program calls `sol_panic_()`.
/// Causes the BPF program to be halted immediately
SyscallPanic,
fn call(
&mut self,
file: u64,
len: u64,
line: u64,
column: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
if !invoke_context
.feature_set
.is_active(&update_syscall_base_costs::id())
{
question_mark!(invoke_context.get_compute_meter().consume(len), result);
}
*result = translate_string_and_do(
memory_mapping,
file,
len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
&mut |string: &str| Err(SyscallError::Panic(string.to_string(), line, column).into()),
);
}
);
declare_syscall!(
/// Log a user's info message
SyscallLog,
fn call(
&mut self,
addr: u64,
len: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let cost = if invoke_context
.feature_set
.is_active(&update_syscall_base_costs::id())
{
invoke_context
.get_compute_budget()
.syscall_base_cost
.max(len)
} else {
len
};
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
question_mark!(
translate_string_and_do(
memory_mapping,
addr,
len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
&mut |string: &str| {
stable_log::program_log(&invoke_context.get_log_collector(), string);
Ok(0)
}
),
result
);
*result = Ok(0);
}
);
declare_syscall!(
/// Log 5 64-bit values
SyscallLogU64,
fn call(
&mut self,
arg1: u64,
arg2: u64,
arg3: u64,
arg4: u64,
arg5: u64,
_memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let cost = invoke_context.get_compute_budget().log_64_units;
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
stable_log::program_log(
&invoke_context.get_log_collector(),
&format!(
"{:#x}, {:#x}, {:#x}, {:#x}, {:#x}",
arg1, arg2, arg3, arg4, arg5
),
);
*result = Ok(0);
}
);
declare_syscall!(
/// Log current compute consumption
SyscallLogBpfComputeUnits,
fn call(
&mut self,
_arg1: u64,
_arg2: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
_memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let cost = if invoke_context
.feature_set
.is_active(&update_syscall_base_costs::id())
{
invoke_context.get_compute_budget().syscall_base_cost
} else {
0
};
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
ic_logger_msg!(
invoke_context.get_log_collector(),
"Program consumption: {} units remaining",
invoke_context.get_compute_meter().borrow().get_remaining()
);
*result = Ok(0);
}
);
declare_syscall!(
    /// Log a `Pubkey` as a base58 string
SyscallLogPubkey,
fn call(
&mut self,
pubkey_addr: u64,
_arg2: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let cost = invoke_context.get_compute_budget().log_pubkey_units;
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
let pubkey = question_mark!(
translate_type::<Pubkey>(
memory_mapping,
pubkey_addr,
invoke_context.get_check_aligned()
),
result
);
stable_log::program_log(&invoke_context.get_log_collector(), &pubkey.to_string());
*result = Ok(0);
}
);
declare_syscall!(
/// Dynamic memory allocation syscall called when the BPF program calls
/// `sol_alloc_free_()`. The allocator is expected to allocate/free
/// from/to a given chunk of memory and enforce size restrictions. The
/// memory chunk is given to the allocator during allocator creation and
/// information about that memory (start address and size) is passed
/// to the VM to use for enforcement.
SyscallAllocFree,
fn call(
&mut self,
size: u64,
free_addr: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
_memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let allocator = question_mark!(
invoke_context
.get_allocator()
.map_err(SyscallError::InstructionError),
result
);
let mut allocator = question_mark!(
allocator
.try_borrow_mut()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let align = if invoke_context.get_check_aligned() {
BPF_ALIGN_OF_U128
} else {
align_of::<u8>()
};
let layout = match Layout::from_size_align(size as usize, align) {
Ok(layout) => layout,
Err(_) => {
*result = Ok(0);
return;
}
};
*result = if free_addr == 0 {
match allocator.alloc(layout) {
Ok(addr) => Ok(addr as u64),
Err(_) => Ok(0),
}
} else {
allocator.dealloc(free_addr, layout);
Ok(0)
};
}
);
fn translate_and_check_program_address_inputs<'a>(
seeds_addr: u64,
seeds_len: u64,
program_id_addr: u64,
memory_mapping: &MemoryMapping,
check_aligned: bool,
check_size: bool,
) -> Result<(Vec<&'a [u8]>, &'a Pubkey), EbpfError<BpfError>> {
let untranslated_seeds = translate_slice::<&[&u8]>(
memory_mapping,
seeds_addr,
seeds_len,
check_aligned,
check_size,
)?;
if untranslated_seeds.len() > MAX_SEEDS {
return Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into());
}
let seeds = untranslated_seeds
.iter()
.map(|untranslated_seed| {
if untranslated_seed.len() > MAX_SEED_LEN {
return Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into());
}
translate_slice::<u8>(
memory_mapping,
untranslated_seed.as_ptr() as *const _ as u64,
untranslated_seed.len() as u64,
check_aligned,
check_size,
)
})
.collect::<Result<Vec<_>, EbpfError<BpfError>>>()?;
let program_id = translate_type::<Pubkey>(memory_mapping, program_id_addr, check_aligned)?;
Ok((seeds, program_id))
}
declare_syscall!(
/// Create a program address
SyscallCreateProgramAddress,
fn call(
&mut self,
seeds_addr: u64,
seeds_len: u64,
program_id_addr: u64,
address_addr: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let cost = invoke_context
.get_compute_budget()
.create_program_address_units;
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
let (seeds, program_id) = question_mark!(
translate_and_check_program_address_inputs(
seeds_addr,
seeds_len,
program_id_addr,
memory_mapping,
invoke_context.get_check_aligned(),
invoke_context.get_check_size()
),
result
);
let new_address = match Pubkey::create_program_address(&seeds, program_id) {
Ok(address) => address,
Err(_) => {
*result = Ok(1);
return;
}
};
let address = question_mark!(
translate_slice_mut::<u8>(
memory_mapping,
address_addr,
32,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
address.copy_from_slice(new_address.as_ref());
*result = Ok(0);
}
);
declare_syscall!(
    /// Try to find a program address and return the corresponding bump seed
SyscallTryFindProgramAddress,
fn call(
&mut self,
seeds_addr: u64,
seeds_len: u64,
program_id_addr: u64,
address_addr: u64,
bump_seed_addr: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let cost = invoke_context
.get_compute_budget()
.create_program_address_units;
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
let (seeds, program_id) = question_mark!(
translate_and_check_program_address_inputs(
seeds_addr,
seeds_len,
program_id_addr,
memory_mapping,
invoke_context.get_check_aligned(),
invoke_context.get_check_size()
),
result
);
let mut bump_seed = [std::u8::MAX];
for _ in 0..std::u8::MAX {
{
let mut seeds_with_bump = seeds.to_vec();
seeds_with_bump.push(&bump_seed);
if let Ok(new_address) =
Pubkey::create_program_address(&seeds_with_bump, program_id)
{
let bump_seed_ref = question_mark!(
translate_type_mut::<u8>(
memory_mapping,
bump_seed_addr,
invoke_context.get_check_aligned()
),
result
);
let address = question_mark!(
translate_slice_mut::<u8>(
memory_mapping,
address_addr,
32,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
*bump_seed_ref = bump_seed[0];
address.copy_from_slice(new_address.as_ref());
*result = Ok(0);
return;
}
}
bump_seed[0] = bump_seed[0].saturating_sub(1);
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
}
*result = Ok(1);
}
);
declare_syscall!(
/// SHA256
SyscallSha256,
fn call(
&mut self,
vals_addr: u64,
vals_len: u64,
result_addr: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let compute_budget = invoke_context.get_compute_budget();
if invoke_context
.feature_set
.is_active(&update_syscall_base_costs::id())
&& compute_budget.sha256_max_slices < vals_len
{
ic_msg!(
invoke_context,
"Sha256 hashing {} sequences in one syscall is over the limit {}",
vals_len,
compute_budget.sha256_max_slices,
);
*result = Err(SyscallError::TooManySlices.into());
return;
}
question_mark!(
invoke_context
.get_compute_meter()
.consume(compute_budget.sha256_base_cost),
result
);
let hash_result = question_mark!(
translate_slice_mut::<u8>(
memory_mapping,
result_addr,
HASH_BYTES as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let mut hasher = Hasher::default();
if vals_len > 0 {
let vals = question_mark!(
translate_slice::<&[u8]>(
memory_mapping,
vals_addr,
vals_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
for val in vals.iter() {
let bytes = question_mark!(
translate_slice::<u8>(
memory_mapping,
val.as_ptr() as u64,
val.len() as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
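                // Per-slice cost: one `sha256_byte_cost` per two input bytes,
                // floored at `mem_op_base_cost` once the
                // `update_syscall_base_costs` feature is active.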
let cost = if invoke_context
.feature_set
.is_active(&update_syscall_base_costs::id())
{
compute_budget.mem_op_base_cost.max(
compute_budget
.sha256_byte_cost
.saturating_mul((val.len() as u64).saturating_div(2)),
)
} else {
compute_budget
.sha256_byte_cost
.saturating_mul((val.len() as u64).saturating_div(2))
};
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
hasher.hash(bytes);
}
}
hash_result.copy_from_slice(&hasher.result().to_bytes());
*result = Ok(0);
}
);
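/// Copy a sysvar out of the cache into program memory at `var_addr`,
/// charging `sysvar_base_cost` plus one compute unit per byte of `T`.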
fn get_sysvar<T: std::fmt::Debug + Sysvar + SysvarId + Clone>(
sysvar: Result<Arc<T>, InstructionError>,
var_addr: u64,
check_aligned: bool,
memory_mapping: &MemoryMapping,
invoke_context: &mut InvokeContext,
) -> Result<u64, EbpfError<BpfError>> {
invoke_context.get_compute_meter().consume(
invoke_context
.get_compute_budget()
.sysvar_base_cost
.saturating_add(size_of::<T>() as u64),
)?;
let var = translate_type_mut::<T>(memory_mapping, var_addr, check_aligned)?;
let sysvar: Arc<T> = sysvar.map_err(SyscallError::InstructionError)?;
*var = T::clone(sysvar.as_ref());
Ok(SUCCESS)
}
declare_syscall!(
/// Get a Clock sysvar
SyscallGetClockSysvar,
fn call(
&mut self,
var_addr: u64,
_arg2: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let mut invoke_context = question_mark!(
self.invoke_context
.try_borrow_mut()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
*result = get_sysvar(
invoke_context.get_sysvar_cache().get_clock(),
var_addr,
invoke_context.get_check_aligned(),
memory_mapping,
&mut invoke_context,
);
}
);
declare_syscall!(
    /// Get an EpochSchedule sysvar
SyscallGetEpochScheduleSysvar,
fn call(
&mut self,
var_addr: u64,
_arg2: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let mut invoke_context = question_mark!(
self.invoke_context
.try_borrow_mut()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
*result = get_sysvar(
invoke_context.get_sysvar_cache().get_epoch_schedule(),
var_addr,
invoke_context.get_check_aligned(),
memory_mapping,
&mut invoke_context,
);
}
);
declare_syscall!(
/// Get a Fees sysvar
SyscallGetFeesSysvar,
fn call(
&mut self,
var_addr: u64,
_arg2: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let mut invoke_context = question_mark!(
self.invoke_context
.try_borrow_mut()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
#[allow(deprecated)]
{
*result = get_sysvar(
invoke_context.get_sysvar_cache().get_fees(),
var_addr,
invoke_context.get_check_aligned(),
memory_mapping,
&mut invoke_context,
);
}
}
);
declare_syscall!(
/// Get a Rent sysvar
SyscallGetRentSysvar,
fn call(
&mut self,
var_addr: u64,
_arg2: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let mut invoke_context = question_mark!(
self.invoke_context
.try_borrow_mut()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
*result = get_sysvar(
invoke_context.get_sysvar_cache().get_rent(),
var_addr,
invoke_context.get_check_aligned(),
memory_mapping,
&mut invoke_context,
);
}
);
declare_syscall!(
    /// Keccak256
SyscallKeccak256,
fn call(
&mut self,
vals_addr: u64,
vals_len: u64,
result_addr: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let compute_budget = invoke_context.get_compute_budget();
if invoke_context
.feature_set
.is_active(&update_syscall_base_costs::id())
&& compute_budget.sha256_max_slices < vals_len
{
ic_msg!(
invoke_context,
"Keccak256 hashing {} sequences in one syscall is over the limit {}",
vals_len,
compute_budget.sha256_max_slices,
);
*result = Err(SyscallError::TooManySlices.into());
return;
}
question_mark!(
invoke_context
.get_compute_meter()
.consume(compute_budget.sha256_base_cost),
result
);
let hash_result = question_mark!(
translate_slice_mut::<u8>(
memory_mapping,
result_addr,
keccak::HASH_BYTES as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let mut hasher = keccak::Hasher::default();
if vals_len > 0 {
let vals = question_mark!(
translate_slice::<&[u8]>(
memory_mapping,
vals_addr,
vals_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
for val in vals.iter() {
let bytes = question_mark!(
translate_slice::<u8>(
memory_mapping,
val.as_ptr() as u64,
val.len() as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let cost = if invoke_context
.feature_set
.is_active(&update_syscall_base_costs::id())
{
compute_budget.mem_op_base_cost.max(
compute_budget
.sha256_byte_cost
.saturating_mul((val.len() as u64).saturating_div(2)),
)
} else {
compute_budget
.sha256_byte_cost
.saturating_mul((val.len() as u64).saturating_div(2))
};
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
hasher.hash(bytes);
}
}
hash_result.copy_from_slice(&hasher.result().to_bytes());
*result = Ok(0);
}
);
/// This function is incorrect due to arithmetic overflow and only exists for
/// backwards compatibility. Instead use program_stubs::is_nonoverlapping.
#[allow(clippy::integer_arithmetic)]
fn check_overlapping_do_not_use(src_addr: u64, dst_addr: u64, n: u64) -> bool {
(src_addr <= dst_addr && src_addr + n > dst_addr)
|| (dst_addr <= src_addr && dst_addr + n > src_addr)
}
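// Worked example of the overflow in `check_overlapping_do_not_use`: with
// `src_addr = u64::MAX - 3`, `dst_addr = u64::MAX - 1`, and `n = 8`, the
// regions overlap, but in release mode `src_addr + n` wraps to 4, so
// `src_addr + n > dst_addr` is false and the overlap goes undetected
// (debug builds would panic on the overflow instead). `is_nonoverlapping`
// compares distances and avoids both problems.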
fn mem_op_consume<'a, 'b>(
invoke_context: &Ref<&'a mut InvokeContext<'b>>,
n: u64,
) -> Result<(), EbpfError<BpfError>> {
let compute_budget = invoke_context.get_compute_budget();
let cost = if invoke_context
.feature_set
.is_active(&update_syscall_base_costs::id())
{
compute_budget
.mem_op_base_cost
.max(n.saturating_div(compute_budget.cpi_bytes_per_unit))
} else {
n.saturating_div(compute_budget.cpi_bytes_per_unit)
};
invoke_context.get_compute_meter().consume(cost)
}
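// Illustrative cost arithmetic for `mem_op_consume` (example numbers only;
// the real values live in `ComputeBudget`): with `mem_op_base_cost = 10` and
// `cpi_bytes_per_unit = 250`, a 1024-byte operation costs
// max(10, 1024 / 250) = 10 units with `update_syscall_base_costs` active,
// and 1024 / 250 = 4 units without it.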
declare_syscall!(
/// memcpy
SyscallMemcpy,
fn call(
&mut self,
dst_addr: u64,
src_addr: u64,
n: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
// When deprecating `update_syscall_base_costs` switch to `mem_op_consume`
let compute_budget = invoke_context.get_compute_budget();
let update_syscall_base_costs = invoke_context
.feature_set
.is_active(&update_syscall_base_costs::id());
if update_syscall_base_costs {
let cost = compute_budget
.mem_op_base_cost
.max(n.saturating_div(compute_budget.cpi_bytes_per_unit));
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
}
let use_fixed_nonoverlapping_check = invoke_context
.feature_set
.is_active(&fixed_memcpy_nonoverlapping_check::id());
let do_check_physical_overlapping = invoke_context
.feature_set
.is_active(&check_physical_overlapping::id());
#[allow(clippy::collapsible_else_if)]
if use_fixed_nonoverlapping_check {
if !is_nonoverlapping(src_addr, dst_addr, n) {
*result = Err(SyscallError::CopyOverlapping.into());
return;
}
} else {
if check_overlapping_do_not_use(src_addr, dst_addr, n) {
*result = Err(SyscallError::CopyOverlapping.into());
return;
}
}
if !update_syscall_base_costs {
let cost = n.saturating_div(compute_budget.cpi_bytes_per_unit);
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
};
let dst_ptr = question_mark!(
translate_slice_mut::<u8>(
memory_mapping,
dst_addr,
n,
invoke_context.get_check_aligned(),
invoke_context.get_check_size()
),
result
)
.as_mut_ptr();
let src_ptr = question_mark!(
translate_slice::<u8>(
memory_mapping,
src_addr,
n,
invoke_context.get_check_aligned(),
invoke_context.get_check_size()
),
result
)
.as_ptr();
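        // Even when the VM addresses pass the non-overlap check above, the
        // translated host pointers can still alias; in that case fall back
        // to the overlap-safe `ptr::copy` (memmove semantics).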
if do_check_physical_overlapping
&& !is_nonoverlapping(src_ptr as usize, dst_ptr as usize, n as usize)
{
unsafe {
std::ptr::copy(src_ptr, dst_ptr, n as usize);
}
} else {
unsafe {
std::ptr::copy_nonoverlapping(src_ptr, dst_ptr, n as usize);
}
}
*result = Ok(0);
}
);
declare_syscall!(
/// memmove
SyscallMemmove,
fn call(
&mut self,
dst_addr: u64,
src_addr: u64,
n: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
question_mark!(mem_op_consume(&invoke_context, n), result);
let dst = question_mark!(
translate_slice_mut::<u8>(
memory_mapping,
dst_addr,
n,
invoke_context.get_check_aligned(),
invoke_context.get_check_size()
),
result
);
let src = question_mark!(
translate_slice::<u8>(
memory_mapping,
src_addr,
n,
invoke_context.get_check_aligned(),
invoke_context.get_check_size()
),
result
);
unsafe {
std::ptr::copy(src.as_ptr(), dst.as_mut_ptr(), n as usize);
}
*result = Ok(0);
}
);
declare_syscall!(
/// memcmp
SyscallMemcmp,
fn call(
&mut self,
s1_addr: u64,
s2_addr: u64,
n: u64,
cmp_result_addr: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
question_mark!(mem_op_consume(&invoke_context, n), result);
let s1 = question_mark!(
translate_slice::<u8>(
memory_mapping,
s1_addr,
n,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let s2 = question_mark!(
translate_slice::<u8>(
memory_mapping,
s2_addr,
n,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let cmp_result = question_mark!(
translate_type_mut::<i32>(
memory_mapping,
cmp_result_addr,
invoke_context.get_check_aligned()
),
result
);
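        // Mirror C's memcmp: report the signed difference of the first
        // unequal byte pair, or 0 if the ranges are identical.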
let mut i = 0;
while i < n as usize {
let a = *question_mark!(s1.get(i).ok_or(SyscallError::InvalidLength,), result);
let b = *question_mark!(s2.get(i).ok_or(SyscallError::InvalidLength,), result);
if a != b {
*cmp_result = if invoke_context
.feature_set
.is_active(&syscall_saturated_math::id())
{
(a as i32).saturating_sub(b as i32)
} else {
#[allow(clippy::integer_arithmetic)]
{
a as i32 - b as i32
}
};
*result = Ok(0);
return;
};
i = i.saturating_add(1);
}
*cmp_result = 0;
*result = Ok(0);
}
);
declare_syscall!(
/// memset
SyscallMemset,
fn call(
&mut self,
s_addr: u64,
c: u64,
n: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
question_mark!(mem_op_consume(&invoke_context, n), result);
let s = question_mark!(
translate_slice_mut::<u8>(
memory_mapping,
s_addr,
n,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
for val in s.iter_mut().take(n as usize) {
*val = c as u8;
}
*result = Ok(0);
}
);
declare_syscall!(
/// secp256k1_recover
SyscallSecp256k1Recover,
fn call(
&mut self,
hash_addr: u64,
recovery_id_val: u64,
signature_addr: u64,
result_addr: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let cost = invoke_context.get_compute_budget().secp256k1_recover_cost;
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
let hash = question_mark!(
translate_slice::<u8>(
memory_mapping,
hash_addr,
keccak::HASH_BYTES as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let signature = question_mark!(
translate_slice::<u8>(
memory_mapping,
signature_addr,
SECP256K1_SIGNATURE_LENGTH as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let secp256k1_recover_result = question_mark!(
translate_slice_mut::<u8>(
memory_mapping,
result_addr,
SECP256K1_PUBLIC_KEY_LENGTH as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let message = match libsecp256k1::Message::parse_slice(hash) {
Ok(msg) => msg,
Err(_) => {
*result = Ok(Secp256k1RecoverError::InvalidHash.into());
return;
}
};
let adjusted_recover_id_val = if invoke_context
.feature_set
.is_active(&limit_secp256k1_recovery_id::id())
{
match recovery_id_val.try_into() {
Ok(adjusted_recover_id_val) => adjusted_recover_id_val,
Err(_) => {
*result = Ok(Secp256k1RecoverError::InvalidRecoveryId.into());
return;
}
}
} else {
recovery_id_val as u8
};
let recovery_id = match libsecp256k1::RecoveryId::parse(adjusted_recover_id_val) {
Ok(id) => id,
Err(_) => {
*result = Ok(Secp256k1RecoverError::InvalidRecoveryId.into());
return;
}
};
let sig_parse_result = if invoke_context
.feature_set
.is_active(&libsecp256k1_0_5_upgrade_enabled::id())
{
libsecp256k1::Signature::parse_standard_slice(signature)
} else {
libsecp256k1::Signature::parse_overflowing_slice(signature)
};
let signature = match sig_parse_result {
Ok(sig) => sig,
Err(_) => {
*result = Ok(Secp256k1RecoverError::InvalidSignature.into());
return;
}
};
let public_key = match libsecp256k1::recover(&message, &signature, &recovery_id) {
Ok(key) => key.serialize(),
Err(_) => {
*result = Ok(Secp256k1RecoverError::InvalidSignature.into());
return;
}
};
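        // `serialize()` yields the 65-byte SEC1 uncompressed key; skip the
        // leading 0x04 tag so the caller receives the raw 64-byte key.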
secp256k1_recover_result.copy_from_slice(&public_key[1..65]);
*result = Ok(SUCCESS);
}
);
declare_syscall!(
    /// Arithmetic (add/subtract) on two ElGamal ciphertexts
    SyscallZkTokenElgamalOp,
fn call(
&mut self,
op: u64,
ct_0_addr: u64,
ct_1_addr: u64,
ct_result_addr: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
use solana_zk_token_sdk::zk_token_elgamal::{ops, pod};
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let cost = invoke_context.get_compute_budget().zk_token_elgamal_op_cost;
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
let ct_0 = question_mark!(
translate_type::<pod::ElGamalCiphertext>(
memory_mapping,
ct_0_addr,
invoke_context.get_check_aligned()
),
result
);
let ct_1 = question_mark!(
translate_type::<pod::ElGamalCiphertext>(
memory_mapping,
ct_1_addr,
invoke_context.get_check_aligned()
),
result
);
if let Some(ct_result) = match op {
ops::OP_ADD => ops::add(ct_0, ct_1),
ops::OP_SUB => ops::subtract(ct_0, ct_1),
_ => None,
} {
*question_mark!(
translate_type_mut::<pod::ElGamalCiphertext>(
memory_mapping,
ct_result_addr,
invoke_context.get_check_aligned(),
),
result
) = ct_result;
*result = Ok(0);
} else {
*result = Ok(1);
}
}
);
declare_syscall!(
    /// ElGamal ciphertext arithmetic with the second operand split into
    /// lo/hi ciphertext halves
    SyscallZkTokenElgamalOpWithLoHi,
fn call(
&mut self,
op: u64,
ct_0_addr: u64,
ct_1_lo_addr: u64,
ct_1_hi_addr: u64,
ct_result_addr: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
use solana_zk_token_sdk::zk_token_elgamal::{ops, pod};
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let cost = invoke_context.get_compute_budget().zk_token_elgamal_op_cost;
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
let ct_0 = question_mark!(
translate_type::<pod::ElGamalCiphertext>(
memory_mapping,
ct_0_addr,
invoke_context.get_check_aligned()
),
result
);
let ct_1_lo = question_mark!(
translate_type::<pod::ElGamalCiphertext>(
memory_mapping,
ct_1_lo_addr,
invoke_context.get_check_aligned()
),
result
);
let ct_1_hi = question_mark!(
translate_type::<pod::ElGamalCiphertext>(
memory_mapping,
ct_1_hi_addr,
invoke_context.get_check_aligned()
),
result
);
if let Some(ct_result) = match op {
ops::OP_ADD => ops::add_with_lo_hi(ct_0, ct_1_lo, ct_1_hi),
ops::OP_SUB => ops::subtract_with_lo_hi(ct_0, ct_1_lo, ct_1_hi),
_ => None,
} {
*question_mark!(
translate_type_mut::<pod::ElGamalCiphertext>(
memory_mapping,
ct_result_addr,
invoke_context.get_check_aligned(),
),
result
) = ct_result;
*result = Ok(0);
} else {
*result = Ok(1);
}
}
);
declare_syscall!(
    /// ElGamal ciphertext arithmetic with a scalar operand
    SyscallZkTokenElgamalOpWithScalar,
fn call(
&mut self,
op: u64,
ct_addr: u64,
scalar: u64,
ct_result_addr: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
use solana_zk_token_sdk::zk_token_elgamal::{ops, pod};
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let cost = invoke_context.get_compute_budget().zk_token_elgamal_op_cost;
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
let ct = question_mark!(
translate_type::<pod::ElGamalCiphertext>(
memory_mapping,
ct_addr,
invoke_context.get_check_aligned()
),
result
);
if let Some(ct_result) = match op {
ops::OP_ADD => ops::add_to(ct, scalar),
ops::OP_SUB => ops::subtract_from(ct, scalar),
_ => None,
} {
*question_mark!(
translate_type_mut::<pod::ElGamalCiphertext>(
memory_mapping,
ct_result_addr,
invoke_context.get_check_aligned(),
),
result
) = ct_result;
*result = Ok(0);
} else {
*result = Ok(1);
}
}
);
declare_syscall!(
    /// Blake3
SyscallBlake3,
fn call(
&mut self,
vals_addr: u64,
vals_len: u64,
result_addr: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let compute_budget = invoke_context.get_compute_budget();
if invoke_context
.feature_set
.is_active(&update_syscall_base_costs::id())
&& compute_budget.sha256_max_slices < vals_len
{
ic_msg!(
invoke_context,
"Blake3 hashing {} sequences in one syscall is over the limit {}",
vals_len,
compute_budget.sha256_max_slices,
);
*result = Err(SyscallError::TooManySlices.into());
return;
}
question_mark!(
invoke_context
.get_compute_meter()
.consume(compute_budget.sha256_base_cost),
result
);
let hash_result = question_mark!(
translate_slice_mut::<u8>(
memory_mapping,
result_addr,
blake3::HASH_BYTES as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let mut hasher = blake3::Hasher::default();
if vals_len > 0 {
let vals = question_mark!(
translate_slice::<&[u8]>(
memory_mapping,
vals_addr,
vals_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
for val in vals.iter() {
let bytes = question_mark!(
translate_slice::<u8>(
memory_mapping,
val.as_ptr() as u64,
val.len() as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let cost = if invoke_context
.feature_set
.is_active(&update_syscall_base_costs::id())
{
compute_budget.mem_op_base_cost.max(
compute_budget
.sha256_byte_cost
.saturating_mul((val.len() as u64).saturating_div(2)),
)
} else if invoke_context
.feature_set
.is_active(&syscall_saturated_math::id())
{
compute_budget
.sha256_byte_cost
.saturating_mul((val.len() as u64).saturating_div(2))
} else {
#[allow(clippy::integer_arithmetic)]
{
compute_budget.sha256_byte_cost * (val.len() as u64 / 2)
}
};
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
hasher.hash(bytes);
}
}
hash_result.copy_from_slice(&hasher.result().to_bytes());
*result = Ok(0);
}
);
// Cross-program invocation syscalls
/// Host-side view of a caller-provided CPI account, pointing back into the
/// caller's VM memory so results can be copied out after the invocation.
struct CallerAccount<'a> {
    lamports: &'a mut u64,
    owner: &'a mut Pubkey,
    /// Data length at entry to the CPI; bounds how far an inner instruction
    /// may grow the account (see the realloc checks in `call`).
    original_data_len: usize,
    data: &'a mut [u8],
    /// VM address of the data region, used to re-translate `data` after the
    /// callee changed the account's length.
    vm_data_addr: u64,
    /// Pointer to the caller's view of the data length in VM memory,
    /// updated on realloc.
    ref_to_len_in_vm: &'a mut u64,
    /// Length prefix preceding the account data in the serialized input
    /// buffer, also updated on realloc.
    serialized_len_ptr: &'a mut u64,
    executable: bool,
    rent_epoch: u64,
}
/// Callee account indices paired with the caller-side view for writable accounts
type TranslatedAccounts<'a> = Vec<(usize, Option<CallerAccount<'a>>)>;
/// Implemented by language specific data structure translators
trait SyscallInvokeSigned<'a, 'b> {
fn get_context_mut(&self) -> Result<RefMut<&'a mut InvokeContext<'b>>, EbpfError<BpfError>>;
fn translate_instruction(
&self,
addr: u64,
memory_mapping: &MemoryMapping,
invoke_context: &mut InvokeContext,
) -> Result<Instruction, EbpfError<BpfError>>;
fn translate_accounts<'c>(
&'c self,
instruction_accounts: &[InstructionAccount],
program_indices: &[usize],
account_infos_addr: u64,
account_infos_len: u64,
memory_mapping: &MemoryMapping,
invoke_context: &mut InvokeContext,
) -> Result<TranslatedAccounts<'c>, EbpfError<BpfError>>;
fn translate_signers(
&self,
program_id: &Pubkey,
signers_seeds_addr: u64,
signers_seeds_len: u64,
memory_mapping: &MemoryMapping,
invoke_context: &InvokeContext,
) -> Result<Vec<Pubkey>, EbpfError<BpfError>>;
}
declare_syscall!(
/// Cross-program invocation called from Rust
SyscallInvokeSignedRust,
fn call(
&mut self,
instruction_addr: u64,
account_infos_addr: u64,
account_infos_len: u64,
signers_seeds_addr: u64,
signers_seeds_len: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
*result = call(
self,
instruction_addr,
account_infos_addr,
account_infos_len,
signers_seeds_addr,
signers_seeds_len,
memory_mapping,
);
}
);
impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedRust<'a, 'b> {
fn get_context_mut(&self) -> Result<RefMut<&'a mut InvokeContext<'b>>, EbpfError<BpfError>> {
self.invoke_context
.try_borrow_mut()
.map_err(|_| SyscallError::InvokeContextBorrowFailed.into())
}
fn translate_instruction(
&self,
addr: u64,
memory_mapping: &MemoryMapping,
invoke_context: &mut InvokeContext,
) -> Result<Instruction, EbpfError<BpfError>> {
let ix = translate_type::<Instruction>(
memory_mapping,
addr,
invoke_context.get_check_aligned(),
)?;
check_instruction_size(ix.accounts.len(), ix.data.len(), invoke_context)?;
let accounts = translate_slice::<AccountMeta>(
memory_mapping,
ix.accounts.as_ptr() as u64,
ix.accounts.len() as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?
.to_vec();
let data = translate_slice::<u8>(
memory_mapping,
ix.data.as_ptr() as u64,
ix.data.len() as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?
.to_vec();
Ok(Instruction {
program_id: ix.program_id,
accounts,
data,
})
}
fn translate_accounts<'c>(
&'c self,
instruction_accounts: &[InstructionAccount],
program_indices: &[usize],
account_infos_addr: u64,
account_infos_len: u64,
memory_mapping: &MemoryMapping,
invoke_context: &mut InvokeContext,
) -> Result<TranslatedAccounts<'c>, EbpfError<BpfError>> {
let account_infos = translate_slice::<AccountInfo>(
memory_mapping,
account_infos_addr,
account_infos_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
check_account_infos(account_infos.len(), invoke_context)?;
let account_info_keys = account_infos
.iter()
.map(|account_info| {
translate_type::<Pubkey>(
memory_mapping,
account_info.key as *const _ as u64,
invoke_context.get_check_aligned(),
)
})
.collect::<Result<Vec<_>, EbpfError<BpfError>>>()?;
let translate = |account_info: &AccountInfo, invoke_context: &InvokeContext| {
// Translate the account from user space
let lamports = {
// Double translate lamports out of RefCell
let ptr = translate_type::<u64>(
memory_mapping,
account_info.lamports.as_ptr() as u64,
invoke_context.get_check_aligned(),
)?;
translate_type_mut::<u64>(memory_mapping, *ptr, invoke_context.get_check_aligned())?
};
let owner = translate_type_mut::<Pubkey>(
memory_mapping,
account_info.owner as *const _ as u64,
invoke_context.get_check_aligned(),
)?;
let (data, vm_data_addr, ref_to_len_in_vm, serialized_len_ptr) = {
// Double translate data out of RefCell
let data = *translate_type::<&[u8]>(
memory_mapping,
account_info.data.as_ptr() as *const _ as u64,
invoke_context.get_check_aligned(),
)?;
invoke_context.get_compute_meter().consume(
(data.len() as u64)
.saturating_div(invoke_context.get_compute_budget().cpi_bytes_per_unit),
)?;
let translated = translate(
memory_mapping,
AccessType::Store,
(account_info.data.as_ptr() as *const u64 as u64)
.saturating_add(size_of::<u64>() as u64),
8,
)? as *mut u64;
let ref_to_len_in_vm = unsafe { &mut *translated };
let ref_of_len_in_input_buffer =
(data.as_ptr() as *const _ as u64).saturating_sub(8);
let serialized_len_ptr = translate_type_mut::<u64>(
memory_mapping,
ref_of_len_in_input_buffer,
invoke_context.get_check_aligned(),
)?;
let vm_data_addr = data.as_ptr() as u64;
(
translate_slice_mut::<u8>(
memory_mapping,
vm_data_addr,
data.len() as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?,
vm_data_addr,
ref_to_len_in_vm,
serialized_len_ptr,
)
};
Ok(CallerAccount {
lamports,
owner,
original_data_len: 0, // set later
data,
vm_data_addr,
ref_to_len_in_vm,
serialized_len_ptr,
executable: account_info.executable,
rent_epoch: account_info.rent_epoch,
})
};
get_translated_accounts(
instruction_accounts,
program_indices,
&account_info_keys,
account_infos,
invoke_context,
translate,
)
}
fn translate_signers(
&self,
program_id: &Pubkey,
signers_seeds_addr: u64,
signers_seeds_len: u64,
memory_mapping: &MemoryMapping,
invoke_context: &InvokeContext,
) -> Result<Vec<Pubkey>, EbpfError<BpfError>> {
let mut signers = Vec::new();
if signers_seeds_len > 0 {
let signers_seeds = translate_slice::<&[&[u8]]>(
memory_mapping,
signers_seeds_addr,
signers_seeds_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
if signers_seeds.len() > MAX_SIGNERS {
return Err(SyscallError::TooManySigners.into());
}
for signer_seeds in signers_seeds.iter() {
let untranslated_seeds = translate_slice::<&[u8]>(
memory_mapping,
signer_seeds.as_ptr() as *const _ as u64,
signer_seeds.len() as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
if untranslated_seeds.len() > MAX_SEEDS {
return Err(SyscallError::InstructionError(
InstructionError::MaxSeedLengthExceeded,
)
.into());
}
let seeds = untranslated_seeds
.iter()
.map(|untranslated_seed| {
translate_slice::<u8>(
memory_mapping,
untranslated_seed.as_ptr() as *const _ as u64,
untranslated_seed.len() as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)
})
.collect::<Result<Vec<_>, EbpfError<BpfError>>>()?;
let signer = Pubkey::create_program_address(&seeds, program_id)
.map_err(SyscallError::BadSeeds)?;
signers.push(signer);
}
Ok(signers)
} else {
Ok(vec![])
}
}
}
/// Rust representation of C's SolInstruction
#[derive(Debug)]
#[repr(C)]
struct SolInstruction {
program_id_addr: u64,
accounts_addr: u64,
accounts_len: u64,
data_addr: u64,
data_len: u64,
}
/// Rust representation of C's SolAccountMeta
#[derive(Debug)]
#[repr(C)]
struct SolAccountMeta {
pubkey_addr: u64,
is_writable: bool,
is_signer: bool,
}
/// Rust representation of C's SolAccountInfo
#[derive(Debug)]
#[repr(C)]
struct SolAccountInfo {
key_addr: u64,
lamports_addr: u64,
data_len: u64,
data_addr: u64,
owner_addr: u64,
rent_epoch: u64,
#[allow(dead_code)]
is_signer: bool,
#[allow(dead_code)]
is_writable: bool,
executable: bool,
}
/// Rust representation of C's SolSignerSeed
#[derive(Debug)]
#[repr(C)]
struct SolSignerSeedC {
addr: u64,
len: u64,
}
/// Rust representation of C's SolSignerSeeds
#[derive(Debug)]
#[repr(C)]
struct SolSignerSeedsC {
addr: u64,
len: u64,
}
declare_syscall!(
/// Cross-program invocation called from C
SyscallInvokeSignedC,
fn call(
&mut self,
instruction_addr: u64,
account_infos_addr: u64,
account_infos_len: u64,
signers_seeds_addr: u64,
signers_seeds_len: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
*result = call(
self,
instruction_addr,
account_infos_addr,
account_infos_len,
signers_seeds_addr,
signers_seeds_len,
memory_mapping,
);
}
);
impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedC<'a, 'b> {
fn get_context_mut(&self) -> Result<RefMut<&'a mut InvokeContext<'b>>, EbpfError<BpfError>> {
self.invoke_context
.try_borrow_mut()
.map_err(|_| SyscallError::InvokeContextBorrowFailed.into())
}
fn translate_instruction(
&self,
addr: u64,
memory_mapping: &MemoryMapping,
invoke_context: &mut InvokeContext,
) -> Result<Instruction, EbpfError<BpfError>> {
let ix_c = translate_type::<SolInstruction>(
memory_mapping,
addr,
invoke_context.get_check_aligned(),
)?;
check_instruction_size(
ix_c.accounts_len as usize,
ix_c.data_len as usize,
invoke_context,
)?;
let program_id = translate_type::<Pubkey>(
memory_mapping,
ix_c.program_id_addr,
invoke_context.get_check_aligned(),
)?;
let meta_cs = translate_slice::<SolAccountMeta>(
memory_mapping,
ix_c.accounts_addr,
ix_c.accounts_len as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
let data = translate_slice::<u8>(
memory_mapping,
ix_c.data_addr,
ix_c.data_len as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?
.to_vec();
let accounts = meta_cs
.iter()
.map(|meta_c| {
let pubkey = translate_type::<Pubkey>(
memory_mapping,
meta_c.pubkey_addr,
invoke_context.get_check_aligned(),
)?;
Ok(AccountMeta {
pubkey: *pubkey,
is_signer: meta_c.is_signer,
is_writable: meta_c.is_writable,
})
})
.collect::<Result<Vec<AccountMeta>, EbpfError<BpfError>>>()?;
Ok(Instruction {
program_id: *program_id,
accounts,
data,
})
}
fn translate_accounts<'c>(
&'c self,
instruction_accounts: &[InstructionAccount],
program_indices: &[usize],
account_infos_addr: u64,
account_infos_len: u64,
memory_mapping: &MemoryMapping,
invoke_context: &mut InvokeContext,
) -> Result<TranslatedAccounts<'c>, EbpfError<BpfError>> {
let account_infos = translate_slice::<SolAccountInfo>(
memory_mapping,
account_infos_addr,
account_infos_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
check_account_infos(account_infos.len(), invoke_context)?;
let account_info_keys = account_infos
.iter()
.map(|account_info| {
translate_type::<Pubkey>(
memory_mapping,
account_info.key_addr,
invoke_context.get_check_aligned(),
)
})
.collect::<Result<Vec<_>, EbpfError<BpfError>>>()?;
let translate = |account_info: &SolAccountInfo, invoke_context: &InvokeContext| {
// Translate the account from user space
let lamports = translate_type_mut::<u64>(
memory_mapping,
account_info.lamports_addr,
invoke_context.get_check_aligned(),
)?;
let owner = translate_type_mut::<Pubkey>(
memory_mapping,
account_info.owner_addr,
invoke_context.get_check_aligned(),
)?;
let vm_data_addr = account_info.data_addr;
invoke_context.get_compute_meter().consume(
account_info
.data_len
.saturating_div(invoke_context.get_compute_budget().cpi_bytes_per_unit),
)?;
let data = translate_slice_mut::<u8>(
memory_mapping,
vm_data_addr,
account_info.data_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
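            // Recover the VM address of this account's `data_len` field: its
            // host-side offset from the first `SolAccountInfo` equals its
            // offset inside the caller's `account_infos` array in VM space,
            // so the Store-access check below validates the right location.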
let first_info_addr = account_infos.first().ok_or(SyscallError::InstructionError(
InstructionError::InvalidArgument,
))? as *const _ as u64;
let addr = &account_info.data_len as *const u64 as u64;
let vm_addr = if invoke_context
.feature_set
.is_active(&syscall_saturated_math::id())
{
account_infos_addr.saturating_add(addr.saturating_sub(first_info_addr))
} else {
#[allow(clippy::integer_arithmetic)]
{
account_infos_addr + (addr - first_info_addr)
}
};
let _ = translate(
memory_mapping,
AccessType::Store,
vm_addr,
size_of::<u64>() as u64,
)?;
let ref_to_len_in_vm = unsafe { &mut *(addr as *mut u64) };
let ref_of_len_in_input_buffer =
(account_info.data_addr as *mut u8 as u64).saturating_sub(8);
let serialized_len_ptr = translate_type_mut::<u64>(
memory_mapping,
ref_of_len_in_input_buffer,
invoke_context.get_check_aligned(),
)?;
Ok(CallerAccount {
lamports,
owner,
original_data_len: 0, // set later
data,
vm_data_addr,
ref_to_len_in_vm,
serialized_len_ptr,
executable: account_info.executable,
rent_epoch: account_info.rent_epoch,
})
};
get_translated_accounts(
instruction_accounts,
program_indices,
&account_info_keys,
account_infos,
invoke_context,
translate,
)
}
fn translate_signers(
&self,
program_id: &Pubkey,
signers_seeds_addr: u64,
signers_seeds_len: u64,
memory_mapping: &MemoryMapping,
invoke_context: &InvokeContext,
) -> Result<Vec<Pubkey>, EbpfError<BpfError>> {
if signers_seeds_len > 0 {
let signers_seeds = translate_slice::<SolSignerSeedsC>(
memory_mapping,
signers_seeds_addr,
signers_seeds_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
if signers_seeds.len() > MAX_SIGNERS {
return Err(SyscallError::TooManySigners.into());
}
Ok(signers_seeds
.iter()
.map(|signer_seeds| {
let seeds = translate_slice::<SolSignerSeedC>(
memory_mapping,
signer_seeds.addr,
signer_seeds.len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
if seeds.len() > MAX_SEEDS {
return Err(SyscallError::InstructionError(
InstructionError::MaxSeedLengthExceeded,
)
.into());
}
let seeds_bytes = seeds
.iter()
.map(|seed| {
translate_slice::<u8>(
memory_mapping,
seed.addr,
seed.len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)
})
.collect::<Result<Vec<_>, EbpfError<BpfError>>>()?;
Pubkey::create_program_address(&seeds_bytes, program_id)
.map_err(|err| SyscallError::BadSeeds(err).into())
})
.collect::<Result<Vec<_>, EbpfError<BpfError>>>()?)
} else {
Ok(vec![])
}
}
}
fn get_translated_accounts<'a, T, F>(
instruction_accounts: &[InstructionAccount],
program_indices: &[usize],
account_info_keys: &[&Pubkey],
account_infos: &[T],
invoke_context: &mut InvokeContext,
do_translate: F,
) -> Result<TranslatedAccounts<'a>, EbpfError<BpfError>>
where
F: Fn(&T, &InvokeContext) -> Result<CallerAccount<'a>, EbpfError<BpfError>>,
{
let instruction_context = invoke_context
.transaction_context
.get_current_instruction_context()
.map_err(SyscallError::InstructionError)?;
let mut accounts = Vec::with_capacity(instruction_accounts.len().saturating_add(1));
let program_account_index = program_indices
.last()
.ok_or(SyscallError::InstructionError(
InstructionError::MissingAccount,
))?;
accounts.push((*program_account_index, None));
visit_each_account_once::<EbpfError<BpfError>>(
instruction_accounts,
&mut |_index: usize, instruction_account: &InstructionAccount| {
let account = invoke_context
.transaction_context
.get_account_at_index(instruction_account.index_in_transaction)
.map_err(SyscallError::InstructionError)?;
let account_key = invoke_context
.transaction_context
.get_key_of_account_at_index(instruction_account.index_in_transaction)
.map_err(SyscallError::InstructionError)?;
if account.borrow().executable() {
// Use the known account
if invoke_context
.feature_set
.is_active(&executables_incur_cpi_data_cost::id())
{
invoke_context
.get_compute_meter()
.consume((account.borrow().data().len() as u64).saturating_div(
invoke_context.get_compute_budget().cpi_bytes_per_unit,
))?;
}
accounts.push((instruction_account.index_in_transaction, None));
} else if let Some(caller_account_index) =
account_info_keys.iter().position(|key| *key == account_key)
{
let mut caller_account = do_translate(
account_infos
.get(caller_account_index)
.ok_or(SyscallError::InvalidLength)?,
invoke_context,
)?;
{
let mut account = account.borrow_mut();
account.copy_into_owner_from_slice(caller_account.owner.as_ref());
account.set_data_from_slice(caller_account.data);
account.set_lamports(*caller_account.lamports);
account.set_executable(caller_account.executable);
account.set_rent_epoch(caller_account.rent_epoch);
}
let caller_account = if instruction_account.is_writable {
let orig_data_len_index = instruction_account
.index_in_caller
.saturating_sub(instruction_context.get_number_of_program_accounts());
let orig_data_lens = invoke_context
.get_orig_account_lengths()
.map_err(SyscallError::InstructionError)?;
if orig_data_len_index < orig_data_lens.len() {
caller_account.original_data_len = *orig_data_lens
.get(orig_data_len_index)
.ok_or(SyscallError::InvalidLength)?;
} else {
ic_msg!(
invoke_context,
"Internal error: index mismatch for account {}",
account_key
);
return Err(SyscallError::InstructionError(
InstructionError::MissingAccount,
)
.into());
}
Some(caller_account)
} else {
None
};
accounts.push((instruction_account.index_in_transaction, caller_account));
} else {
ic_msg!(
invoke_context,
"Instruction references an unknown account {}",
account_key
);
return Err(
SyscallError::InstructionError(InstructionError::MissingAccount).into(),
);
}
Ok(())
},
SyscallError::InstructionError(InstructionError::NotEnoughAccountKeys).into(),
)?;
Ok(accounts)
}
fn check_instruction_size(
num_accounts: usize,
data_len: usize,
invoke_context: &mut InvokeContext,
) -> Result<(), EbpfError<BpfError>> {
let size = num_accounts
.saturating_mul(size_of::<AccountMeta>())
.saturating_add(data_len);
let max_size = invoke_context.get_compute_budget().max_cpi_instruction_size;
if size > max_size {
return Err(SyscallError::InstructionTooLarge(size, max_size).into());
}
Ok(())
}
fn check_account_infos(
len: usize,
invoke_context: &mut InvokeContext,
) -> Result<(), EbpfError<BpfError>> {
let adjusted_len = if invoke_context
.feature_set
.is_active(&syscall_saturated_math::id())
{
len.saturating_mul(size_of::<Pubkey>())
} else {
#[allow(clippy::integer_arithmetic)]
{
len * size_of::<Pubkey>()
}
};
if adjusted_len > invoke_context.get_compute_budget().max_cpi_instruction_size {
        // Cap the number of account_infos a caller can pass, approximating
        // the maximum number of accounts that could be passed in an instruction
return Err(SyscallError::TooManyAccounts.into());
};
Ok(())
}
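/// Disallow invoking the loaders directly (the upgradeable loader still
/// accepts its upgrade, set-authority, and close instructions) and, when
/// `prevent_calling_precompiles_as_programs` is active, precompiles.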
fn check_authorized_program(
program_id: &Pubkey,
instruction_data: &[u8],
invoke_context: &InvokeContext,
) -> Result<(), EbpfError<BpfError>> {
#[allow(clippy::blocks_in_if_conditions)]
if native_loader::check_id(program_id)
|| bpf_loader::check_id(program_id)
|| bpf_loader_deprecated::check_id(program_id)
|| (bpf_loader_upgradeable::check_id(program_id)
&& !(bpf_loader_upgradeable::is_upgrade_instruction(instruction_data)
|| bpf_loader_upgradeable::is_set_authority_instruction(instruction_data)
|| bpf_loader_upgradeable::is_close_instruction(instruction_data)))
|| (invoke_context
.feature_set
.is_active(&prevent_calling_precompiles_as_programs::id())
&& is_precompile(program_id, |feature_id: &Pubkey| {
invoke_context.feature_set.is_active(feature_id)
}))
{
return Err(SyscallError::ProgramNotSupported(*program_id).into());
}
Ok(())
}
/// Call process instruction, common to both Rust and C
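///
/// Rough flow (mirroring the body below): translate the caller's instruction
/// and signer seeds out of VM memory, have the invoke context prepare and
/// verify the instruction, translate the caller's account infos, run
/// `process_instruction`, then copy lamports, owner, and (possibly resized)
/// data back into the caller's views.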
fn call<'a, 'b: 'a>(
syscall: &mut dyn SyscallInvokeSigned<'a, 'b>,
instruction_addr: u64,
account_infos_addr: u64,
account_infos_len: u64,
signers_seeds_addr: u64,
signers_seeds_len: u64,
memory_mapping: &MemoryMapping,
) -> Result<u64, EbpfError<BpfError>> {
let mut invoke_context = syscall.get_context_mut()?;
invoke_context
.get_compute_meter()
.consume(invoke_context.get_compute_budget().invoke_units)?;
let do_support_realloc = invoke_context
.feature_set
.is_active(&do_support_realloc::id());
// Translate and verify caller's data
let instruction =
syscall.translate_instruction(instruction_addr, memory_mapping, *invoke_context)?;
let transaction_context = &invoke_context.transaction_context;
let instruction_context = transaction_context
.get_current_instruction_context()
.map_err(SyscallError::InstructionError)?;
let caller_program_id = instruction_context
.get_program_key(transaction_context)
.map_err(SyscallError::InstructionError)?;
let signers = syscall.translate_signers(
caller_program_id,
signers_seeds_addr,
signers_seeds_len,
memory_mapping,
*invoke_context,
)?;
let (instruction_accounts, program_indices) = invoke_context
.prepare_instruction(&instruction, &signers)
.map_err(SyscallError::InstructionError)?;
check_authorized_program(&instruction.program_id, &instruction.data, *invoke_context)?;
let mut accounts = syscall.translate_accounts(
&instruction_accounts,
&program_indices,
account_infos_addr,
account_infos_len,
memory_mapping,
*invoke_context,
)?;
// Process instruction
let mut compute_units_consumed = 0;
invoke_context
.process_instruction(
&instruction.data,
&instruction_accounts,
&program_indices,
&mut compute_units_consumed,
&mut ExecuteTimings::default(),
)
.map_err(SyscallError::InstructionError)?;
// Copy results back to caller
for (callee_account_index, caller_account) in accounts.iter_mut() {
if let Some(caller_account) = caller_account {
let callee_account = invoke_context
.transaction_context
.get_account_at_index(*callee_account_index)
.map_err(SyscallError::InstructionError)?
.borrow();
*caller_account.lamports = callee_account.lamports();
*caller_account.owner = *callee_account.owner();
let new_len = callee_account.data().len();
if caller_account.data.len() != new_len {
if !do_support_realloc && !caller_account.data.is_empty() {
// Only support for `CreateAccount` at this time.
// Need a way to limit total realloc size across multiple CPI calls
ic_msg!(
invoke_context,
"Inner instructions do not support realloc, only SystemProgram::CreateAccount",
);
return Err(
SyscallError::InstructionError(InstructionError::InvalidRealloc).into(),
);
}
let data_overflow = if do_support_realloc {
if invoke_context
.feature_set
.is_active(&syscall_saturated_math::id())
{
new_len
> caller_account
.original_data_len
.saturating_add(MAX_PERMITTED_DATA_INCREASE)
} else {
#[allow(clippy::integer_arithmetic)]
{
new_len > caller_account.original_data_len + MAX_PERMITTED_DATA_INCREASE
}
}
} else if invoke_context
.feature_set
.is_active(&syscall_saturated_math::id())
{
new_len
> caller_account
.data
.len()
.saturating_add(MAX_PERMITTED_DATA_INCREASE)
} else {
#[allow(clippy::integer_arithmetic)]
{
new_len > caller_account.data.len() + MAX_PERMITTED_DATA_INCREASE
}
};
if data_overflow {
ic_msg!(
invoke_context,
"Account data size realloc limited to {} in inner instructions",
MAX_PERMITTED_DATA_INCREASE
);
return Err(
SyscallError::InstructionError(InstructionError::InvalidRealloc).into(),
);
}
if new_len < caller_account.data.len() {
caller_account
.data
.get_mut(new_len..)
.ok_or(SyscallError::InstructionError(
InstructionError::AccountDataTooSmall,
))?
.fill(0);
}
caller_account.data = translate_slice_mut::<u8>(
memory_mapping,
caller_account.vm_data_addr,
new_len as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;
*caller_account.ref_to_len_in_vm = new_len as u64;
*caller_account.serialized_len_ptr = new_len as u64;
}
let to_slice = &mut caller_account.data;
let from_slice = callee_account
.data()
.get(0..new_len)
.ok_or(SyscallError::InvalidLength)?;
if to_slice.len() != from_slice.len() {
return Err(
SyscallError::InstructionError(InstructionError::AccountDataTooSmall).into(),
);
}
to_slice.copy_from_slice(from_slice);
}
}
Ok(SUCCESS)
}
declare_syscall!(
/// Set return data
SyscallSetReturnData,
fn call(
&mut self,
addr: u64,
len: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let mut invoke_context = question_mark!(
self.invoke_context
.try_borrow_mut()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let budget = invoke_context.get_compute_budget();
let cost = if invoke_context
.feature_set
.is_active(&syscall_saturated_math::id())
{
len.saturating_div(budget.cpi_bytes_per_unit)
.saturating_add(budget.syscall_base_cost)
} else {
#[allow(clippy::integer_arithmetic)]
{
len / budget.cpi_bytes_per_unit + budget.syscall_base_cost
}
};
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
if len > MAX_RETURN_DATA as u64 {
*result = Err(SyscallError::ReturnDataTooLarge(len, MAX_RETURN_DATA as u64).into());
return;
}
let return_data = if len == 0 {
Vec::new()
} else {
question_mark!(
translate_slice::<u8>(
memory_mapping,
addr,
len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
)
.to_vec()
};
let transaction_context = &mut invoke_context.transaction_context;
let program_id = *question_mark!(
transaction_context
.get_current_instruction_context()
.and_then(
|instruction_context| instruction_context.get_program_key(transaction_context)
)
.map_err(SyscallError::InstructionError),
result
);
question_mark!(
transaction_context
.set_return_data(program_id, return_data)
.map_err(SyscallError::InstructionError),
result
);
*result = Ok(0);
}
);
declare_syscall!(
/// Get return data
SyscallGetReturnData,
fn call(
&mut self,
return_data_addr: u64,
mut length: u64,
program_id_addr: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let budget = invoke_context.get_compute_budget();
question_mark!(
invoke_context
.get_compute_meter()
.consume(budget.syscall_base_cost),
result
);
let (program_id, return_data) = invoke_context.transaction_context.get_return_data();
length = length.min(return_data.len() as u64);
if length != 0 {
let cost = if invoke_context
.feature_set
.is_active(&syscall_saturated_math::id())
{
length
.saturating_add(size_of::<Pubkey>() as u64)
.saturating_div(budget.cpi_bytes_per_unit)
} else {
#[allow(clippy::integer_arithmetic)]
{
(length + size_of::<Pubkey>() as u64) / budget.cpi_bytes_per_unit
}
};
question_mark!(invoke_context.get_compute_meter().consume(cost), result);
let return_data_result = question_mark!(
translate_slice_mut::<u8>(
memory_mapping,
return_data_addr,
length,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let to_slice = return_data_result;
let from_slice = question_mark!(
return_data
.get(..length as usize)
.ok_or(SyscallError::InvokeContextBorrowFailed),
result
);
if to_slice.len() != from_slice.len() {
*result = Err(SyscallError::InvalidLength.into());
return;
}
to_slice.copy_from_slice(from_slice);
let program_id_result = question_mark!(
translate_type_mut::<Pubkey>(
memory_mapping,
program_id_addr,
invoke_context.get_check_aligned()
),
result
);
*program_id_result = *program_id;
}
        // Return the full length of the return data, not the number of bytes copied
*result = Ok(return_data.len() as u64);
}
);
declare_syscall!(
/// Log data handling
SyscallLogData,
fn call(
&mut self,
addr: u64,
len: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let budget = invoke_context.get_compute_budget();
question_mark!(
invoke_context
.get_compute_meter()
.consume(budget.syscall_base_cost),
result
);
let untranslated_fields = question_mark!(
translate_slice::<&[u8]>(
memory_mapping,
addr,
len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
question_mark!(
invoke_context.get_compute_meter().consume(
budget
.syscall_base_cost
.saturating_mul(untranslated_fields.len() as u64)
),
result
);
question_mark!(
invoke_context.get_compute_meter().consume(
untranslated_fields
.iter()
.fold(0, |total, e| total.saturating_add(e.len() as u64))
),
result
);
let mut fields = Vec::with_capacity(untranslated_fields.len());
for untranslated_field in untranslated_fields {
fields.push(question_mark!(
translate_slice::<u8>(
memory_mapping,
untranslated_field.as_ptr() as *const _ as u64,
untranslated_field.len() as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
));
}
let log_collector = invoke_context.get_log_collector();
stable_log::program_data(&log_collector, &fields);
*result = Ok(0);
}
);
declare_syscall!(
    /// Get a processed sibling instruction
SyscallGetProcessedSiblingInstruction,
fn call(
&mut self,
index: u64,
meta_addr: u64,
program_id_addr: u64,
data_addr: u64,
accounts_addr: u64,
memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let budget = invoke_context.get_compute_budget();
question_mark!(
invoke_context
.get_compute_meter()
.consume(budget.syscall_base_cost),
result
);
let stack_height = invoke_context.get_stack_height();
let instruction_trace = invoke_context.transaction_context.get_instruction_trace();
let instruction_context = if stack_height == TRANSACTION_LEVEL_STACK_HEIGHT {
// pick one of the top-level instructions
instruction_trace
.len()
.checked_sub(2)
.and_then(|result| result.checked_sub(index as usize))
.and_then(|index| instruction_trace.get(index))
.and_then(|instruction_list| instruction_list.first())
} else {
// Walk the last list of inner instructions
instruction_trace.last().and_then(|inners| {
let mut current_index = 0;
inners.iter().rev().skip(1).find(|instruction_context| {
if stack_height == instruction_context.get_stack_height() {
if index == current_index {
return true;
} else {
current_index = current_index.saturating_add(1);
}
}
false
})
})
};
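        // Two-step protocol: the lengths are always written back through
        // `meta_addr`, but the program id, data, and account metas are copied
        // only when the caller-provided `data_len` and `accounts_len` already
        // match, so a caller can probe once and retry with sized buffers.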
if let Some(instruction_context) = instruction_context {
let ProcessedSiblingInstruction {
data_len,
accounts_len,
} = question_mark!(
translate_type_mut::<ProcessedSiblingInstruction>(
memory_mapping,
meta_addr,
invoke_context.get_check_aligned(),
),
result
);
if *data_len == instruction_context.get_instruction_data().len()
&& *accounts_len == instruction_context.get_number_of_instruction_accounts()
{
let program_id = question_mark!(
translate_type_mut::<Pubkey>(
memory_mapping,
program_id_addr,
invoke_context.get_check_aligned()
),
result
);
let data = question_mark!(
translate_slice_mut::<u8>(
memory_mapping,
data_addr,
*data_len as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
let accounts = question_mark!(
translate_slice_mut::<AccountMeta>(
memory_mapping,
accounts_addr,
*accounts_len as u64,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
),
result
);
*program_id =
instruction_context.get_program_id(invoke_context.transaction_context);
data.clone_from_slice(instruction_context.get_instruction_data());
let account_metas = question_mark!(
(instruction_context.get_number_of_program_accounts()
..instruction_context.get_number_of_accounts())
.map(|index_in_instruction| Ok(AccountMeta {
pubkey: *invoke_context.get_key_of_account_at_index(
instruction_context
.get_index_in_transaction(index_in_instruction)?
)?,
is_signer: instruction_context.is_signer(index_in_instruction)?,
is_writable: instruction_context.is_writable(index_in_instruction)?,
}))
.collect::<Result<Vec<_>, InstructionError>>()
.map_err(SyscallError::InstructionError),
result
);
accounts.clone_from_slice(account_metas.as_slice());
}
*data_len = instruction_context.get_instruction_data().len();
*accounts_len = instruction_context.get_number_of_instruction_accounts();
*result = Ok(true as u64);
return;
}
*result = Ok(false as u64);
}
);
declare_syscall!(
/// Get current call stack height
SyscallGetStackHeight,
fn call(
&mut self,
_arg1: u64,
_arg2: u64,
_arg3: u64,
_arg4: u64,
_arg5: u64,
_memory_mapping: &MemoryMapping,
result: &mut Result<u64, EbpfError<BpfError>>,
) {
let invoke_context = question_mark!(
self.invoke_context
.try_borrow()
.map_err(|_| SyscallError::InvokeContextBorrowFailed),
result
);
let budget = invoke_context.get_compute_budget();
question_mark!(
invoke_context
.get_compute_meter()
.consume(budget.syscall_base_cost),
result
);
*result = Ok(invoke_context.get_stack_height() as u64);
}
);
#[cfg(test)]
mod tests {
#[allow(deprecated)]
use solana_sdk::sysvar::fees::Fees;
use {
super::*,
solana_program_runtime::{invoke_context::InvokeContext, sysvar_cache::SysvarCache},
solana_rbpf::{
ebpf::HOST_ALIGN, memory_region::MemoryRegion, user_error::UserError, vm::Config,
},
solana_sdk::{
account::AccountSharedData,
bpf_loader,
fee_calculator::FeeCalculator,
hash::hashv,
program::check_type_assumptions,
sysvar::{clock::Clock, epoch_schedule::EpochSchedule, rent::Rent},
transaction_context::TransactionContext,
},
std::{borrow::Cow, str::FromStr},
};
macro_rules! assert_access_violation {
($result:expr, $va:expr, $len:expr) => {
match $result {
Err(EbpfError::AccessViolation(_, _, va, len, _)) if $va == va && $len == len => (),
Err(EbpfError::StackAccessViolation(_, _, va, len, _))
if $va == va && $len == len => {}
_ => panic!(),
}
};
}
macro_rules! prepare_mockup {
($invoke_context:ident,
$transaction_context:ident,
$program_key:ident,
$loader_key:expr $(,)?) => {
let $program_key = Pubkey::new_unique();
let mut $transaction_context = TransactionContext::new(
vec![
(
$loader_key,
AccountSharedData::new(0, 0, &native_loader::id()),
),
($program_key, AccountSharedData::new(0, 0, &$loader_key)),
],
1,
1,
);
let mut $invoke_context = InvokeContext::new_mock(&mut $transaction_context, &[]);
$invoke_context.push(&[], &[0, 1], &[]).unwrap();
};
}
#[allow(dead_code)]
struct MockSlice {
pub vm_addr: u64,
pub len: usize,
}
#[test]
fn test_translate() {
const START: u64 = 0x100000000;
const LENGTH: u64 = 1000;
let data = vec![0u8; LENGTH as usize];
let addr = data.as_ptr() as u64;
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion::new_readonly(&data, START),
],
&config,
)
.unwrap();
let cases = vec![
(true, START, 0, addr),
(true, START, 1, addr),
(true, START, LENGTH, addr),
(true, START + 1, LENGTH - 1, addr + 1),
(false, START + 1, LENGTH, 0),
(true, START + LENGTH - 1, 1, addr + LENGTH - 1),
(true, START + LENGTH, 0, addr + LENGTH),
(false, START + LENGTH, 1, 0),
(false, START, LENGTH + 1, 0),
(false, 0, 0, 0),
(false, 0, 1, 0),
(false, START - 1, 0, 0),
(false, START - 1, 1, 0),
(true, START + LENGTH / 2, LENGTH / 2, addr + LENGTH / 2),
];
for (ok, start, length, value) in cases {
if ok {
assert_eq!(
translate(&memory_mapping, AccessType::Load, start, length).unwrap(),
value
)
} else {
assert!(translate(&memory_mapping, AccessType::Load, start, length).is_err())
}
}
}
#[test]
fn test_translate_type() {
// Pubkey
let pubkey = solana_sdk::pubkey::new_rand();
let addr = &pubkey as *const _ as u64;
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: addr,
vm_addr: 0x100000000,
len: std::mem::size_of::<Pubkey>() as u64,
vm_gap_shift: 63,
is_writable: false,
},
],
&config,
)
.unwrap();
let translated_pubkey =
translate_type::<Pubkey>(&memory_mapping, 0x100000000, true).unwrap();
assert_eq!(pubkey, *translated_pubkey);
// Instruction
let instruction = Instruction::new_with_bincode(
solana_sdk::pubkey::new_rand(),
&"foobar",
vec![AccountMeta::new(solana_sdk::pubkey::new_rand(), false)],
);
let addr = &instruction as *const _ as u64;
let mut memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: addr,
vm_addr: 0x100000000,
len: std::mem::size_of::<Instruction>() as u64,
vm_gap_shift: 63,
is_writable: false,
},
],
&config,
)
.unwrap();
let translated_instruction =
translate_type::<Instruction>(&memory_mapping, 0x100000000, true).unwrap();
assert_eq!(instruction, *translated_instruction);
memory_mapping.resize_region::<BpfError>(1, 1).unwrap();
assert!(translate_type::<Instruction>(&memory_mapping, 0x100000000, true).is_err());
}
#[test]
fn test_translate_slice() {
// zero len
let good_data = vec![1u8, 2, 3, 4, 5];
let data: Vec<u8> = vec![];
        assert_eq!(0x1 as *const u8, data.as_ptr()); // empty Vec<u8> uses a dangling, aligned pointer
let addr = good_data.as_ptr() as *const _ as u64;
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: addr,
vm_addr: 0x100000000,
len: good_data.len() as u64,
vm_gap_shift: 63,
is_writable: false,
},
],
&config,
)
.unwrap();
let translated_data =
translate_slice::<u8>(&memory_mapping, data.as_ptr() as u64, 0, true, true).unwrap();
assert_eq!(data, translated_data);
assert_eq!(0, translated_data.len());
// u8
let mut data = vec![1u8, 2, 3, 4, 5];
let addr = data.as_ptr() as *const _ as u64;
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: addr,
vm_addr: 0x100000000,
len: data.len() as u64,
vm_gap_shift: 63,
is_writable: false,
},
],
&config,
)
.unwrap();
let translated_data =
translate_slice::<u8>(&memory_mapping, 0x100000000, data.len() as u64, true, true)
.unwrap();
assert_eq!(data, translated_data);
*data.first_mut().unwrap() = 10;
assert_eq!(data, translated_data);
assert!(
translate_slice::<u8>(&memory_mapping, data.as_ptr() as u64, u64::MAX, true, true)
.is_err()
);
assert!(translate_slice::<u8>(
&memory_mapping,
0x100000000 - 1,
data.len() as u64,
true,
true
)
.is_err());
// u64
let mut data = vec![1u64, 2, 3, 4, 5];
let addr = data.as_ptr() as *const _ as u64;
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: addr,
vm_addr: 0x100000000,
len: (data.len() * size_of::<u64>()) as u64,
vm_gap_shift: 63,
is_writable: false,
},
],
&config,
)
.unwrap();
let translated_data =
translate_slice::<u64>(&memory_mapping, 0x100000000, data.len() as u64, true, true)
.unwrap();
assert_eq!(data, translated_data);
*data.first_mut().unwrap() = 10;
assert_eq!(data, translated_data);
assert!(
translate_slice::<u64>(&memory_mapping, 0x100000000, u64::MAX, true, true).is_err()
);
// Pubkeys
let mut data = vec![solana_sdk::pubkey::new_rand(); 5];
let addr = data.as_ptr() as *const _ as u64;
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: addr,
vm_addr: 0x100000000,
len: (data.len() * std::mem::size_of::<Pubkey>()) as u64,
vm_gap_shift: 63,
is_writable: false,
},
],
&config,
)
.unwrap();
let translated_data =
translate_slice::<Pubkey>(&memory_mapping, 0x100000000, data.len() as u64, true, true)
.unwrap();
assert_eq!(data, translated_data);
*data.first_mut().unwrap() = solana_sdk::pubkey::new_rand(); // Both should point to same place
assert_eq!(data, translated_data);
}
#[test]
fn test_translate_string_and_do() {
let string = "Gaggablaghblagh!";
let addr = string.as_ptr() as *const _ as u64;
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: addr,
vm_addr: 0x100000000,
len: string.len() as u64,
vm_gap_shift: 63,
is_writable: false,
},
],
&config,
)
.unwrap();
assert_eq!(
42,
translate_string_and_do(
&memory_mapping,
0x100000000,
string.len() as u64,
true,
true,
&mut |string: &str| {
assert_eq!(string, "Gaggablaghblagh!");
Ok(42)
}
)
.unwrap()
);
}
#[test]
#[should_panic(expected = "UserError(SyscallError(Abort))")]
fn test_syscall_abort() {
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
let config = Config::default();
let memory_mapping =
MemoryMapping::new::<UserError>(vec![MemoryRegion::default()], &config).unwrap();
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
SyscallAbort::call(
&mut SyscallAbort {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
},
0,
0,
0,
0,
0,
&memory_mapping,
&mut result,
);
result.unwrap();
}
#[test]
#[should_panic(expected = "UserError(SyscallError(Panic(\"Gaggablaghblagh!\", 42, 84)))")]
fn test_syscall_sol_panic() {
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
let mut syscall_panic = SyscallPanic {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
let string = "Gaggablaghblagh!";
let addr = string.as_ptr() as *const _ as u64;
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: addr,
vm_addr: 0x100000000,
len: string.len() as u64,
vm_gap_shift: 63,
is_writable: false,
},
],
&config,
)
.unwrap();
syscall_panic
.invoke_context
.borrow_mut()
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(string.len() as u64 - 1);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall_panic.call(
0x100000000,
string.len() as u64,
42,
84,
0,
&memory_mapping,
&mut result,
);
assert_eq!(
Err(EbpfError::UserError(BpfError::SyscallError(
SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded)
))),
result
);
syscall_panic
.invoke_context
.borrow_mut()
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(string.len() as u64);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall_panic.call(
0x100000000,
string.len() as u64,
42,
84,
0,
&memory_mapping,
&mut result,
);
result.unwrap();
}
#[test]
fn test_syscall_sol_log() {
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
let mut syscall_sol_log = SyscallLog {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
let string = "Gaggablaghblagh!";
let addr = string.as_ptr() as *const _ as u64;
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: addr,
vm_addr: 0x100000000,
len: string.len() as u64,
vm_gap_shift: 63,
is_writable: false,
},
],
&config,
)
.unwrap();
syscall_sol_log
.invoke_context
.borrow_mut()
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(400 - 1);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall_sol_log.call(
0x100000001, // AccessViolation
string.len() as u64,
0,
0,
0,
&memory_mapping,
&mut result,
);
assert_access_violation!(result, 0x100000001, string.len() as u64);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall_sol_log.call(
0x100000000,
string.len() as u64 * 2, // AccessViolation
0,
0,
0,
&memory_mapping,
&mut result,
);
assert_access_violation!(result, 0x100000000, string.len() as u64 * 2);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall_sol_log.call(
0x100000000,
string.len() as u64,
0,
0,
0,
&memory_mapping,
&mut result,
);
result.unwrap();
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall_sol_log.call(
0x100000000,
string.len() as u64,
0,
0,
0,
&memory_mapping,
&mut result,
);
assert_eq!(
Err(EbpfError::UserError(BpfError::SyscallError(
SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded)
))),
result
);
assert_eq!(
syscall_sol_log
.invoke_context
.borrow()
.get_log_collector()
.unwrap()
.borrow()
.get_recorded_content(),
&["Program log: Gaggablaghblagh!".to_string()]
);
}
#[test]
fn test_syscall_sol_log_u64() {
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
let cost = invoke_context.get_compute_budget().log_64_units;
let mut syscall_sol_log_u64 = SyscallLogU64 {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
syscall_sol_log_u64
.invoke_context
.borrow_mut()
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(cost);
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(vec![], &config).unwrap();
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall_sol_log_u64.call(1, 2, 3, 4, 5, &memory_mapping, &mut result);
result.unwrap();
assert_eq!(
syscall_sol_log_u64
.invoke_context
.borrow()
.get_log_collector()
.unwrap()
.borrow()
.get_recorded_content(),
&["Program log: 0x1, 0x2, 0x3, 0x4, 0x5".to_string()]
);
}
#[test]
fn test_syscall_sol_pubkey() {
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
let cost = invoke_context.get_compute_budget().log_pubkey_units;
let mut syscall_sol_pubkey = SyscallLogPubkey {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
let pubkey = Pubkey::from_str("MoqiU1vryuCGQSxFKA1SZ316JdLEFFhoAu6cKUNk7dN").unwrap();
let addr = pubkey.as_ref().first().unwrap() as *const _ as u64;
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: addr,
vm_addr: 0x100000000,
len: 32,
vm_gap_shift: 63,
is_writable: false,
},
],
&config,
)
.unwrap();
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall_sol_pubkey.call(
0x100000001, // AccessViolation
32,
0,
0,
0,
&memory_mapping,
&mut result,
);
assert_access_violation!(result, 0x100000001, 32);
syscall_sol_pubkey
.invoke_context
.borrow_mut()
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(1);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall_sol_pubkey.call(100, 32, 0, 0, 0, &memory_mapping, &mut result);
assert_eq!(
Err(EbpfError::UserError(BpfError::SyscallError(
SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded)
))),
result
);
syscall_sol_pubkey
.invoke_context
.borrow_mut()
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(cost);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall_sol_pubkey.call(0x100000000, 0, 0, 0, 0, &memory_mapping, &mut result);
result.unwrap();
assert_eq!(
syscall_sol_pubkey
.invoke_context
.borrow()
.get_log_collector()
.unwrap()
.borrow()
.get_recorded_content(),
&["Program log: MoqiU1vryuCGQSxFKA1SZ316JdLEFFhoAu6cKUNk7dN".to_string()]
);
}
#[test]
fn test_syscall_sol_alloc_free() {
let config = Config::default();
// large alloc
{
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
let mut heap = AlignedMemory::new_with_size(100, HOST_ALIGN);
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion::new_readonly(&[], ebpf::MM_PROGRAM_START),
MemoryRegion::new_writable_gapped(&mut [], ebpf::MM_STACK_START, 4096),
MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
MemoryRegion::new_writable(&mut [], ebpf::MM_INPUT_START),
],
&config,
)
.unwrap();
invoke_context
.set_allocator(Rc::new(RefCell::new(BpfAllocator::new(
heap,
ebpf::MM_HEAP_START,
))))
.unwrap();
let mut syscall = SyscallAllocFree {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result);
assert_ne!(result.unwrap(), 0);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result);
assert_eq!(result.unwrap(), 0);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(u64::MAX, 0, 0, 0, 0, &memory_mapping, &mut result);
assert_eq!(result.unwrap(), 0);
}
// many small unaligned allocs
{
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
let mut heap = AlignedMemory::new_with_size(100, HOST_ALIGN);
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion::new_readonly(&[], ebpf::MM_PROGRAM_START),
MemoryRegion::new_writable_gapped(&mut [], ebpf::MM_STACK_START, 4096),
MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
MemoryRegion::new_writable(&mut [], ebpf::MM_INPUT_START),
],
&config,
)
.unwrap();
invoke_context
.set_allocator(Rc::new(RefCell::new(BpfAllocator::new(
heap,
ebpf::MM_HEAP_START,
))))
.unwrap();
invoke_context.set_check_aligned(false);
let mut syscall = SyscallAllocFree {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
for _ in 0..100 {
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(1, 0, 0, 0, 0, &memory_mapping, &mut result);
assert_ne!(result.unwrap(), 0);
}
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result);
assert_eq!(result.unwrap(), 0);
}
// many small aligned allocs
{
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
let mut heap = AlignedMemory::new_with_size(100, HOST_ALIGN);
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion::new_readonly(&[], ebpf::MM_PROGRAM_START),
MemoryRegion::new_writable_gapped(&mut [], ebpf::MM_STACK_START, 4096),
MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
MemoryRegion::new_writable(&mut [], ebpf::MM_INPUT_START),
],
&config,
)
.unwrap();
invoke_context
.set_allocator(Rc::new(RefCell::new(BpfAllocator::new(
heap,
ebpf::MM_HEAP_START,
))))
.unwrap();
let mut syscall = SyscallAllocFree {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
for _ in 0..12 {
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(1, 0, 0, 0, 0, &memory_mapping, &mut result);
assert_ne!(result.unwrap(), 0);
}
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result);
assert_eq!(result.unwrap(), 0);
}
// aligned allocs
fn aligned<T>() {
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
let mut heap = AlignedMemory::new_with_size(100, HOST_ALIGN);
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion::new_readonly(&[], ebpf::MM_PROGRAM_START),
MemoryRegion::new_writable_gapped(&mut [], ebpf::MM_STACK_START, 4096),
MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
MemoryRegion::new_writable(&mut [], ebpf::MM_INPUT_START),
],
&config,
)
.unwrap();
invoke_context
.set_allocator(Rc::new(RefCell::new(BpfAllocator::new(
heap,
ebpf::MM_HEAP_START,
))))
.unwrap();
let mut syscall = SyscallAllocFree {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(
size_of::<u8>() as u64,
0,
0,
0,
0,
&memory_mapping,
&mut result,
);
let address = result.unwrap();
assert_ne!(address, 0);
assert_eq!(
(address as *const u8 as usize).wrapping_rem(align_of::<u8>()),
0
);
}
aligned::<u8>();
aligned::<u16>();
aligned::<u32>();
aligned::<u64>();
aligned::<u128>();
}
#[test]
fn test_syscall_sha256() {
let config = Config::default();
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader_deprecated::id(),
);
let bytes1 = "Gaggablaghblagh!";
let bytes2 = "flurbos";
let mock_slice1 = MockSlice {
vm_addr: 0x300000000,
len: bytes1.len(),
};
let mock_slice2 = MockSlice {
vm_addr: 0x400000000,
len: bytes2.len(),
};
let bytes_to_hash = [mock_slice1, mock_slice2];
let hash_result = [0; HASH_BYTES];
let ro_len = bytes_to_hash.len() as u64;
let ro_va = 0x100000000;
let rw_va = 0x200000000;
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: bytes_to_hash.as_ptr() as *const _ as u64,
vm_addr: ro_va,
len: 32,
vm_gap_shift: 63,
is_writable: false,
},
MemoryRegion {
host_addr: hash_result.as_ptr() as *const _ as u64,
vm_addr: rw_va,
len: HASH_BYTES as u64,
vm_gap_shift: 63,
is_writable: true,
},
MemoryRegion {
host_addr: bytes1.as_ptr() as *const _ as u64,
vm_addr: bytes_to_hash[0].vm_addr,
len: bytes1.len() as u64,
vm_gap_shift: 63,
is_writable: false,
},
MemoryRegion {
host_addr: bytes2.as_ptr() as *const _ as u64,
vm_addr: bytes_to_hash[1].vm_addr,
len: bytes2.len() as u64,
vm_gap_shift: 63,
is_writable: false,
},
],
&config,
)
.unwrap();
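// Fund the compute meter for exactly four syscall invocations; the fifth
// call below is expected to fail with ComputationalBudgetExceeded.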
invoke_context
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(
(invoke_context.get_compute_budget().sha256_base_cost
+ invoke_context.get_compute_budget().mem_op_base_cost.max(
invoke_context
.get_compute_budget()
.sha256_byte_cost
.saturating_mul((bytes1.len() + bytes2.len()) as u64 / 2),
))
* 4,
);
let mut syscall = SyscallSha256 {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(ro_va, ro_len, rw_va, 0, 0, &memory_mapping, &mut result);
result.unwrap();
let hash_local = hashv(&[bytes1.as_ref(), bytes2.as_ref()]).to_bytes();
assert_eq!(hash_result, hash_local);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(
ro_va - 1, // AccessViolation
ro_len,
rw_va,
0,
0,
&memory_mapping,
&mut result,
);
assert_access_violation!(result, ro_va - 1, 32);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(
ro_va,
ro_len + 1, // AccessViolation
rw_va,
0,
0,
&memory_mapping,
&mut result,
);
assert_access_violation!(result, ro_va, 48);
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(
ro_va,
ro_len,
rw_va - 1, // AccessViolation
0,
0,
&memory_mapping,
&mut result,
);
assert_access_violation!(result, rw_va - 1, HASH_BYTES as u64);
syscall.call(ro_va, ro_len, rw_va, 0, 0, &memory_mapping, &mut result);
assert_eq!(
Err(EbpfError::UserError(BpfError::SyscallError(
SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded)
))),
result
);
}
#[test]
#[allow(deprecated)]
fn test_syscall_get_sysvar() {
let config = Config::default();
let src_clock = Clock {
slot: 1,
epoch_start_timestamp: 2,
epoch: 3,
leader_schedule_epoch: 4,
unix_timestamp: 5,
};
let src_epochschedule = EpochSchedule {
slots_per_epoch: 1,
leader_schedule_slot_offset: 2,
warmup: false,
first_normal_epoch: 3,
first_normal_slot: 4,
};
let src_fees = Fees {
fee_calculator: FeeCalculator {
lamports_per_signature: 1,
},
};
let src_rent = Rent {
lamports_per_byte_year: 1,
exemption_threshold: 2.0,
burn_percent: 3,
};
let mut sysvar_cache = SysvarCache::default();
sysvar_cache.set_clock(src_clock.clone());
sysvar_cache.set_epoch_schedule(src_epochschedule);
sysvar_cache.set_fees(src_fees.clone());
sysvar_cache.set_rent(src_rent);
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
invoke_context.sysvar_cache = Cow::Owned(sysvar_cache);
// Test clock sysvar
{
let got_clock = Clock::default();
let got_clock_va = 0x100000000;
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: &got_clock as *const _ as u64,
vm_addr: got_clock_va,
len: size_of::<Clock>() as u64,
vm_gap_shift: 63,
is_writable: true,
},
],
&config,
)
.unwrap();
let mut syscall = SyscallGetClockSysvar {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(got_clock_va, 0, 0, 0, 0, &memory_mapping, &mut result);
result.unwrap();
assert_eq!(got_clock, src_clock);
}
// Test epoch_schedule sysvar
{
let got_epochschedule = EpochSchedule::default();
let got_epochschedule_va = 0x100000000;
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: &got_epochschedule as *const _ as u64,
vm_addr: got_epochschedule_va,
len: size_of::<EpochSchedule>() as u64,
vm_gap_shift: 63,
is_writable: true,
},
],
&config,
)
.unwrap();
let mut syscall = SyscallGetEpochScheduleSysvar {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(
got_epochschedule_va,
0,
0,
0,
0,
&memory_mapping,
&mut result,
);
result.unwrap();
assert_eq!(got_epochschedule, src_epochschedule);
}
// Test fees sysvar
{
let got_fees = Fees::default();
let got_fees_va = 0x100000000;
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: &got_fees as *const _ as u64,
vm_addr: got_fees_va,
len: size_of::<Fees>() as u64,
vm_gap_shift: 63,
is_writable: true,
},
],
&config,
)
.unwrap();
let mut syscall = SyscallGetFeesSysvar {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(got_fees_va, 0, 0, 0, 0, &memory_mapping, &mut result);
result.unwrap();
assert_eq!(got_fees, src_fees);
}
// Test rent sysvar
{
let got_rent = Rent::default();
let got_rent_va = 0x100000000;
let memory_mapping = MemoryMapping::new::<UserError>(
vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: &got_rent as *const _ as u64,
vm_addr: got_rent_va,
len: size_of::<Rent>() as u64,
vm_gap_shift: 63,
is_writable: true,
},
],
&config,
)
.unwrap();
let mut syscall = SyscallGetRentSysvar {
invoke_context: Rc::new(RefCell::new(&mut invoke_context)),
};
let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
syscall.call(got_rent_va, 0, 0, 0, 0, &memory_mapping, &mut result);
result.unwrap();
assert_eq!(got_rent, src_rent);
}
}
#[test]
fn test_overlapping() {
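// check_overlapping_do_not_use(a, b, len) reports whether [a, a+len) and
// [b, b+len) intersect, i.e. whether |a - b| < len.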
assert!(!check_overlapping_do_not_use(10, 7, 3));
assert!(check_overlapping_do_not_use(10, 8, 3));
assert!(check_overlapping_do_not_use(10, 9, 3));
assert!(check_overlapping_do_not_use(10, 10, 3));
assert!(check_overlapping_do_not_use(10, 11, 3));
assert!(check_overlapping_do_not_use(10, 12, 3));
assert!(!check_overlapping_do_not_use(10, 13, 3));
}
fn call_program_address_common(
seeds: &[&[u8]],
program_id: &Pubkey,
syscall: &mut dyn SyscallObject<BpfError>,
) -> Result<(Pubkey, u8), EbpfError<BpfError>> {
const SEEDS_VA: u64 = 0x100000000;
const PROGRAM_ID_VA: u64 = 0x200000000;
const ADDRESS_VA: u64 = 0x300000000;
const BUMP_SEED_VA: u64 = 0x400000000;
const SEED_VA: u64 = 0x500000000;
let config = Config::default();
let address = Pubkey::default();
let bump_seed = 0;
let mut mock_slices = Vec::with_capacity(seeds.len());
let mut regions = vec![
MemoryRegion::default(),
MemoryRegion {
host_addr: mock_slices.as_ptr() as u64,
vm_addr: SEEDS_VA,
len: (seeds.len().saturating_mul(size_of::<MockSlice>()) as u64),
vm_gap_shift: 63,
is_writable: false,
},
MemoryRegion {
host_addr: program_id.as_ref().as_ptr() as u64,
vm_addr: PROGRAM_ID_VA,
len: 32,
vm_gap_shift: 63,
is_writable: false,
},
MemoryRegion {
host_addr: address.as_ref().as_ptr() as u64,
vm_addr: ADDRESS_VA,
len: 32,
vm_gap_shift: 63,
is_writable: true,
},
MemoryRegion {
host_addr: &bump_seed as *const u8 as u64,
vm_addr: BUMP_SEED_VA,
len: 32,
vm_gap_shift: 63,
is_writable: true,
},
];
for (i, seed) in seeds.iter().enumerate() {
let vm_addr = SEED_VA.saturating_add((i as u64).saturating_mul(0x100000000));
let mock_slice = MockSlice {
vm_addr,
len: seed.len(),
};
mock_slices.push(mock_slice);
regions.push(MemoryRegion {
host_addr: seed.as_ptr() as u64,
vm_addr,
len: seed.len() as u64,
vm_gap_shift: 63,
is_writable: false,
});
}
let memory_mapping = MemoryMapping::new::<UserError>(regions, &config).unwrap();
let mut result = Ok(0);
syscall.call(
SEEDS_VA,
seeds.len() as u64,
PROGRAM_ID_VA,
ADDRESS_VA,
BUMP_SEED_VA,
&memory_mapping,
&mut result,
);
let _ = result?;
Ok((address, bump_seed))
}
fn create_program_address(
invoke_context: &mut InvokeContext,
seeds: &[&[u8]],
address: &Pubkey,
) -> Result<Pubkey, EbpfError<BpfError>> {
let mut syscall = SyscallCreateProgramAddress {
invoke_context: Rc::new(RefCell::new(invoke_context)),
};
let (address, _) = call_program_address_common(seeds, address, &mut syscall)?;
Ok(address)
}
fn try_find_program_address(
invoke_context: &mut InvokeContext,
seeds: &[&[u8]],
address: &Pubkey,
) -> Result<(Pubkey, u8), EbpfError<BpfError>> {
let mut syscall = SyscallTryFindProgramAddress {
invoke_context: Rc::new(RefCell::new(invoke_context)),
};
call_program_address_common(seeds, address, &mut syscall)
}
#[test]
fn test_create_program_address() {
// These tests duplicate the direct tests in solana_program::pubkey
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
let address = bpf_loader_upgradeable::id();
let exceeded_seed = &[127; MAX_SEED_LEN + 1];
let result = create_program_address(&mut invoke_context, &[exceeded_seed], &address);
assert_eq!(
result,
Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into())
);
assert_eq!(
create_program_address(
&mut invoke_context,
&[b"short_seed", exceeded_seed],
&address,
),
Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into())
);
let max_seed = &[0; MAX_SEED_LEN];
assert!(create_program_address(&mut invoke_context, &[max_seed], &address).is_ok());
let exceeded_seeds: &[&[u8]] = &[
&[1],
&[2],
&[3],
&[4],
&[5],
&[6],
&[7],
&[8],
&[9],
&[10],
&[11],
&[12],
&[13],
&[14],
&[15],
&[16],
];
assert!(create_program_address(&mut invoke_context, exceeded_seeds, &address).is_ok());
let max_seeds: &[&[u8]] = &[
&[1],
&[2],
&[3],
&[4],
&[5],
&[6],
&[7],
&[8],
&[9],
&[10],
&[11],
&[12],
&[13],
&[14],
&[15],
&[16],
&[17],
];
assert_eq!(
create_program_address(&mut invoke_context, max_seeds, &address),
Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into())
);
assert_eq!(
create_program_address(&mut invoke_context, &[b"", &[1]], &address),
Ok("BwqrghZA2htAcqq8dzP1WDAhTXYTYWj7CHxF5j7TDBAe"
.parse()
.unwrap())
);
assert_eq!(
create_program_address(&mut invoke_context, &["☉".as_ref(), &[0]], &address),
Ok("13yWmRpaTR4r5nAktwLqMpRNr28tnVUZw26rTvPSSB19"
.parse()
.unwrap())
);
assert_eq!(
create_program_address(&mut invoke_context, &[b"Talking", b"Squirrels"], &address),
Ok("2fnQrngrQT4SeLcdToJAD96phoEjNL2man2kfRLCASVk"
.parse()
.unwrap())
);
let public_key = Pubkey::from_str("SeedPubey1111111111111111111111111111111111").unwrap();
assert_eq!(
create_program_address(&mut invoke_context, &[public_key.as_ref(), &[1]], &address),
Ok("976ymqVnfE32QFe6NfGDctSvVa36LWnvYxhU6G2232YL"
.parse()
.unwrap())
);
assert_ne!(
create_program_address(&mut invoke_context, &[b"Talking", b"Squirrels"], &address)
.unwrap(),
create_program_address(&mut invoke_context, &[b"Talking"], &address).unwrap(),
);
invoke_context
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(0);
assert_eq!(
create_program_address(&mut invoke_context, &[b"", &[1]], &address),
Err(
SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded)
.into()
)
);
}
#[test]
fn test_find_program_address() {
prepare_mockup!(
invoke_context,
transaction_context,
program_id,
bpf_loader::id(),
);
let cost = invoke_context
.get_compute_budget()
.create_program_address_units;
let address = bpf_loader_upgradeable::id();
let max_tries = 256; // one per seed
for _ in 0..1_000 {
let address = Pubkey::new_unique();
invoke_context
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(cost * max_tries);
let (found_address, bump_seed) =
try_find_program_address(&mut invoke_context, &[b"Lil'", b"Bits"], &address)
.unwrap();
assert_eq!(
found_address,
create_program_address(
&mut invoke_context,
&[b"Lil'", b"Bits", &[bump_seed]],
&address,
)
.unwrap()
);
}
let seeds: &[&[u8]] = &[b""];
invoke_context
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(cost * max_tries);
let (_, bump_seed) =
try_find_program_address(&mut invoke_context, seeds, &address).unwrap();
invoke_context
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(cost * (max_tries - bump_seed as u64));
try_find_program_address(&mut invoke_context, seeds, &address).unwrap();
invoke_context
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(cost * (max_tries - bump_seed as u64 - 1));
assert_eq!(
try_find_program_address(&mut invoke_context, seeds, &address),
Err(
SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded)
.into()
)
);
let exceeded_seed = &[127; MAX_SEED_LEN + 1];
invoke_context
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(cost * (max_tries - 1));
assert_eq!(
try_find_program_address(&mut invoke_context, &[exceeded_seed], &address),
Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into())
);
let exceeded_seeds: &[&[u8]] = &[
&[1],
&[2],
&[3],
&[4],
&[5],
&[6],
&[7],
&[8],
&[9],
&[10],
&[11],
&[12],
&[13],
&[14],
&[15],
&[16],
&[17],
];
invoke_context
.get_compute_meter()
.borrow_mut()
.mock_set_remaining(cost * (max_tries - 1));
assert_eq!(
try_find_program_address(&mut invoke_context, exceeded_seeds, &address),
Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into())
);
}
#[test]
fn test_check_type_assumptions() {
check_type_assumptions();
}
}
| 33.216074 | 103 | 0.522328 |
0a6a215c129601876be025d0cb8495557facc141 | 3,878 |

extern crate rayon;
use rayon::prelude::*;
use std::fmt::Debug;
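/// Helper that requires `I: ParallelIterator + Debug` at compile time and
/// exercises the `Debug` impl by formatting the iterator.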
fn check<I>(iter: I)
where I: ParallelIterator + Debug
{
println!("{:?}", iter);
}
#[test]
fn debug_binary_heap() {
use std::collections::BinaryHeap;
let heap: BinaryHeap<_> = (0..10).collect();
check(heap.par_iter());
check(heap.into_par_iter());
}
#[test]
fn debug_btree_map() {
use std::collections::BTreeMap;
let mut map: BTreeMap<_,_> = (0..10).enumerate().collect();
check(map.par_iter());
check(map.par_iter_mut());
check(map.into_par_iter());
}
#[test]
fn debug_btree_set() {
use std::collections::BTreeSet;
let set: BTreeSet<_> = (0..10).collect();
check(set.par_iter());
check(set.into_par_iter());
}
#[test]
fn debug_hash_map() {
use std::collections::HashMap;
let mut map: HashMap<_,_> = (0..10).enumerate().collect();
check(map.par_iter());
check(map.par_iter_mut());
check(map.into_par_iter());
}
#[test]
fn debug_hash_set() {
use std::collections::HashSet;
let set: HashSet<_> = (0..10).collect();
check(set.par_iter());
check(set.into_par_iter());
}
#[test]
fn debug_linked_list() {
use std::collections::LinkedList;
let mut list: LinkedList<_> = (0..10).collect();
check(list.par_iter());
check(list.par_iter_mut());
check(list.into_par_iter());
}
#[test]
fn debug_vec_deque() {
use std::collections::VecDeque;
let mut deque: VecDeque<_> = (0..10).collect();
check(deque.par_iter());
check(deque.par_iter_mut());
check(deque.into_par_iter());
}
#[test]
fn debug_option() {
let mut option = Some(0);
check(option.par_iter());
check(option.par_iter_mut());
check(option.into_par_iter());
}
#[test]
fn debug_result() {
let mut result = Ok::<_, ()>(0);
check(result.par_iter());
check(result.par_iter_mut());
check(result.into_par_iter());
}
#[test]
fn debug_range() {
check((0..10).into_par_iter());
}
#[test]
fn debug_str() {
let s = "a b c d\ne f g";
check(s.par_chars());
check(s.par_lines());
check(s.par_split('\n'));
check(s.par_split_terminator('\n'));
check(s.par_split_whitespace());
}
#[test]
fn debug_vec() {
let mut v: Vec<_> = (0..10).collect();
check(v.par_iter());
check(v.par_iter_mut());
check(v.par_chunks(42));
check(v.par_chunks_mut(42));
check(v.par_windows(42));
check(v.par_split(|x| x % 3 == 0));
check(v.par_split_mut(|x| x % 3 == 0));
check(v.into_par_iter());
}
#[test]
fn debug_adaptors() {
let v: Vec<_> = (0..10).collect();
check(v.par_iter().chain(&v));
check(v.par_iter().cloned());
check(v.par_iter().enumerate());
check(v.par_iter().filter(|_| true));
check(v.par_iter().filter_map(|x| Some(x)));
check(v.par_iter().flat_map(|x| Some(x)));
check(v.par_iter().map(Some).flatten());
check(v.par_iter().fold(|| 0, |x, _| x));
check(v.par_iter().fold_with(0, |x, _| x));
check(v.par_iter().inspect(|_| ()));
check(v.par_iter().interleave(&v));
check(v.par_iter().interleave_shortest(&v));
check(v.par_iter().intersperse(&-1));
check(v.par_iter().map(|x| x));
check(v.par_iter().map_with(0, |_, x| x));
check(v.par_iter().rev());
check(v.par_iter().skip(1));
check(v.par_iter().take(1));
check(v.par_iter().map(Some).while_some());
check(v.par_iter().with_max_len(1));
check(v.par_iter().with_min_len(1));
check(v.par_iter().zip(&v));
check(v.par_iter().zip_eq(&v));
}
#[test]
fn debug_empty() {
check(rayon::iter::empty::<i32>());
}
#[test]
fn debug_once() {
check(rayon::iter::once(10));
}
#[test]
fn debug_repeat() {
let x: Option<i32> = None;
check(rayon::iter::repeat(x));
check(rayon::iter::repeatn(x, 10));
}
#[test]
fn debug_splitter() {
check(rayon::iter::split((0..10), |x| (x, None)));
}
| 23.361446 | 63 | 0.601599 |
abc4974a3f4d2f17e255238a6757a783917fb334 | 33,223 |

//! A multi-producer, multi-consumer broadcast queue. Each sent value is seen by
//! all consumers.
//!
//! A [`Sender`] is used to broadcast values to **all** connected [`Receiver`]
//! values. [`Sender`] handles are clone-able, allowing concurrent send and
//! receive actions. [`Sender`] and [`Receiver`] are both `Send` and `Sync` as
//! long as `T` is also `Send` or `Sync` respectively.
//!
//! When a value is sent, **all** [`Receiver`] handles are notified and will
//! receive the value. The value is stored once inside the channel and cloned on
//! demand for each receiver. Once all receivers have received a clone of the
//! value, the value is released from the channel.
//!
//! A channel is created by calling [`channel`], specifying the maximum number
//! of messages the channel can retain at any given time.
//!
//! New [`Receiver`] handles are created by calling [`Sender::subscribe`]. The
//! returned [`Receiver`] will receive values sent **after** the call to
//! `subscribe`.
//!
//! ## Lagging
//!
//! As sent messages must be retained until **all** [`Receiver`] handles receive
//! a clone, broadcast channels are susceptible to the "slow receiver" problem.
//! In this case, all but one receiver are able to receive values at the rate
//! they are sent. Because one receiver is stalled, the channel starts to fill
//! up.
//!
//! This broadcast channel implementation handles this case by setting a hard
//! upper bound on the number of values the channel may retain at any given
//! time. This upper bound is passed to the [`channel`] function as an argument.
//!
//! If a value is sent when the channel is at capacity, the oldest value
//! currently held by the channel is released. This frees up space for the new
//! value. Any receiver that has not yet seen the released value will return
//! [`RecvError::Lagged`] the next time [`recv`] is called.
//!
//! Once [`RecvError::Lagged`] is returned, the lagging receiver's position is
//! updated to the oldest value contained by the channel. The next call to
//! [`recv`] will return this value.
//!
//! This behavior enables a receiver to detect when it has lagged so far behind
//! that data has been dropped. The caller may decide how to respond to this:
//! either by aborting its task or by tolerating lost messages and resuming
//! consumption of the channel.
//!
//! ## Closing
//!
//! When **all** [`Sender`] handles have been dropped, no new values may be
//! sent. At this point, the channel is "closed". Once a receiver has received
//! all values retained by the channel, the next call to [`recv`] will return
//! with [`RecvError::Closed`].
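//!
//! A minimal sketch of this behavior (this example is ours, not part of the
//! upstream documentation): dropping the last `Sender` closes the channel,
//! retained values are still delivered, and the next call reports closure.
//!
//! ```
//! use tokio::sync::broadcast;
//!
//! #[tokio::main]
//! async fn main() {
//!     let (tx, mut rx) = broadcast::channel(16);
//!
//!     tx.send(10).unwrap();
//!     drop(tx); // the last sender is gone: the channel is now closed
//!
//!     // The retained value is still received...
//!     assert_eq!(10, rx.recv().await.unwrap());
//!     // ...and the next call observes the closure.
//!     assert_eq!(Err(broadcast::RecvError::Closed), rx.recv().await);
//! }
//! ```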
//!
//! [`Sender`]: crate::sync::broadcast::Sender
//! [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe
//! [`Receiver`]: crate::sync::broadcast::Receiver
//! [`channel`]: crate::sync::broadcast::channel
//! [`RecvError::Lagged`]: crate::sync::broadcast::RecvError::Lagged
//! [`RecvError::Closed`]: crate::sync::broadcast::RecvError::Closed
//! [`recv`]: crate::sync::broadcast::Receiver::recv
//!
//! # Examples
//!
//! Basic usage
//!
//! ```
//! use tokio::sync::broadcast;
//!
//! #[tokio::main]
//! async fn main() {
//! let (tx, mut rx1) = broadcast::channel(16);
//! let mut rx2 = tx.subscribe();
//!
//! tokio::spawn(async move {
//! assert_eq!(rx1.recv().await.unwrap(), 10);
//! assert_eq!(rx1.recv().await.unwrap(), 20);
//! });
//!
//! tokio::spawn(async move {
//! assert_eq!(rx2.recv().await.unwrap(), 10);
//! assert_eq!(rx2.recv().await.unwrap(), 20);
//! });
//!
//! tx.send(10).unwrap();
//! tx.send(20).unwrap();
//! }
//! ```
//!
//! Handling lag
//!
//! ```
//! use tokio::sync::broadcast;
//!
//! #[tokio::main]
//! async fn main() {
//! let (tx, mut rx) = broadcast::channel(2);
//!
//! tx.send(10).unwrap();
//! tx.send(20).unwrap();
//! tx.send(30).unwrap();
//!
//! // The receiver lagged behind
//! assert!(rx.recv().await.is_err());
//!
//! // At this point, we can abort or continue with lost messages
//!
//! assert_eq!(20, rx.recv().await.unwrap());
//! assert_eq!(30, rx.recv().await.unwrap());
//! }
//! ```
use crate::loom::cell::UnsafeCell;
use crate::loom::future::AtomicWaker;
use crate::loom::sync::atomic::{spin_loop_hint, AtomicBool, AtomicPtr, AtomicUsize};
use crate::loom::sync::{Arc, Condvar, Mutex};
use std::fmt;
use std::mem;
use std::ptr;
use std::sync::atomic::Ordering::SeqCst;
use std::task::{Context, Poll, Waker};
use std::usize;
/// Sending-half of the [`broadcast`] channel.
///
/// May be used from many threads. Messages can be sent with
/// [`send`][Sender::send].
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast;
///
/// #[tokio::main]
/// async fn main() {
/// let (tx, mut rx1) = broadcast::channel(16);
/// let mut rx2 = tx.subscribe();
///
/// tokio::spawn(async move {
/// assert_eq!(rx1.recv().await.unwrap(), 10);
/// assert_eq!(rx1.recv().await.unwrap(), 20);
/// });
///
/// tokio::spawn(async move {
/// assert_eq!(rx2.recv().await.unwrap(), 10);
/// assert_eq!(rx2.recv().await.unwrap(), 20);
/// });
///
/// tx.send(10).unwrap();
/// tx.send(20).unwrap();
/// }
/// ```
///
/// [`broadcast`]: crate::sync::broadcast
pub struct Sender<T> {
shared: Arc<Shared<T>>,
}
/// Receiving-half of the [`broadcast`] channel.
///
/// Must not be used concurrently. Messages may be retrieved using
/// [`recv`][Receiver::recv].
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast;
///
/// #[tokio::main]
/// async fn main() {
/// let (tx, mut rx1) = broadcast::channel(16);
/// let mut rx2 = tx.subscribe();
///
/// tokio::spawn(async move {
/// assert_eq!(rx1.recv().await.unwrap(), 10);
/// assert_eq!(rx1.recv().await.unwrap(), 20);
/// });
///
/// tokio::spawn(async move {
/// assert_eq!(rx2.recv().await.unwrap(), 10);
/// assert_eq!(rx2.recv().await.unwrap(), 20);
/// });
///
/// tx.send(10).unwrap();
/// tx.send(20).unwrap();
/// }
/// ```
///
/// [`broadcast`]: crate::sync::broadcast
pub struct Receiver<T> {
/// State shared with all receivers and senders.
shared: Arc<Shared<T>>,
/// Next position to read from
next: u64,
/// Waiter state
wait: Arc<WaitNode>,
}
/// Error returned by [`Sender::send`][Sender::send].
///
/// A **send** operation can only fail if there are no active receivers,
/// implying that the message could never be received. The error contains the
/// message being sent as a payload so it can be recovered.
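///
/// A small sketch of recovering the payload (this example is ours, not part
/// of the upstream documentation); no runtime is needed because `send` is
/// synchronous:
///
/// ```
/// use tokio::sync::broadcast;
///
/// let (tx, rx) = broadcast::channel(16);
/// drop(rx); // no active receivers remain
///
/// let err = tx.send("hello").unwrap_err();
/// assert_eq!("hello", err.0);
/// ```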
#[derive(Debug)]
pub struct SendError<T>(pub T);
/// An error returned from the [`recv`] function on a [`Receiver`].
///
/// [`recv`]: crate::sync::broadcast::Receiver::recv
/// [`Receiver`]: crate::sync::broadcast::Receiver
#[derive(Debug, PartialEq)]
pub enum RecvError {
/// There are no more active senders implying no further messages will ever
/// be sent.
Closed,
/// The receiver lagged too far behind. Attempting to receive again will
/// return the oldest message still retained by the channel.
///
/// Includes the number of skipped messages.
Lagged(u64),
}
/// An error returned from the [`try_recv`] function on a [`Receiver`].
///
/// [`try_recv`]: crate::sync::broadcast::Receiver::try_recv
/// [`Receiver`]: crate::sync::broadcast::Receiver
#[derive(Debug, PartialEq)]
pub enum TryRecvError {
/// The channel is currently empty. There are still active
/// [`Sender`][Sender] handles, so data may yet become available.
Empty,
/// There are no more active senders implying no further messages will ever
/// be sent.
Closed,
/// The receiver lagged too far behind and has been forcibly disconnected.
/// Attempting to receive again will return the oldest message still
/// retained by the channel.
///
/// Includes the number of skipped messages.
Lagged(u64),
}
/// Data shared between senders and receivers
struct Shared<T> {
/// slots in the channel
buffer: Box<[Slot<T>]>,
/// Mask a position -> index
mask: usize,
/// Tail of the queue
tail: Mutex<Tail>,
/// Notifies a sender that the slot is unlocked
condvar: Condvar,
/// Stack of pending waiters
wait_stack: AtomicPtr<WaitNode>,
/// Number of outstanding Sender handles
num_tx: AtomicUsize,
}
/// Next position to write a value
struct Tail {
/// Next position to write to
pos: u64,
/// Number of active receivers
rx_cnt: usize,
/// True if the channel is closed
closed: bool,
}
/// Slot in the buffer
struct Slot<T> {
/// Remaining number of receivers that are expected to see this value.
///
/// When this goes to zero, the value is released.
rem: AtomicUsize,
/// Used to lock the `write` field.
lock: AtomicUsize,
/// The value being broadcast
///
/// Synchronized by `state`
write: Write<T>,
}
/// A write in the buffer
struct Write<T> {
/// Uniquely identifies this write
pos: UnsafeCell<u64>,
/// The written value
val: UnsafeCell<Option<T>>,
}
/// Tracks a waiting receiver
#[derive(Debug)]
struct WaitNode {
/// `true` if queued
queued: AtomicBool,
/// Task to wake when a permit is made available.
waker: AtomicWaker,
/// Next pointer in the stack of waiting senders.
next: UnsafeCell<*const WaitNode>,
}
struct RecvGuard<'a, T> {
slot: &'a Slot<T>,
tail: &'a Mutex<Tail>,
condvar: &'a Condvar,
}
/// Max number of receivers. Reserve space to lock.
const MAX_RECEIVERS: usize = usize::MAX >> 2;
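// Bit layout of `Slot::lock`, as used by `try_rx_lock` / `rx_unlock` below:
// bit 0 (CLOSED) marks the slot carrying the channel-closed sentinel, bit 1
// (WRITER) is held by a sender while it rewrites the slot, and the remaining
// bits count concurrent readers in increments of READER.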
const CLOSED: usize = 1;
const WRITER: usize = 2;
const READER: usize = 4;
/// Create a bounded, multi-producer, multi-consumer channel where each sent
/// value is broadcasted to all active receivers.
///
/// All data sent on [`Sender`] will become available on every active
/// [`Receiver`] in the same order as it was sent.
///
/// The `Sender` can be cloned to `send` to the same channel from multiple
/// points in the process or it can be used concurrently from an `Arc`. New
/// `Receiver` handles are created by calling [`Sender::subscribe`].
///
/// If all [`Receiver`] handles are dropped, the `send` method will return a
/// [`SendError`]. Similarly, if all [`Sender`] handles are dropped, the [`recv`]
/// method will return a [`RecvError`].
///
/// [`Sender`]: crate::sync::broadcast::Sender
/// [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe
/// [`Receiver`]: crate::sync::broadcast::Receiver
/// [`recv`]: crate::sync::broadcast::Receiver::recv
/// [`SendError`]: crate::sync::broadcast::SendError
/// [`RecvError`]: crate::sync::broadcast::RecvError
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast;
///
/// #[tokio::main]
/// async fn main() {
/// let (tx, mut rx1) = broadcast::channel(16);
/// let mut rx2 = tx.subscribe();
///
/// tokio::spawn(async move {
/// assert_eq!(rx1.recv().await.unwrap(), 10);
/// assert_eq!(rx1.recv().await.unwrap(), 20);
/// });
///
/// tokio::spawn(async move {
/// assert_eq!(rx2.recv().await.unwrap(), 10);
/// assert_eq!(rx2.recv().await.unwrap(), 20);
/// });
///
/// tx.send(10).unwrap();
/// tx.send(20).unwrap();
/// }
/// ```
pub fn channel<T>(mut capacity: usize) -> (Sender<T>, Receiver<T>) {
assert!(capacity > 0, "capacity is empty");
assert!(capacity <= usize::MAX >> 1, "requested capacity too large");
// Round to a power of two
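// (a power-of-two capacity lets `pos & mask` stand in for `pos % capacity`
// when indexing the buffer; see the `mask` field and `send2`)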
capacity = capacity.next_power_of_two();
let mut buffer = Vec::with_capacity(capacity);
for i in 0..capacity {
buffer.push(Slot {
rem: AtomicUsize::new(0),
lock: AtomicUsize::new(0),
write: Write {
pos: UnsafeCell::new((i as u64).wrapping_sub(capacity as u64)),
val: UnsafeCell::new(None),
},
});
}
let shared = Arc::new(Shared {
buffer: buffer.into_boxed_slice(),
mask: capacity - 1,
tail: Mutex::new(Tail {
pos: 0,
rx_cnt: 1,
closed: false,
}),
condvar: Condvar::new(),
wait_stack: AtomicPtr::new(ptr::null_mut()),
num_tx: AtomicUsize::new(1),
});
let rx = Receiver {
shared: shared.clone(),
next: 0,
wait: Arc::new(WaitNode {
queued: AtomicBool::new(false),
waker: AtomicWaker::new(),
next: UnsafeCell::new(ptr::null()),
}),
};
let tx = Sender { shared };
(tx, rx)
}
unsafe impl<T: Send> Send for Sender<T> {}
unsafe impl<T: Send> Sync for Sender<T> {}
unsafe impl<T: Send> Send for Receiver<T> {}
unsafe impl<T: Send> Sync for Receiver<T> {}
impl<T> Sender<T> {
/// Attempts to send a value to all active [`Receiver`] handles, returning
/// it back if it could not be sent.
///
/// A successful send occurs when there is at least one active [`Receiver`]
/// handle. An unsuccessful send would be one where all associated
/// [`Receiver`] handles have already been dropped.
///
/// # Return
///
/// On success, the number of subscribed [`Receiver`] handles is returned.
/// This does not mean that this number of receivers will see the message as
/// a receiver may drop before receiving the message.
///
/// # Note
///
/// A return value of `Ok` **does not** mean that the sent value will be
/// observed by all or any of the active [`Receiver`] handles. [`Receiver`]
/// handles may be dropped before receiving the sent message.
///
/// A return value of `Err` **does not** mean that future calls to `send`
/// will fail. New [`Receiver`] handles may be created by calling
/// [`subscribe`].
///
/// [`Receiver`]: crate::sync::broadcast::Receiver
/// [`subscribe`]: crate::sync::broadcast::Sender::subscribe
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast;
///
/// #[tokio::main]
/// async fn main() {
/// let (tx, mut rx1) = broadcast::channel(16);
/// let mut rx2 = tx.subscribe();
///
/// tokio::spawn(async move {
/// assert_eq!(rx1.recv().await.unwrap(), 10);
/// assert_eq!(rx1.recv().await.unwrap(), 20);
/// });
///
/// tokio::spawn(async move {
/// assert_eq!(rx2.recv().await.unwrap(), 10);
/// assert_eq!(rx2.recv().await.unwrap(), 20);
/// });
///
/// tx.send(10).unwrap();
/// tx.send(20).unwrap();
/// }
/// ```
pub fn send(&self, value: T) -> Result<usize, SendError<T>> {
self.send2(Some(value))
.map_err(|SendError(maybe_v)| SendError(maybe_v.unwrap()))
}
/// Creates a new [`Receiver`] handle that will receive values sent **after**
/// this call to `subscribe`.
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast;
///
/// #[tokio::main]
/// async fn main() {
/// let (tx, _rx) = broadcast::channel(16);
///
/// // Will not be seen
/// tx.send(10).unwrap();
///
/// let mut rx = tx.subscribe();
///
/// tx.send(20).unwrap();
///
/// let value = rx.recv().await.unwrap();
/// assert_eq!(20, value);
/// }
/// ```
pub fn subscribe(&self) -> Receiver<T> {
let shared = self.shared.clone();
let mut tail = shared.tail.lock().unwrap();
if tail.rx_cnt == MAX_RECEIVERS {
panic!("max receivers");
}
tail.rx_cnt = tail.rx_cnt.checked_add(1).expect("overflow");
let next = tail.pos;
drop(tail);
Receiver {
shared,
next,
wait: Arc::new(WaitNode {
queued: AtomicBool::new(false),
waker: AtomicWaker::new(),
next: UnsafeCell::new(ptr::null()),
}),
}
}
/// Returns the number of active receivers
///
/// An active receiver is a [`Receiver`] handle returned from [`channel`] or
/// [`subscribe`]. These are the handles that will receive values sent on
/// this [`Sender`].
///
/// # Note
///
/// It is not guaranteed that a sent message will reach this number of
/// receivers. Active receivers may never call [`recv`] again before
/// dropping.
///
/// [`recv`]: crate::sync::broadcast::Receiver::recv
/// [`Receiver`]: crate::sync::broadcast::Receiver
/// [`Sender`]: crate::sync::broadcast::Sender
/// [`subscribe`]: crate::sync::broadcast::Sender::subscribe
/// [`channel`]: crate::sync::broadcast::channel
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast;
///
/// #[tokio::main]
/// async fn main() {
/// let (tx, _rx1) = broadcast::channel(16);
///
/// assert_eq!(1, tx.receiver_count());
///
/// let mut _rx2 = tx.subscribe();
///
/// assert_eq!(2, tx.receiver_count());
///
/// tx.send(10).unwrap();
/// }
/// ```
pub fn receiver_count(&self) -> usize {
let tail = self.shared.tail.lock().unwrap();
tail.rx_cnt
}
fn send2(&self, value: Option<T>) -> Result<usize, SendError<Option<T>>> {
let mut tail = self.shared.tail.lock().unwrap();
if tail.rx_cnt == 0 {
return Err(SendError(value));
}
// Position to write into
let pos = tail.pos;
let rem = tail.rx_cnt;
let idx = (pos & self.shared.mask as u64) as usize;
// Update the tail position
tail.pos = tail.pos.wrapping_add(1);
// Get the slot
let slot = &self.shared.buffer[idx];
// Acquire the write lock
let mut prev = slot.lock.fetch_or(WRITER, SeqCst);
while prev & !WRITER != 0 {
// Concurrent readers, we must go to sleep
tail = self.shared.condvar.wait(tail).unwrap();
prev = slot.lock.load(SeqCst);
if prev & WRITER == 0 {
// The writer lock bit was cleared while this thread was
// sleeping. This can only happen if a newer write happened on
// this slot by another thread. Bail early as an optimization,
// there is nothing left to do.
return Ok(rem);
}
}
if tail.pos.wrapping_sub(pos) > self.shared.buffer.len() as u64 {
// There is a newer pending write to the same slot.
return Ok(rem);
}
// Slot lock acquired
slot.write.pos.with_mut(|ptr| unsafe { *ptr = pos });
// Set remaining receivers
slot.rem.store(rem, SeqCst);
// Set the closed bit if the value is `None`; otherwise write the value
if value.is_none() {
tail.closed = true;
slot.lock.store(CLOSED, SeqCst);
} else {
slot.write.val.with_mut(|ptr| unsafe { *ptr = value });
slot.lock.store(0, SeqCst);
}
// Release the mutex. This must happen after the slot lock is released,
// otherwise the writer lock bit could be cleared while another thread
// is in the critical section.
drop(tail);
// Notify waiting receivers
self.notify_rx();
Ok(rem)
}
fn notify_rx(&self) {
let mut curr = self.shared.wait_stack.swap(ptr::null_mut(), SeqCst) as *const WaitNode;
while !curr.is_null() {
let waiter = unsafe { Arc::from_raw(curr) };
// Update `curr` before toggling `queued` and waking
curr = waiter.next.with(|ptr| unsafe { *ptr });
// Unset queued
waiter.queued.store(false, SeqCst);
// Wake
waiter.waker.wake();
}
}
}
impl<T> Clone for Sender<T> {
fn clone(&self) -> Sender<T> {
let shared = self.shared.clone();
shared.num_tx.fetch_add(1, SeqCst);
Sender { shared }
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
if 1 == self.shared.num_tx.fetch_sub(1, SeqCst) {
let _ = self.send2(None);
}
}
}
impl<T> Receiver<T> {
/// Locks the next value if there is one.
///
/// The caller is responsible for unlocking
fn recv_ref(&mut self, spin: bool) -> Result<RecvGuard<'_, T>, TryRecvError> {
let idx = (self.next & self.shared.mask as u64) as usize;
// The slot holding the next value to read
let slot = &self.shared.buffer[idx];
// Lock the slot
if !slot.try_rx_lock() {
if spin {
while !slot.try_rx_lock() {
spin_loop_hint();
}
} else {
return Err(TryRecvError::Empty);
}
}
let guard = RecvGuard {
slot,
tail: &self.shared.tail,
condvar: &self.shared.condvar,
};
if guard.pos() != self.next {
let pos = guard.pos();
// The receiver has read all current values in the channel
if pos.wrapping_add(self.shared.buffer.len() as u64) == self.next {
guard.drop_no_rem_dec();
return Err(TryRecvError::Empty);
}
let tail = self.shared.tail.lock().unwrap();
// `tail.pos` points to the slot that the **next** send writes to. If
// the channel is closed, the previous slot is the oldest value.
let mut adjust = 0;
if tail.closed {
adjust = 1
}
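// `next` is the oldest position still retained by the channel; everything
// in [self.next, next) has been overwritten and counts as missed.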
let next = tail
.pos
.wrapping_sub(self.shared.buffer.len() as u64 + adjust);
let missed = next.wrapping_sub(self.next);
drop(tail);
// The receiver is slow but no values have been missed
if missed == 0 {
self.next = self.next.wrapping_add(1);
return Ok(guard);
}
guard.drop_no_rem_dec();
self.next = next;
return Err(TryRecvError::Lagged(missed));
}
self.next = self.next.wrapping_add(1);
// If the `CLOSED` bit is set on the slot, the channel is closed
//
// `try_rx_lock` could check for this and bail early. If its return
// value was changed to represent the state of the lock, it could
// match on being closed, empty, or available for reading.
if slot.lock.load(SeqCst) & CLOSED == CLOSED {
guard.drop_no_rem_dec();
return Err(TryRecvError::Closed);
}
Ok(guard)
}
}
impl<T> Receiver<T>
where
T: Clone,
{
/// Attempts to return a pending value on this receiver without awaiting.
///
/// This is useful for a flavor of "optimistic check" before deciding to
/// await on a receiver.
///
/// Compared with [`recv`], this function has three failure cases instead of two
/// (one for closed, one for an empty buffer, one for a lagging receiver).
///
/// `Err(TryRecvError::Closed)` is returned when all `Sender` halves have
/// dropped, indicating that no further values can be sent on the channel.
///
/// If the [`Receiver`] handle falls behind, once the channel is full, newly
/// sent values will overwrite old values. At this point, a call to [`recv`]
/// will return with `Err(TryRecvError::Lagged)` and the [`Receiver`]'s
/// internal cursor is updated to point to the oldest value still held by
/// the channel. A subsequent call to [`try_recv`] will return this value
/// **unless** it has been since overwritten. If there are no values to
/// receive, `Err(TryRecvError::Empty)` is returned.
///
/// [`recv`]: crate::sync::broadcast::Receiver::recv
/// [`Receiver`]: crate::sync::broadcast::Receiver
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast;
///
/// #[tokio::main]
/// async fn main() {
/// let (tx, mut rx) = broadcast::channel(16);
///
/// assert!(rx.try_recv().is_err());
///
/// tx.send(10).unwrap();
///
/// let value = rx.try_recv().unwrap();
/// assert_eq!(10, value);
/// }
/// ```
pub fn try_recv(&mut self) -> Result<T, TryRecvError> {
let guard = self.recv_ref(false)?;
guard.clone_value().ok_or(TryRecvError::Closed)
}
#[doc(hidden)] // TODO: document
pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Result<T, RecvError>> {
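// Fast path: check for a value, then register the waker and check again so
// that a send racing with waker registration is not missed.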
if let Some(value) = ok_empty(self.try_recv())? {
return Poll::Ready(Ok(value));
}
self.register_waker(cx.waker());
if let Some(value) = ok_empty(self.try_recv())? {
Poll::Ready(Ok(value))
} else {
Poll::Pending
}
}
/// Receives the next value for this receiver.
///
/// Each [`Receiver`] handle will receive a clone of all values sent
/// **after** it has subscribed.
///
/// `Err(RecvError::Closed)` is returned when all `Sender` halves have
/// dropped, indicating that no further values can be sent on the channel.
///
/// If the [`Receiver`] handle falls behind, once the channel is full, newly
/// sent values will overwrite old values. At this point, a call to [`recv`]
/// will return with `Err(RecvError::Lagged)` and the [`Receiver`]'s
/// internal cursor is updated to point to the oldest value still held by
/// the channel. A subsequent call to [`recv`] will return this value
/// **unless** it has been since overwritten.
///
/// [`Receiver`]: crate::sync::broadcast::Receiver
/// [`recv`]: crate::sync::broadcast::Receiver::recv
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast;
///
/// #[tokio::main]
/// async fn main() {
/// let (tx, mut rx1) = broadcast::channel(16);
/// let mut rx2 = tx.subscribe();
///
/// tokio::spawn(async move {
/// assert_eq!(rx1.recv().await.unwrap(), 10);
/// assert_eq!(rx1.recv().await.unwrap(), 20);
/// });
///
/// tokio::spawn(async move {
/// assert_eq!(rx2.recv().await.unwrap(), 10);
/// assert_eq!(rx2.recv().await.unwrap(), 20);
/// });
///
/// tx.send(10).unwrap();
/// tx.send(20).unwrap();
/// }
/// ```
///
/// Handling lag
///
/// ```
/// use tokio::sync::broadcast;
///
/// #[tokio::main]
/// async fn main() {
/// let (tx, mut rx) = broadcast::channel(2);
///
/// tx.send(10).unwrap();
/// tx.send(20).unwrap();
/// tx.send(30).unwrap();
///
/// // The receiver lagged behind
/// assert!(rx.recv().await.is_err());
///
/// // At this point, we can abort or continue with lost messages
///
/// assert_eq!(20, rx.recv().await.unwrap());
/// assert_eq!(30, rx.recv().await.unwrap());
/// }
/// ```
pub async fn recv(&mut self) -> Result<T, RecvError> {
use crate::future::poll_fn;
poll_fn(|cx| self.poll_recv(cx)).await
}
fn register_waker(&self, cx: &Waker) {
self.wait.waker.register_by_ref(cx);
if !self.wait.queued.load(SeqCst) {
// Set `queued` before queuing.
self.wait.queued.store(true, SeqCst);
let mut curr = self.shared.wait_stack.load(SeqCst);
// The ref count is decremented in `notify_rx` when all nodes are
// removed from the waiter stack.
let node = Arc::into_raw(self.wait.clone()) as *mut _;
loop {
// Safety: `queued == false` means the caller has exclusive
// access to `self.wait.next`.
self.wait.next.with_mut(|ptr| unsafe { *ptr = curr });
let res = self
.shared
.wait_stack
.compare_exchange(curr, node, SeqCst, SeqCst);
match res {
Ok(_) => return,
Err(actual) => curr = actual,
}
}
}
}
}
#[cfg(feature = "stream")]
impl<T> crate::stream::Stream for Receiver<T>
where
T: Clone,
{
type Item = Result<T, RecvError>;
fn poll_next(
mut self: std::pin::Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<T, RecvError>>> {
self.poll_recv(cx).map(|v| match v {
Ok(v) => Some(Ok(v)),
lag @ Err(RecvError::Lagged(_)) => Some(lag),
Err(RecvError::Closed) => None,
})
}
}
impl<T> Drop for Receiver<T> {
fn drop(&mut self) {
let mut tail = self.shared.tail.lock().unwrap();
tail.rx_cnt -= 1;
let until = tail.pos;
drop(tail);
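// Drain every position this receiver would still have observed so that each
// slot's `rem` counter is decremented and its value can be released.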
while self.next != until {
match self.recv_ref(true) {
Ok(_) => {}
// The channel is closed
Err(TryRecvError::Closed) => break,
// Ignore lagging, we will catch up
Err(TryRecvError::Lagged(..)) => {}
// Can't be empty
Err(TryRecvError::Empty) => panic!("unexpected empty broadcast channel"),
}
}
}
}
impl<T> Drop for Shared<T> {
fn drop(&mut self) {
// Clear the wait stack
let mut curr = self.wait_stack.with_mut(|ptr| *ptr as *const WaitNode);
while !curr.is_null() {
let waiter = unsafe { Arc::from_raw(curr) };
curr = waiter.next.with(|ptr| unsafe { *ptr });
}
}
}
impl<T> fmt::Debug for Sender<T> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "broadcast::Sender")
}
}
impl<T> fmt::Debug for Receiver<T> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "broadcast::Receiver")
}
}
impl<T> Slot<T> {
/// Tries to lock the slot for a receiver. If `false`, then a sender holds the
/// lock and the calling task will be notified once the sender has released
/// the lock.
fn try_rx_lock(&self) -> bool {
let mut curr = self.lock.load(SeqCst);
loop {
if curr & WRITER == WRITER {
// Locked by sender
return false;
}
// Only increment (by `READER`) if the `WRITER` bit is not set.
let res = self
.lock
.compare_exchange(curr, curr + READER, SeqCst, SeqCst);
match res {
Ok(_) => return true,
Err(actual) => curr = actual,
}
}
}
fn rx_unlock(&self, tail: &Mutex<Tail>, condvar: &Condvar, rem_dec: bool) {
if rem_dec {
// Decrement the remaining counter
if 1 == self.rem.fetch_sub(1, SeqCst) {
// Last receiver, drop the value
self.write.val.with_mut(|ptr| unsafe { *ptr = None });
}
}
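        // `fetch_sub` returns the previous value; if removing our `READER`
        // increment leaves exactly the `WRITER` bit set, we were the last
        // reader blocking a sender, so wake it up.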
if WRITER == self.lock.fetch_sub(READER, SeqCst) - READER {
// First acquire the lock to make sure our sender is waiting on the
// condition variable, otherwise the notification could be lost.
mem::drop(tail.lock().unwrap());
// Wake up senders
condvar.notify_all();
}
}
}
impl<'a, T> RecvGuard<'a, T> {
fn pos(&self) -> u64 {
self.slot.write.pos.with(|ptr| unsafe { *ptr })
}
fn clone_value(&self) -> Option<T>
where
T: Clone,
{
self.slot.write.val.with(|ptr| unsafe { (*ptr).clone() })
}
fn drop_no_rem_dec(self) {
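        // Unlock without touching `rem`, then `mem::forget` ourselves so the
        // `Drop` impl below does not unlock (and decrement) a second time.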
self.slot.rx_unlock(self.tail, self.condvar, false);
mem::forget(self);
}
}
impl<'a, T> Drop for RecvGuard<'a, T> {
fn drop(&mut self) {
self.slot.rx_unlock(self.tail, self.condvar, true)
}
}
fn ok_empty<T>(res: Result<T, TryRecvError>) -> Result<Option<T>, RecvError> {
match res {
Ok(value) => Ok(Some(value)),
Err(TryRecvError::Empty) => Ok(None),
Err(TryRecvError::Lagged(n)) => Err(RecvError::Lagged(n)),
Err(TryRecvError::Closed) => Err(RecvError::Closed),
}
}
impl fmt::Display for RecvError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
RecvError::Closed => write!(f, "channel closed"),
RecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt),
}
}
}
impl std::error::Error for RecvError {}
impl fmt::Display for TryRecvError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TryRecvError::Empty => write!(f, "channel empty"),
TryRecvError::Closed => write!(f, "channel closed"),
TryRecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt),
}
}
}
impl std::error::Error for TryRecvError {}
| 30.563937 | 95 | 0.559402 |
2faf251fa9ffc7468b9a01e44d0a9fc86a84a6ea | 179,222 | use std::collections::HashMap;
use std::cell::RefCell;
use std::default::Default;
use std::collections::BTreeMap;
use serde_json as json;
use std::io;
use std::fs;
use std::mem;
use std::thread::sleep;
use crate::client;
// ##############
// UTILITIES ###
// ############
/// Identifies an OAuth2 authorization scope.
/// A scope is needed when requesting an
/// [authorization token](https://developers.google.com/youtube/v3/guides/authentication).
#[derive(PartialEq, Eq, Hash)]
pub enum Scope {
/// View and manage your data across Google Cloud Platform services
CloudPlatform,
/// View your data across Google Cloud Platform services
CloudPlatformReadOnly,
/// View and manage your Google Compute Engine resources
Compute,
/// View your Google Compute Engine resources
ComputeReadonly,
/// View and manage your Google Cloud Platform management resources and deployment status information
NdevCloudman,
/// View your Google Cloud Platform management resources and deployment status information
NdevCloudmanReadonly,
}
impl AsRef<str> for Scope {
fn as_ref(&self) -> &str {
match *self {
Scope::CloudPlatform => "https://www.googleapis.com/auth/cloud-platform",
Scope::CloudPlatformReadOnly => "https://www.googleapis.com/auth/cloud-platform.read-only",
Scope::Compute => "https://www.googleapis.com/auth/compute",
Scope::ComputeReadonly => "https://www.googleapis.com/auth/compute.readonly",
Scope::NdevCloudman => "https://www.googleapis.com/auth/ndev.cloudman",
Scope::NdevCloudmanReadonly => "https://www.googleapis.com/auth/ndev.cloudman.readonly",
}
}
}
impl Default for Scope {
fn default() -> Scope {
Scope::ComputeReadonly
}
}
// ########
// HUB ###
// ######
/// Central instance to access all Resourceviews related resource activities
///
/// # Examples
///
/// Instantiate a new hub
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// use resourceviews1_beta2::api::ZoneViewsAddResourcesRequest;
/// use resourceviews1_beta2::{Result, Error};
/// # async fn dox() {
/// use std::default::Default;
/// use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// // Get an ApplicationSecret instance by some means. It contains the `client_id` and
/// // `client_secret`, among other things.
/// let secret: oauth2::ApplicationSecret = Default::default();
/// // Instantiate the authenticator. It will choose a suitable authentication flow for you,
/// // unless you replace `None` with the desired Flow.
/// // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
/// // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
/// // retrieve them from storage.
/// let auth = oauth2::InstalledFlowAuthenticator::builder(
/// secret,
/// oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// ).build().await.unwrap();
/// let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = ZoneViewsAddResourcesRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.zone_views().add_resources(req, "project", "zone", "resourceView")
/// .doit().await;
///
/// match result {
/// Err(e) => match e {
/// // The Error enum provides details about what exactly happened.
/// // You can also just use its `Debug`, `Display` or `Error` traits
/// Error::HttpError(_)
/// |Error::Io(_)
/// |Error::MissingAPIKey
/// |Error::MissingToken(_)
/// |Error::Cancelled
/// |Error::UploadSizeLimitExceeded(_, _)
/// |Error::Failure(_)
/// |Error::BadRequest(_)
/// |Error::FieldClash(_)
/// |Error::JsonDecodeError(_, _) => println!("{}", e),
/// },
/// Ok(res) => println!("Success: {:?}", res),
/// }
/// # }
/// ```
#[derive(Clone)]
pub struct Resourceviews<> {
pub client: hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>,
pub auth: oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>,
_user_agent: String,
_base_url: String,
_root_url: String,
}
impl client::Hub for Resourceviews<> {}
impl<'a> Resourceviews<> {
pub fn new(client: hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>, authenticator: oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>) -> Resourceviews<> {
Resourceviews {
client,
auth: authenticator,
_user_agent: "google-api-rust-client/3.1.0".to_string(),
_base_url: "https://www.googleapis.com/resourceviews/v1beta2/projects/".to_string(),
_root_url: "https://www.googleapis.com/".to_string(),
}
}
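    /// Returns a builder giving access to all methods on *zoneOperation* resources.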
pub fn zone_operations(&'a self) -> ZoneOperationMethods<'a> {
ZoneOperationMethods { hub: &self }
}
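    /// Returns a builder giving access to all methods on *zoneView* resources.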
pub fn zone_views(&'a self) -> ZoneViewMethods<'a> {
ZoneViewMethods { hub: &self }
}
/// Set the user-agent header field to use in all requests to the server.
/// It defaults to `google-api-rust-client/3.1.0`.
///
/// Returns the previously set user-agent.
pub fn user_agent(&mut self, agent_name: String) -> String {
mem::replace(&mut self._user_agent, agent_name)
}
/// Set the base url to use in all requests to the server.
/// It defaults to `https://www.googleapis.com/resourceviews/v1beta2/projects/`.
///
/// Returns the previously set base url.
pub fn base_url(&mut self, new_base_url: String) -> String {
mem::replace(&mut self._base_url, new_base_url)
}
/// Set the root url to use in all requests to the server.
/// It defaults to `https://www.googleapis.com/`.
///
/// Returns the previously set root url.
pub fn root_url(&mut self, new_root_url: String) -> String {
mem::replace(&mut self._root_url, new_root_url)
}
}
// ############
// SCHEMAS ###
// ##########
/// The Label to be applied to the resource views.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Label {
/// Key of the label.
pub key: Option<String>,
/// Value of the label.
pub value: Option<String>,
}
impl client::Part for Label {}
/// The list response item that contains the resource and end points information.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListResourceResponseItem {
/// The list of service end points on the resource.
pub endpoints: Option<HashMap<String, Vec<i32>>>,
/// The full URL of the resource.
pub resource: Option<String>,
}
impl client::Part for ListResourceResponseItem {}
/// An operation resource, used to manage asynchronous API requests.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [get zone operations](ZoneOperationGetCall) (response)
/// * [add resources zone views](ZoneViewAddResourceCall) (response)
/// * [delete zone views](ZoneViewDeleteCall) (response)
/// * [insert zone views](ZoneViewInsertCall) (response)
/// * [remove resources zone views](ZoneViewRemoveResourceCall) (response)
/// * [set service zone views](ZoneViewSetServiceCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Operation {
/// [Output only] An optional identifier specified by the client when the mutation was initiated. Must be unique for all operation resources in the project.
#[serde(rename="clientOperationId")]
pub client_operation_id: Option<String>,
/// [Output Only] The time that this operation was requested, in RFC3339 text format.
#[serde(rename="creationTimestamp")]
pub creation_timestamp: Option<String>,
/// [Output Only] The time that this operation was completed, in RFC3339 text format.
#[serde(rename="endTime")]
pub end_time: Option<String>,
/// [Output Only] If errors occurred during processing of this operation, this field will be populated.
pub error: Option<OperationError>,
/// [Output only] If operation fails, the HTTP error message returned.
#[serde(rename="httpErrorMessage")]
pub http_error_message: Option<String>,
/// [Output only] If operation fails, the HTTP error status code returned.
#[serde(rename="httpErrorStatusCode")]
pub http_error_status_code: Option<i32>,
/// [Output Only] Unique identifier for the resource, generated by the server.
pub id: Option<String>,
/// [Output Only] The time that this operation was requested, in RFC3339 text format.
#[serde(rename="insertTime")]
pub insert_time: Option<String>,
/// [Output only] Type of the resource.
pub kind: Option<String>,
/// [Output Only] Name of the resource.
pub name: Option<String>,
/// [Output only] Type of the operation. Operations include insert, update, and delete.
#[serde(rename="operationType")]
pub operation_type: Option<String>,
/// [Output only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess at when the operation will be complete. This number should be monotonically increasing as the operation progresses.
pub progress: Option<i32>,
/// [Output Only] URL of the region where the operation resides. Only available when performing regional operations.
pub region: Option<String>,
/// [Output Only] Server-defined fully-qualified URL for this resource.
#[serde(rename="selfLink")]
pub self_link: Option<String>,
/// [Output Only] The time that this operation was started by the server, in RFC3339 text format.
#[serde(rename="startTime")]
pub start_time: Option<String>,
/// [Output Only] Status of the operation.
pub status: Option<String>,
/// [Output Only] An optional textual description of the current status of the operation.
#[serde(rename="statusMessage")]
pub status_message: Option<String>,
/// [Output Only] Unique target ID which identifies a particular incarnation of the target.
#[serde(rename="targetId")]
pub target_id: Option<String>,
/// [Output only] URL of the resource the operation is mutating.
#[serde(rename="targetLink")]
pub target_link: Option<String>,
    /// [Output Only] User who requested the operation, for example: user@example.com.
pub user: Option<String>,
/// [Output Only] If there are issues with this operation, a warning is returned.
pub warnings: Option<Vec<OperationWarnings>>,
/// [Output Only] URL of the zone where the operation resides. Only available when performing per-zone operations.
pub zone: Option<String>,
}
impl client::ResponseResult for Operation {}
/// There is no detailed description.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [list zone operations](ZoneOperationListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct OperationList {
/// Unique identifier for the resource; defined by the server (output only).
pub id: Option<String>,
/// The operation resources.
pub items: Option<Vec<Operation>>,
/// Type of resource.
pub kind: Option<String>,
/// A token used to continue a truncated list request (output only).
#[serde(rename="nextPageToken")]
pub next_page_token: Option<String>,
/// Server defined URL for this resource (output only).
#[serde(rename="selfLink")]
pub self_link: Option<String>,
}
impl client::ResponseResult for OperationList {}
/// The resource view object.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [get zone views](ZoneViewGetCall) (response)
/// * [insert zone views](ZoneViewInsertCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ResourceView {
/// The creation time of the resource view.
#[serde(rename="creationTimestamp")]
pub creation_timestamp: Option<String>,
/// The detailed description of the resource view.
pub description: Option<String>,
/// Services endpoint information.
pub endpoints: Option<Vec<ServiceEndpoint>>,
/// The fingerprint of the service endpoint information.
pub fingerprint: Option<String>,
/// [Output Only] The ID of the resource view.
pub id: Option<String>,
/// Type of the resource.
pub kind: Option<String>,
/// The labels for events.
pub labels: Option<Vec<Label>>,
/// The name of the resource view.
pub name: Option<String>,
/// The URL of a Compute Engine network to which the resources in the view belong.
pub network: Option<String>,
/// A list of all resources in the resource view.
pub resources: Option<Vec<String>>,
/// [Output Only] A self-link to the resource view.
#[serde(rename="selfLink")]
pub self_link: Option<String>,
/// The total number of resources in the resource view.
pub size: Option<u32>,
}
impl client::RequestValue for ResourceView {}
impl client::ResponseResult for ResourceView {}
/// The service endpoint that may be started in a VM.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ServiceEndpoint {
/// The name of the service endpoint.
pub name: Option<String>,
/// The port of the service endpoint.
pub port: Option<i32>,
}
impl client::Part for ServiceEndpoint {}
/// The request to add resources to the resource view.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [add resources zone views](ZoneViewAddResourceCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ZoneViewsAddResourcesRequest {
/// The list of resources to be added.
pub resources: Option<Vec<String>>,
}
impl client::RequestValue for ZoneViewsAddResourcesRequest {}
/// There is no detailed description.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [get service zone views](ZoneViewGetServiceCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ZoneViewsGetServiceResponse {
/// The service information.
pub endpoints: Option<Vec<ServiceEndpoint>>,
/// The fingerprint of the service information.
pub fingerprint: Option<String>,
}
impl client::ResponseResult for ZoneViewsGetServiceResponse {}
/// The response to a list request.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [list zone views](ZoneViewListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ZoneViewsList {
/// The result that contains all resource views that meet the criteria.
pub items: Option<Vec<ResourceView>>,
/// Type of resource.
pub kind: Option<String>,
/// A token used for pagination.
#[serde(rename="nextPageToken")]
pub next_page_token: Option<String>,
/// Server defined URL for this resource (output only).
#[serde(rename="selfLink")]
pub self_link: Option<String>,
}
impl client::ResponseResult for ZoneViewsList {}
/// The response to a list resource request.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [list resources zone views](ZoneViewListResourceCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ZoneViewsListResourcesResponse {
/// The formatted JSON that is requested by the user.
pub items: Option<Vec<ListResourceResponseItem>>,
/// The URL of a Compute Engine network to which the resources in the view belong.
pub network: Option<String>,
/// A token used for pagination.
#[serde(rename="nextPageToken")]
pub next_page_token: Option<String>,
}
impl client::ResponseResult for ZoneViewsListResourcesResponse {}
/// The request to remove resources from the resource view.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [remove resources zone views](ZoneViewRemoveResourceCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ZoneViewsRemoveResourcesRequest {
/// The list of resources to be removed.
pub resources: Option<Vec<String>>,
}
impl client::RequestValue for ZoneViewsRemoveResourcesRequest {}
/// There is no detailed description.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [set service zone views](ZoneViewSetServiceCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ZoneViewsSetServiceRequest {
/// The service information to be updated.
pub endpoints: Option<Vec<ServiceEndpoint>>,
/// Fingerprint of the service information; a hash of the contents. This field is used for optimistic locking when updating the service entries.
pub fingerprint: Option<String>,
/// The name of the resource if user wants to update the service information of the resource.
#[serde(rename="resourceName")]
pub resource_name: Option<String>,
}
impl client::RequestValue for ZoneViewsSetServiceRequest {}
/// [Output Only] If errors occurred during processing of this operation, this field will be populated.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct OperationError {
/// [Output Only] The array of errors encountered while processing this operation.
pub errors: Option<Vec<OperationErrorErrors>>,
}
impl client::NestedType for OperationError {}
impl client::Part for OperationError {}
/// [Output Only] The array of errors encountered while processing this operation.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct OperationErrorErrors {
/// [Output Only] The error type identifier for this error.
pub code: Option<String>,
/// [Output Only] Indicates the field in the request which caused the error. This property is optional.
pub location: Option<String>,
/// [Output Only] An optional, human-readable error message.
pub message: Option<String>,
}
impl client::NestedType for OperationErrorErrors {}
impl client::Part for OperationErrorErrors {}
/// [Output Only] If there are issues with this operation, a warning is returned.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct OperationWarnings {
/// [Output only] The warning type identifier for this warning.
pub code: Option<String>,
/// [Output only] Metadata for this warning in key:value format.
pub data: Option<Vec<OperationWarningsData>>,
/// [Output only] Optional human-readable details for this warning.
pub message: Option<String>,
}
impl client::NestedType for OperationWarnings {}
impl client::Part for OperationWarnings {}
/// [Output only] Metadata for this warning in key:value format.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct OperationWarningsData {
/// [Output Only] Metadata key for this warning.
pub key: Option<String>,
/// [Output Only] Metadata value for this warning.
pub value: Option<String>,
}
impl client::NestedType for OperationWarningsData {}
impl client::Part for OperationWarningsData {}
// ###################
// MethodBuilders ###
// #################
/// A builder providing access to all methods supported on *zoneOperation* resources.
/// It is not used directly, but through the `Resourceviews` hub.
///
/// # Example
///
/// Instantiate a resource builder
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
///
/// # async fn dox() {
/// use std::default::Default;
/// use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// let secret: oauth2::ApplicationSecret = Default::default();
/// let auth = oauth2::InstalledFlowAuthenticator::builder(
/// secret,
/// oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// ).build().await.unwrap();
/// let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders*
/// // like `get(...)` and `list(...)`
/// // to build up your call.
/// let rb = hub.zone_operations();
/// # }
/// ```
pub struct ZoneOperationMethods<'a> {
hub: &'a Resourceviews<>,
}
impl<'a> client::MethodsBuilder for ZoneOperationMethods<'a> {}
impl<'a> ZoneOperationMethods<'a> {
/// Create a builder to help you perform the following task:
///
/// Retrieves the specified zone-specific operation resource.
///
/// # Arguments
///
/// * `project` - Name of the project scoping this request.
/// * `zone` - Name of the zone scoping this request.
/// * `operation` - Name of the operation resource to return.
pub fn get(&self, project: &str, zone: &str, operation: &str) -> ZoneOperationGetCall<'a> {
ZoneOperationGetCall {
hub: self.hub,
_project: project.to_string(),
_zone: zone.to_string(),
_operation: operation.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Retrieves the list of operation resources contained within the specified zone.
///
/// # Arguments
///
/// * `project` - Name of the project scoping this request.
/// * `zone` - Name of the zone scoping this request.
pub fn list(&self, project: &str, zone: &str) -> ZoneOperationListCall<'a> {
ZoneOperationListCall {
hub: self.hub,
_project: project.to_string(),
_zone: zone.to_string(),
_page_token: Default::default(),
_max_results: Default::default(),
_filter: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
}
/// A builder providing access to all methods supported on *zoneView* resources.
/// It is not used directly, but through the `Resourceviews` hub.
///
/// # Example
///
/// Instantiate a resource builder
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
///
/// # async fn dox() {
/// use std::default::Default;
/// use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// let secret: oauth2::ApplicationSecret = Default::default();
/// let auth = oauth2::InstalledFlowAuthenticator::builder(
/// secret,
/// oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// ).build().await.unwrap();
/// let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders*
/// // like `add_resources(...)`, `delete(...)`, `get(...)`, `get_service(...)`, `insert(...)`, `list(...)`, `list_resources(...)`, `remove_resources(...)` and `set_service(...)`
/// // to build up your call.
/// let rb = hub.zone_views();
/// # }
/// ```
pub struct ZoneViewMethods<'a> {
hub: &'a Resourceviews<>,
}
impl<'a> client::MethodsBuilder for ZoneViewMethods<'a> {}
impl<'a> ZoneViewMethods<'a> {
/// Create a builder to help you perform the following task:
///
/// Add resources to the view.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `project` - The project name of the resource view.
/// * `zone` - The zone name of the resource view.
/// * `resourceView` - The name of the resource view.
pub fn add_resources(&self, request: ZoneViewsAddResourcesRequest, project: &str, zone: &str, resource_view: &str) -> ZoneViewAddResourceCall<'a> {
ZoneViewAddResourceCall {
hub: self.hub,
_request: request,
_project: project.to_string(),
_zone: zone.to_string(),
_resource_view: resource_view.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Delete a resource view.
///
/// # Arguments
///
/// * `project` - The project name of the resource view.
/// * `zone` - The zone name of the resource view.
/// * `resourceView` - The name of the resource view.
pub fn delete(&self, project: &str, zone: &str, resource_view: &str) -> ZoneViewDeleteCall<'a> {
ZoneViewDeleteCall {
hub: self.hub,
_project: project.to_string(),
_zone: zone.to_string(),
_resource_view: resource_view.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Get the information of a zonal resource view.
///
/// # Arguments
///
/// * `project` - The project name of the resource view.
/// * `zone` - The zone name of the resource view.
/// * `resourceView` - The name of the resource view.
pub fn get(&self, project: &str, zone: &str, resource_view: &str) -> ZoneViewGetCall<'a> {
ZoneViewGetCall {
hub: self.hub,
_project: project.to_string(),
_zone: zone.to_string(),
_resource_view: resource_view.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Get the service information of a resource view or a resource.
///
/// # Arguments
///
/// * `project` - The project name of the resource view.
/// * `zone` - The zone name of the resource view.
/// * `resourceView` - The name of the resource view.
pub fn get_service(&self, project: &str, zone: &str, resource_view: &str) -> ZoneViewGetServiceCall<'a> {
ZoneViewGetServiceCall {
hub: self.hub,
_project: project.to_string(),
_zone: zone.to_string(),
_resource_view: resource_view.to_string(),
_resource_name: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Create a resource view.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `project` - The project name of the resource view.
/// * `zone` - The zone name of the resource view.
pub fn insert(&self, request: ResourceView, project: &str, zone: &str) -> ZoneViewInsertCall<'a> {
ZoneViewInsertCall {
hub: self.hub,
_request: request,
_project: project.to_string(),
_zone: zone.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// List resource views.
///
/// # Arguments
///
/// * `project` - The project name of the resource view.
/// * `zone` - The zone name of the resource view.
pub fn list(&self, project: &str, zone: &str) -> ZoneViewListCall<'a> {
ZoneViewListCall {
hub: self.hub,
_project: project.to_string(),
_zone: zone.to_string(),
_page_token: Default::default(),
_max_results: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// List the resources of the resource view.
///
/// # Arguments
///
/// * `project` - The project name of the resource view.
/// * `zone` - The zone name of the resource view.
/// * `resourceView` - The name of the resource view.
pub fn list_resources(&self, project: &str, zone: &str, resource_view: &str) -> ZoneViewListResourceCall<'a> {
ZoneViewListResourceCall {
hub: self.hub,
_project: project.to_string(),
_zone: zone.to_string(),
_resource_view: resource_view.to_string(),
_service_name: Default::default(),
_page_token: Default::default(),
_max_results: Default::default(),
_list_state: Default::default(),
_format: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Remove resources from the view.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `project` - The project name of the resource view.
/// * `zone` - The zone name of the resource view.
/// * `resourceView` - The name of the resource view.
pub fn remove_resources(&self, request: ZoneViewsRemoveResourcesRequest, project: &str, zone: &str, resource_view: &str) -> ZoneViewRemoveResourceCall<'a> {
ZoneViewRemoveResourceCall {
hub: self.hub,
_request: request,
_project: project.to_string(),
_zone: zone.to_string(),
_resource_view: resource_view.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Update the service information of a resource view or a resource.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `project` - The project name of the resource view.
/// * `zone` - The zone name of the resource view.
/// * `resourceView` - The name of the resource view.
pub fn set_service(&self, request: ZoneViewsSetServiceRequest, project: &str, zone: &str, resource_view: &str) -> ZoneViewSetServiceCall<'a> {
ZoneViewSetServiceCall {
hub: self.hub,
_request: request,
_project: project.to_string(),
_zone: zone.to_string(),
_resource_view: resource_view.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
}
// ###################
// CallBuilders ###
// #################
/// Retrieves the specified zone-specific operation resource.
///
/// A builder for the *get* method supported by a *zoneOperation* resource.
/// It is not used directly, but through a `ZoneOperationMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// # async fn dox() {
/// # use std::default::Default;
/// # use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.zone_operations().get("project", "zone", "operation")
/// .doit().await;
/// # }
/// ```
pub struct ZoneOperationGetCall<'a> {
hub: &'a Resourceviews<>,
_project: String,
_zone: String,
_operation: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ZoneOperationGetCall<'a> {}
impl<'a> ZoneOperationGetCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Operation)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "resourceviews.zoneOperations.get",
http_method: hyper::Method::GET });
let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
params.push(("project", self._project.to_string()));
params.push(("zone", self._zone.to_string()));
params.push(("operation", self._operation.to_string()));
for &field in ["alt", "project", "zone", "operation"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "{project}/zones/{zone}/operations/{operation}";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::ComputeReadonly.as_ref().to_string(), ());
}
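        // Substitute each `{param}` placeholder in the URL template with the
        // matching value collected in `params` above.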
for &(find_this, param_name) in [("{project}", "project"), ("{zone}", "zone"), ("{operation}", "operation")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3);
for param_name in ["operation", "zone", "project"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
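            // Retry loop: on transport errors or failed responses the
            // delegate may ask for another attempt after a delay; otherwise
            // the error is returned to the caller.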
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Name of the project scoping this request.
///
/// Sets the *project* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project(mut self, new_value: &str) -> ZoneOperationGetCall<'a> {
self._project = new_value.to_string();
self
}
/// Name of the zone scoping this request.
///
/// Sets the *zone* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn zone(mut self, new_value: &str) -> ZoneOperationGetCall<'a> {
self._zone = new_value.to_string();
self
}
/// Name of the operation resource to return.
///
/// Sets the *operation* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn operation(mut self, new_value: &str) -> ZoneOperationGetCall<'a> {
self._operation = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ZoneOperationGetCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *alt* (query-string) - Data format for the response.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits.
pub fn param<T>(mut self, name: T, value: T) -> ZoneOperationGetCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::ComputeReadonly`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, while a read-write scope will work as well.
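    ///
    /// # Example
    ///
    /// A minimal sketch (all values are placeholders), assuming `hub` was
    /// built as shown in the `Resourceviews` example:
    ///
    /// ```test_harness,no_run
    /// # use resourceviews1_beta2::Resourceviews;
    /// # async fn dox(hub: Resourceviews<>) {
    /// let result = hub.zone_operations().get("project", "zone", "operation")
    ///     .add_scope("https://www.googleapis.com/auth/compute")
    ///     .doit().await;
    /// # }
    /// ```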
pub fn add_scope<T, S>(mut self, scope: T) -> ZoneOperationGetCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Retrieves the list of operation resources contained within the specified zone.
///
/// A builder for the *list* method supported by a *zoneOperation* resource.
/// It is not used directly, but through a `ZoneOperationMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// # async fn dox() {
/// # use std::default::Default;
/// # use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.zone_operations().list("project", "zone")
/// .page_token("Lorem")
/// .max_results(89)
/// .filter("eos")
/// .doit().await;
/// # }
/// ```
pub struct ZoneOperationListCall<'a> {
hub: &'a Resourceviews<>,
_project: String,
_zone: String,
_page_token: Option<String>,
_max_results: Option<u32>,
_filter: Option<String>,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ZoneOperationListCall<'a> {}
impl<'a> ZoneOperationListCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, OperationList)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "resourceviews.zoneOperations.list",
http_method: hyper::Method::GET });
let mut params: Vec<(&str, String)> = Vec::with_capacity(7 + self._additional_params.len());
params.push(("project", self._project.to_string()));
params.push(("zone", self._zone.to_string()));
if let Some(value) = self._page_token {
params.push(("pageToken", value.to_string()));
}
if let Some(value) = self._max_results {
params.push(("maxResults", value.to_string()));
}
if let Some(value) = self._filter {
params.push(("filter", value.to_string()));
}
for &field in ["alt", "project", "zone", "pageToken", "maxResults", "filter"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "{project}/zones/{zone}/operations";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::ComputeReadonly.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{project}", "project"), ("{zone}", "zone")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(2);
for param_name in ["zone", "project"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Name of the project scoping this request.
///
/// Sets the *project* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project(mut self, new_value: &str) -> ZoneOperationListCall<'a> {
self._project = new_value.to_string();
self
}
/// Name of the zone scoping this request.
///
/// Sets the *zone* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn zone(mut self, new_value: &str) -> ZoneOperationListCall<'a> {
self._zone = new_value.to_string();
self
}
/// Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.
///
/// Sets the *page token* query property to the given value.
pub fn page_token(mut self, new_value: &str) -> ZoneOperationListCall<'a> {
self._page_token = Some(new_value.to_string());
self
}
/// Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.
///
/// Sets the *max results* query property to the given value.
pub fn max_results(mut self, new_value: u32) -> ZoneOperationListCall<'a> {
self._max_results = Some(new_value);
self
}
/// Optional. Filter expression for filtering listed resources.
///
/// Sets the *filter* query property to the given value.
pub fn filter(mut self, new_value: &str) -> ZoneOperationListCall<'a> {
self._filter = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ZoneOperationListCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *alt* (query-string) - Data format for the response.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits.
pub fn param<T>(mut self, name: T, value: T) -> ZoneOperationListCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::ComputeReadonly`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, while a read-write scope will work as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ZoneOperationListCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Add resources to the view.
///
/// A builder for the *addResources* method supported by a *zoneView* resource.
/// It is not used directly, but through a `ZoneViewMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// use resourceviews1_beta2::api::ZoneViewsAddResourcesRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = ZoneViewsAddResourcesRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.zone_views().add_resources(req, "project", "zone", "resourceView")
/// .doit().await;
/// # }
/// ```
pub struct ZoneViewAddResourceCall<'a> {
hub: &'a Resourceviews<>,
_request: ZoneViewsAddResourcesRequest,
_project: String,
_zone: String,
_resource_view: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ZoneViewAddResourceCall<'a> {}
impl<'a> ZoneViewAddResourceCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Operation)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "resourceviews.zoneViews.addResources",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
params.push(("project", self._project.to_string()));
params.push(("zone", self._zone.to_string()));
params.push(("resourceView", self._resource_view.to_string()));
for &field in ["alt", "project", "zone", "resourceView"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "{project}/zones/{zone}/resourceViews/{resourceView}/addResources";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{project}", "project"), ("{zone}", "zone"), ("{resourceView}", "resourceView")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3);
for param_name in ["resourceView", "zone", "project"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
        let json_mime_type: mime::Mime = "application/json".parse().unwrap();
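        // Serialize the request body to JSON, stripping null values so that
        // optional fields which were never set are omitted from the payload.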
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
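        // Retry loop: every iteration fetches a (possibly cached) token, issues
        // the request, and lets the delegate decide whether to retry on failure.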
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
                        .header(CONTENT_TYPE, json_mime_type.to_string())
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
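            // Transport-level errors and HTTP-level failures are handled separately;
            // in both cases the delegate may schedule a retry via `Retry::After`.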
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: ZoneViewsAddResourcesRequest) -> ZoneViewAddResourceCall<'a> {
self._request = new_value;
self
}
/// The project name of the resource view.
///
/// Sets the *project* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project(mut self, new_value: &str) -> ZoneViewAddResourceCall<'a> {
self._project = new_value.to_string();
self
}
/// The zone name of the resource view.
///
/// Sets the *zone* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn zone(mut self, new_value: &str) -> ZoneViewAddResourceCall<'a> {
self._zone = new_value.to_string();
self
}
/// The name of the resource view.
///
/// Sets the *resource view* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn resource_view(mut self, new_value: &str) -> ZoneViewAddResourceCall<'a> {
self._resource_view = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ZoneViewAddResourceCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *alt* (query-string) - Data format for the response.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits.
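    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `hub` and `req` are set up as in the builder example
    /// above; the parameter values are illustrative only:
    ///
    /// ```ignore
    /// let call = hub.zone_views().add_resources(req, "project", "zone", "resourceView")
    ///     .param("fields", "name")
    ///     .param("prettyPrint", "false");
    /// ```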
pub fn param<T>(mut self, name: T, value: T) -> ZoneViewAddResourceCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope is
    /// sufficient, while a read-write scope works as well.
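    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `hub` and `req` are set up as in the builder example
    /// above and `Scope` is imported from the crate root:
    ///
    /// ```ignore
    /// // Name the default scope explicitly:
    /// let call = hub.zone_views().add_resources(req, "project", "zone", "resourceView")
    ///     .add_scope(Scope::CloudPlatform);
    /// // Passing `None` instead removes all scopes; supply an API key via
    /// // `.param("key", ...)` in that case:
    /// // .add_scope(None::<Scope>)
    /// ```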
pub fn add_scope<T, S>(mut self, scope: T) -> ZoneViewAddResourceCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Delete a resource view.
///
/// A builder for the *delete* method supported by a *zoneView* resource.
/// It is not used directly, but through a `ZoneViewMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// # async fn dox() {
/// # use std::default::Default;
/// # use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.zone_views().delete("project", "zone", "resourceView")
/// .doit().await;
/// # }
/// ```
pub struct ZoneViewDeleteCall<'a> {
    hub: &'a Resourceviews,
_project: String,
_zone: String,
_resource_view: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ZoneViewDeleteCall<'a> {}
impl<'a> ZoneViewDeleteCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Operation)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "resourceviews.zoneViews.delete",
http_method: hyper::Method::DELETE });
let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
params.push(("project", self._project.to_string()));
params.push(("zone", self._zone.to_string()));
params.push(("resourceView", self._resource_view.to_string()));
for &field in ["alt", "project", "zone", "resourceView"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "{project}/zones/{zone}/resourceViews/{resourceView}";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{project}", "project"), ("{zone}", "zone"), ("{resourceView}", "resourceView")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3);
for param_name in ["resourceView", "zone", "project"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// The project name of the resource view.
///
/// Sets the *project* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project(mut self, new_value: &str) -> ZoneViewDeleteCall<'a> {
self._project = new_value.to_string();
self
}
/// The zone name of the resource view.
///
/// Sets the *zone* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn zone(mut self, new_value: &str) -> ZoneViewDeleteCall<'a> {
self._zone = new_value.to_string();
self
}
/// The name of the resource view.
///
/// Sets the *resource view* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn resource_view(mut self, new_value: &str) -> ZoneViewDeleteCall<'a> {
self._resource_view = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ZoneViewDeleteCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *alt* (query-string) - Data format for the response.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits.
pub fn param<T>(mut self, name: T, value: T) -> ZoneViewDeleteCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope is
    /// sufficient, while a read-write scope works as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ZoneViewDeleteCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Get the information of a zonal resource view.
///
/// A builder for the *get* method supported by a *zoneView* resource.
/// It is not used directly, but through a `ZoneViewMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// # async fn dox() {
/// # use std::default::Default;
/// # use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.zone_views().get("project", "zone", "resourceView")
/// .doit().await;
/// # }
/// ```
pub struct ZoneViewGetCall<'a> {
    hub: &'a Resourceviews,
_project: String,
_zone: String,
_resource_view: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ZoneViewGetCall<'a> {}
impl<'a> ZoneViewGetCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ResourceView)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "resourceviews.zoneViews.get",
http_method: hyper::Method::GET });
let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
params.push(("project", self._project.to_string()));
params.push(("zone", self._zone.to_string()));
params.push(("resourceView", self._resource_view.to_string()));
for &field in ["alt", "project", "zone", "resourceView"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "{project}/zones/{zone}/resourceViews/{resourceView}";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::ComputeReadonly.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{project}", "project"), ("{zone}", "zone"), ("{resourceView}", "resourceView")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3);
for param_name in ["resourceView", "zone", "project"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// The project name of the resource view.
///
/// Sets the *project* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project(mut self, new_value: &str) -> ZoneViewGetCall<'a> {
self._project = new_value.to_string();
self
}
/// The zone name of the resource view.
///
/// Sets the *zone* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn zone(mut self, new_value: &str) -> ZoneViewGetCall<'a> {
self._zone = new_value.to_string();
self
}
/// The name of the resource view.
///
/// Sets the *resource view* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn resource_view(mut self, new_value: &str) -> ZoneViewGetCall<'a> {
self._resource_view = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ZoneViewGetCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *alt* (query-string) - Data format for the response.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits.
pub fn param<T>(mut self, name: T, value: T) -> ZoneViewGetCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::ComputeReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope is
    /// sufficient, while a read-write scope works as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ZoneViewGetCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Get the service information of a resource view or a resource.
///
/// A builder for the *getService* method supported by a *zoneView* resource.
/// It is not used directly, but through a `ZoneViewMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// # async fn dox() {
/// # use std::default::Default;
/// # use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.zone_views().get_service("project", "zone", "resourceView")
/// .resource_name("ipsum")
/// .doit().await;
/// # }
/// ```
pub struct ZoneViewGetServiceCall<'a> {
    hub: &'a Resourceviews,
_project: String,
_zone: String,
_resource_view: String,
_resource_name: Option<String>,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ZoneViewGetServiceCall<'a> {}
impl<'a> ZoneViewGetServiceCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ZoneViewsGetServiceResponse)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "resourceviews.zoneViews.getService",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
params.push(("project", self._project.to_string()));
params.push(("zone", self._zone.to_string()));
params.push(("resourceView", self._resource_view.to_string()));
if let Some(value) = self._resource_name {
params.push(("resourceName", value.to_string()));
}
for &field in ["alt", "project", "zone", "resourceView", "resourceName"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "{project}/zones/{zone}/resourceViews/{resourceView}/getService";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{project}", "project"), ("{zone}", "zone"), ("{resourceView}", "resourceView")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3);
for param_name in ["resourceView", "zone", "project"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// The project name of the resource view.
///
/// Sets the *project* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project(mut self, new_value: &str) -> ZoneViewGetServiceCall<'a> {
self._project = new_value.to_string();
self
}
/// The zone name of the resource view.
///
/// Sets the *zone* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn zone(mut self, new_value: &str) -> ZoneViewGetServiceCall<'a> {
self._zone = new_value.to_string();
self
}
/// The name of the resource view.
///
/// Sets the *resource view* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn resource_view(mut self, new_value: &str) -> ZoneViewGetServiceCall<'a> {
self._resource_view = new_value.to_string();
self
}
    /// The name of the resource, if the user wants to get the service information of that resource.
///
/// Sets the *resource name* query property to the given value.
pub fn resource_name(mut self, new_value: &str) -> ZoneViewGetServiceCall<'a> {
self._resource_name = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ZoneViewGetServiceCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *alt* (query-string) - Data format for the response.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits.
pub fn param<T>(mut self, name: T, value: T) -> ZoneViewGetServiceCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope is
    /// sufficient, while a read-write scope works as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ZoneViewGetServiceCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Create a resource view.
///
/// A builder for the *insert* method supported by a *zoneView* resource.
/// It is not used directly, but through a `ZoneViewMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// use resourceviews1_beta2::api::ResourceView;
/// # async fn dox() {
/// # use std::default::Default;
/// # use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // As the method needs a request, you would usually fill the respective structure
/// // with the desired information. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = ResourceView::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.zone_views().insert(req, "project", "zone")
/// .doit().await;
/// # }
/// ```
pub struct ZoneViewInsertCall<'a> {
    hub: &'a Resourceviews,
_request: ResourceView,
_project: String,
_zone: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ZoneViewInsertCall<'a> {}
impl<'a> ZoneViewInsertCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Operation)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "resourceviews.zoneViews.insert",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
params.push(("project", self._project.to_string()));
params.push(("zone", self._zone.to_string()));
for &field in ["alt", "project", "zone"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "{project}/zones/{zone}/resourceViews";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{project}", "project"), ("{zone}", "zone")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(2);
for param_name in ["zone", "project"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
        let json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
                        .header(CONTENT_TYPE, json_mime_type.to_string())
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: ResourceView) -> ZoneViewInsertCall<'a> {
self._request = new_value;
self
}
/// The project name of the resource view.
///
/// Sets the *project* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project(mut self, new_value: &str) -> ZoneViewInsertCall<'a> {
self._project = new_value.to_string();
self
}
/// The zone name of the resource view.
///
/// Sets the *zone* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn zone(mut self, new_value: &str) -> ZoneViewInsertCall<'a> {
self._zone = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ZoneViewInsertCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *alt* (query-string) - Data format for the response.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits.
pub fn param<T>(mut self, name: T, value: T) -> ZoneViewInsertCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope is
    /// sufficient, while a read-write scope works as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ZoneViewInsertCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// List resource views.
///
/// A builder for the *list* method supported by a *zoneView* resource.
/// It is not used directly, but through a `ZoneViewMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// # async fn dox() {
/// # use std::default::Default;
/// # use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.zone_views().list("project", "zone")
/// .page_token("dolor")
/// .max_results(-56)
/// .doit().await;
/// # }
/// ```
pub struct ZoneViewListCall<'a> {
    hub: &'a Resourceviews,
_project: String,
_zone: String,
_page_token: Option<String>,
_max_results: Option<i32>,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ZoneViewListCall<'a> {}
impl<'a> ZoneViewListCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ZoneViewsList)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "resourceviews.zoneViews.list",
http_method: hyper::Method::GET });
let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
params.push(("project", self._project.to_string()));
params.push(("zone", self._zone.to_string()));
if let Some(value) = self._page_token {
params.push(("pageToken", value.to_string()));
}
if let Some(value) = self._max_results {
params.push(("maxResults", value.to_string()));
}
for &field in ["alt", "project", "zone", "pageToken", "maxResults"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "{project}/zones/{zone}/resourceViews";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::ComputeReadonly.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{project}", "project"), ("{zone}", "zone")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(2);
for param_name in ["zone", "project"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// The project name of the resource view.
///
/// Sets the *project* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project(mut self, new_value: &str) -> ZoneViewListCall<'a> {
self._project = new_value.to_string();
self
}
/// The zone name of the resource view.
///
/// Sets the *zone* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn zone(mut self, new_value: &str) -> ZoneViewListCall<'a> {
self._zone = new_value.to_string();
self
}
/// Specifies a nextPageToken returned by a previous list request. This token can be used to request the next page of results from a previous list request.
///
/// Sets the *page token* query property to the given value.
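    ///
    /// A minimal paging sketch, assuming the generated `ZoneViewsList` exposes a
    /// `next_page_token` field, as list responses in this crate usually do:
    ///
    /// ```ignore
    /// let mut page_token: Option<String> = None;
    /// loop {
    ///     let mut call = hub.zone_views().list("project", "zone");
    ///     if let Some(ref token) = page_token {
    ///         call = call.page_token(token);
    ///     }
    ///     let (_, list) = call.doit().await?;
    ///     // ... consume `list` here ...
    ///     match list.next_page_token {
    ///         Some(token) => page_token = Some(token),
    ///         None => break,
    ///     }
    /// }
    /// ```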
pub fn page_token(mut self, new_value: &str) -> ZoneViewListCall<'a> {
self._page_token = Some(new_value.to_string());
self
}
/// Maximum count of results to be returned. Acceptable values are 0 to 5000, inclusive. (Default: 5000)
///
/// Sets the *max results* query property to the given value.
pub fn max_results(mut self, new_value: i32) -> ZoneViewListCall<'a> {
self._max_results = Some(new_value);
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ZoneViewListCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *alt* (query-string) - Data format for the response.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits.
pub fn param<T>(mut self, name: T, value: T) -> ZoneViewListCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::ComputeReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope is
    /// sufficient, while a read-write scope works as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ZoneViewListCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// List the resources of the resource view.
///
/// A builder for the *listResources* method supported by a *zoneView* resource.
/// It is not used directly, but through a `ZoneViewMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// # async fn dox() {
/// # use std::default::Default;
/// # use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.zone_views().list_resources("project", "zone", "resourceView")
/// .service_name("duo")
/// .page_token("sed")
/// .max_results(-61)
/// .list_state("Stet")
/// .format("kasd")
/// .doit().await;
/// # }
/// ```
pub struct ZoneViewListResourceCall<'a> {
    hub: &'a Resourceviews,
_project: String,
_zone: String,
_resource_view: String,
_service_name: Option<String>,
_page_token: Option<String>,
_max_results: Option<i32>,
_list_state: Option<String>,
_format: Option<String>,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ZoneViewListResourceCall<'a> {}
impl<'a> ZoneViewListResourceCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ZoneViewsListResourcesResponse)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "resourceviews.zoneViews.listResources",
http_method: hyper::Method::GET });
let mut params: Vec<(&str, String)> = Vec::with_capacity(10 + self._additional_params.len());
params.push(("project", self._project.to_string()));
params.push(("zone", self._zone.to_string()));
params.push(("resourceView", self._resource_view.to_string()));
if let Some(value) = self._service_name {
params.push(("serviceName", value.to_string()));
}
if let Some(value) = self._page_token {
params.push(("pageToken", value.to_string()));
}
if let Some(value) = self._max_results {
params.push(("maxResults", value.to_string()));
}
if let Some(value) = self._list_state {
params.push(("listState", value.to_string()));
}
if let Some(value) = self._format {
params.push(("format", value.to_string()));
}
for &field in ["alt", "project", "zone", "resourceView", "serviceName", "pageToken", "maxResults", "listState", "format"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "{project}/zones/{zone}/resourceViews/{resourceView}/resources";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::ComputeReadonly.as_ref().to_string(), ());
}
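        // Substitute the {project}, {zone} and {resourceView} placeholders in the
        // URL template with the matching values from `params`, then drop those
        // entries so they are not repeated as query parameters.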
for &(find_this, param_name) in [("{project}", "project"), ("{zone}", "zone"), ("{resourceView}", "resourceView")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3);
for param_name in ["resourceView", "zone", "project"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
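        // Request loop: each iteration obtains an access token, performs the HTTP
        // request, and consults the delegate on failure to decide whether to retry.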
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
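                    // Buffer the body so the response can be rebuilt for the delegate,
                    // which may ask for a retry after a server-side failure.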
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// The project name of the resource view.
///
/// Sets the *project* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project(mut self, new_value: &str) -> ZoneViewListResourceCall<'a> {
self._project = new_value.to_string();
self
}
/// The zone name of the resource view.
///
/// Sets the *zone* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn zone(mut self, new_value: &str) -> ZoneViewListResourceCall<'a> {
self._zone = new_value.to_string();
self
}
/// The name of the resource view.
///
/// Sets the *resource view* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn resource_view(mut self, new_value: &str) -> ZoneViewListResourceCall<'a> {
self._resource_view = new_value.to_string();
self
}
/// The service name to return in the response. It is optional and if it is not set, all the service end points will be returned.
///
/// Sets the *service name* query property to the given value.
pub fn service_name(mut self, new_value: &str) -> ZoneViewListResourceCall<'a> {
self._service_name = Some(new_value.to_string());
self
}
/// Specifies a nextPageToken returned by a previous list request. This token can be used to request the next page of results from a previous list request.
///
/// Sets the *page token* query property to the given value.
pub fn page_token(mut self, new_value: &str) -> ZoneViewListResourceCall<'a> {
self._page_token = Some(new_value.to_string());
self
}
/// Maximum count of results to be returned. Acceptable values are 0 to 5000, inclusive. (Default: 5000)
///
/// Sets the *max results* query property to the given value.
pub fn max_results(mut self, new_value: i32) -> ZoneViewListResourceCall<'a> {
self._max_results = Some(new_value);
self
}
/// The state of the instance to list. By default, it lists all instances.
///
/// Sets the *list state* query property to the given value.
pub fn list_state(mut self, new_value: &str) -> ZoneViewListResourceCall<'a> {
self._list_state = Some(new_value.to_string());
self
}
/// The requested format of the return value. It can be URL or URL_PORT. A JSON object will be included in the response based on the format. The default format is NONE, which results in no JSON in the response.
///
/// Sets the *format* query property to the given value.
pub fn format(mut self, new_value: &str) -> ZoneViewListResourceCall<'a> {
self._format = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ZoneViewListResourceCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *alt* (query-string) - Data format for the response.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits.
pub fn param<T>(mut self, name: T, value: T) -> ZoneViewListResourceCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::ComputeReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ZoneViewListResourceCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Remove resources from the view.
///
/// A builder for the *removeResources* method supported by a *zoneView* resource.
/// It is not used directly, but through a `ZoneViewMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// use resourceviews1_beta2::api::ZoneViewsRemoveResourcesRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = ZoneViewsRemoveResourcesRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.zone_views().remove_resources(req, "project", "zone", "resourceView")
/// .doit().await;
/// # }
/// ```
pub struct ZoneViewRemoveResourceCall<'a> {
    hub: &'a Resourceviews,
_request: ZoneViewsRemoveResourcesRequest,
_project: String,
_zone: String,
_resource_view: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ZoneViewRemoveResourceCall<'a> {}
impl<'a> ZoneViewRemoveResourceCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Operation)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "resourceviews.zoneViews.removeResources",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
params.push(("project", self._project.to_string()));
params.push(("zone", self._zone.to_string()));
params.push(("resourceView", self._resource_view.to_string()));
for &field in ["alt", "project", "zone", "resourceView"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "{project}/zones/{zone}/resourceViews/{resourceView}/removeResources";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{project}", "project"), ("{zone}", "zone"), ("{resourceView}", "resourceView")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3);
for param_name in ["resourceView", "zone", "project"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
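        // Serialize the request body to JSON, dropping null values, and record its
        // length up front for the Content-Length header.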
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: ZoneViewsRemoveResourcesRequest) -> ZoneViewRemoveResourceCall<'a> {
self._request = new_value;
self
}
/// The project name of the resource view.
///
/// Sets the *project* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project(mut self, new_value: &str) -> ZoneViewRemoveResourceCall<'a> {
self._project = new_value.to_string();
self
}
/// The zone name of the resource view.
///
/// Sets the *zone* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn zone(mut self, new_value: &str) -> ZoneViewRemoveResourceCall<'a> {
self._zone = new_value.to_string();
self
}
/// The name of the resource view.
///
/// Sets the *resource view* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn resource_view(mut self, new_value: &str) -> ZoneViewRemoveResourceCall<'a> {
self._resource_view = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ZoneViewRemoveResourceCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *alt* (query-string) - Data format for the response.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits.
pub fn param<T>(mut self, name: T, value: T) -> ZoneViewRemoveResourceCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ZoneViewRemoveResourceCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Update the service information of a resource view or a resource.
///
/// A builder for the *setService* method supported by a *zoneView* resource.
/// It is not used directly, but through a `ZoneViewMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_resourceviews1_beta2 as resourceviews1_beta2;
/// use resourceviews1_beta2::api::ZoneViewsSetServiceRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use resourceviews1_beta2::{Resourceviews, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Resourceviews::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots().https_or_http().enable_http1().enable_http2().build()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = ZoneViewsSetServiceRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.zone_views().set_service(req, "project", "zone", "resourceView")
/// .doit().await;
/// # }
/// ```
pub struct ZoneViewSetServiceCall<'a> {
    hub: &'a Resourceviews,
_request: ZoneViewsSetServiceRequest,
_project: String,
_zone: String,
_resource_view: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ZoneViewSetServiceCall<'a> {}
impl<'a> ZoneViewSetServiceCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Operation)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "resourceviews.zoneViews.setService",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
params.push(("project", self._project.to_string()));
params.push(("zone", self._zone.to_string()));
params.push(("resourceView", self._resource_view.to_string()));
for &field in ["alt", "project", "zone", "resourceView"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "{project}/zones/{zone}/resourceViews/{resourceView}/setService";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{project}", "project"), ("{zone}", "zone"), ("{resourceView}", "resourceView")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3);
for param_name in ["resourceView", "zone", "project"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: ZoneViewsSetServiceRequest) -> ZoneViewSetServiceCall<'a> {
self._request = new_value;
self
}
/// The project name of the resource view.
///
/// Sets the *project* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project(mut self, new_value: &str) -> ZoneViewSetServiceCall<'a> {
self._project = new_value.to_string();
self
}
/// The zone name of the resource view.
///
/// Sets the *zone* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn zone(mut self, new_value: &str) -> ZoneViewSetServiceCall<'a> {
self._zone = new_value.to_string();
self
}
/// The name of the resource view.
///
/// Sets the *resource view* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn resource_view(mut self, new_value: &str) -> ZoneViewSetServiceCall<'a> {
self._resource_view = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ZoneViewSetServiceCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *alt* (query-string) - Data format for the response.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits.
pub fn param<T>(mut self, name: T, value: T) -> ZoneViewSetServiceCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ZoneViewSetServiceCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_snake_case)]
register_long_diagnostics! {
E0023: r##"
A pattern used to match against an enum variant must provide a sub-pattern for
each field of the enum variant. This error indicates that a pattern attempted to
extract an incorrect number of fields from a variant.
```
enum Fruit {
Apple(String, String),
Pear(u32),
}
```
Here the `Apple` variant has two fields, and should be matched against like so:
```
enum Fruit {
Apple(String, String),
Pear(u32),
}
let x = Fruit::Apple(String::new(), String::new());
// Correct.
match x {
Fruit::Apple(a, b) => {},
_ => {}
}
```
Matching with the wrong number of fields has no sensible interpretation:
```compile_fail
enum Fruit {
Apple(String, String),
Pear(u32),
}
let x = Fruit::Apple(String::new(), String::new());
// Incorrect.
match x {
Fruit::Apple(a) => {},
Fruit::Apple(a, b, c) => {},
}
```
Check how many fields the enum was declared with and ensure that your pattern
uses the same number.
"##,
E0025: r##"
Each field of a struct can only be bound once in a pattern. Erroneous code
example:
```compile_fail
struct Foo {
a: u8,
b: u8,
}
fn main(){
let x = Foo { a:1, b:2 };
let Foo { a: x, a: y } = x;
// error: field `a` bound multiple times in the pattern
}
```
Each occurrence of a field name binds the value of that field, so to fix this
error you will have to remove or alter the duplicate uses of the field name.
Perhaps you misspelled another field name? Example:
```
struct Foo {
a: u8,
b: u8,
}
fn main(){
let x = Foo { a:1, b:2 };
let Foo { a: x, b: y } = x; // ok!
}
```
"##,
E0026: r##"
This error indicates that a struct pattern attempted to extract a non-existent
field from a struct. Struct fields are identified by the name used before the
colon `:` so struct patterns should resemble the declaration of the struct type
being matched.
```
// Correct matching.
struct Thing {
x: u32,
y: u32
}
let thing = Thing { x: 1, y: 2 };
match thing {
Thing { x: xfield, y: yfield } => {}
}
```
If you are using shorthand field patterns but want to refer to the struct field
by a different name, you should rename it explicitly.
Change this:
```compile_fail
struct Thing {
x: u32,
y: u32
}
let thing = Thing { x: 0, y: 0 };
match thing {
Thing { x, z } => {}
}
```
To this:
```
struct Thing {
x: u32,
y: u32
}
let thing = Thing { x: 0, y: 0 };
match thing {
Thing { x, y: z } => {}
}
```
"##,
E0027: r##"
This error indicates that a pattern for a struct fails to specify a sub-pattern
for every one of the struct's fields. Ensure that each field from the struct's
definition is mentioned in the pattern, or use `..` to ignore unwanted fields.
For example:
```compile_fail
struct Dog {
name: String,
age: u32,
}
let d = Dog { name: "Rusty".to_string(), age: 8 };
// This is incorrect.
match d {
Dog { age: x } => {}
}
```
This is correct (explicit):
```
struct Dog {
name: String,
age: u32,
}
let d = Dog { name: "Rusty".to_string(), age: 8 };
match d {
Dog { name: ref n, age: x } => {}
}
// This is also correct (ignore unused fields).
match d {
Dog { age: x, .. } => {}
}
```
"##,
E0029: r##"
In a match expression, only numbers and characters can be matched against a
range. This is because the compiler checks that the range is non-empty at
compile-time, and is unable to evaluate arbitrary comparison functions. If you
want to capture values of an orderable type between two end-points, you can use
a guard.
```compile_fail
// The ordering relation for strings can't be evaluated at compile time,
// so this doesn't work:
match string {
"hello" ... "world" => {}
_ => {}
}
// This is a more general version, using a guard:
match string {
s if s >= "hello" && s <= "world" => {}
_ => {}
}
```
"##,
E0033: r##"
This error indicates that a pointer to a trait type cannot be implicitly
dereferenced by a pattern. Every trait defines a type, but because the
size of trait implementors isn't fixed, this type has no compile-time size.
Therefore, all accesses to trait types must be through pointers. If you
encounter this error you should try to avoid dereferencing the pointer.
```ignore
let trait_obj: &SomeTrait = ...;
// This tries to implicitly dereference to create an unsized local variable.
let &invalid = trait_obj;
// You can call methods without binding to the value being pointed at.
trait_obj.method_one();
trait_obj.method_two();
```
You can read more about trait objects in the Trait Object section of the
Reference:
https://doc.rust-lang.org/reference.html#trait-objects
"##,
E0034: r##"
The compiler doesn't know what method to call because more than one method
has the same prototype. Erroneous code example:
```compile_fail
struct Test;
trait Trait1 {
fn foo();
}
trait Trait2 {
fn foo();
}
impl Trait1 for Test { fn foo() {} }
impl Trait2 for Test { fn foo() {} }
fn main() {
Test::foo() // error, which foo() to call?
}
```
To avoid this error, you have to keep only one of them and remove the others.
So let's take our example and fix it:
```
struct Test;
trait Trait1 {
fn foo();
}
impl Trait1 for Test { fn foo() {} }
fn main() {
Test::foo() // and now that's good!
}
```
However, a better solution is to use fully qualified syntax, naming both the
type and the trait explicitly:
```
struct Test;
trait Trait1 {
fn foo();
}
trait Trait2 {
fn foo();
}
impl Trait1 for Test { fn foo() {} }
impl Trait2 for Test { fn foo() {} }
fn main() {
<Test as Trait1>::foo()
}
```
One last example:
```
trait F {
fn m(&self);
}
trait G {
fn m(&self);
}
struct X;
impl F for X { fn m(&self) { println!("I am F"); } }
impl G for X { fn m(&self) { println!("I am G"); } }
fn main() {
let f = X;
F::m(&f); // it displays "I am F"
G::m(&f); // it displays "I am G"
}
```
"##,
E0035: r##"
You tried to give a type parameter where it wasn't needed. Erroneous code
example:
```compile_fail
struct Test;
impl Test {
fn method(&self) {}
}
fn main() {
let x = Test;
x.method::<i32>(); // Error: Test::method doesn't need type parameter!
}
```
To fix this error, just remove the type parameter:
```
struct Test;
impl Test {
fn method(&self) {}
}
fn main() {
let x = Test;
x.method(); // OK, we're good!
}
```
"##,
E0036: r##"
This error occurs when you pass too many or too few type parameters to
a method. Erroneous code example:
```compile_fail
struct Test;
impl Test {
fn method<T>(&self, v: &[T]) -> usize {
v.len()
}
}
fn main() {
let x = Test;
let v = &[0];
x.method::<i32, i32>(v); // error: only one type parameter is expected!
}
```
To fix it, just specify a correct number of type parameters:
```
struct Test;
impl Test {
fn method<T>(&self, v: &[T]) -> usize {
v.len()
}
}
fn main() {
let x = Test;
let v = &[0];
x.method::<i32>(v); // OK, we're good!
}
```
Please note on the last example that we could have called `method` like this:
```ignore
x.method(v);
```
"##,
E0040: r##"
It is not allowed to manually call destructors in Rust. It is also not
necessary to do this since `drop` is called automatically whenever a value goes
out of scope.
Here's an example of this error:
```compile_fail
struct Foo {
x: i32,
}
impl Drop for Foo {
fn drop(&mut self) {
println!("kaboom");
}
}
fn main() {
let mut x = Foo { x: -7 };
x.drop(); // error: explicit use of destructor method
}
```
"##,
E0044: r##"
You can't use type parameters on foreign items. Example of erroneous code:
```compile_fail
extern { fn some_func<T>(x: T); }
```
To fix this, replace the type parameter with the specializations that you
need:
```
extern { fn some_func_i32(x: i32); }
extern { fn some_func_i64(x: i64); }
```
"##,
E0045: r##"
Rust only supports variadic parameters for interoperability with C code in its
FFI. As such, variadic parameters can only be used with functions which use
the C ABI. Examples of erroneous code:
```compile_fail
extern "rust-call" { fn foo(x: u8, ...); }
// or
fn foo(x: u8, ...) {}
```
To fix such code, put the declarations in an extern "C" block:
```ignore
extern "C" fn foo(x: u8, ...);
```
Or:
```
extern "C" {
fn foo (x: u8, ...);
}
```
"##,
E0046: r##"
Items are missing in a trait implementation. Erroneous code example:
```compile_fail
trait Foo {
fn foo();
}
struct Bar;
impl Foo for Bar {}
// error: not all trait items implemented, missing: `foo`
```
When trying to make some type implement a trait `Foo`, you must, at minimum,
provide implementations for all of `Foo`'s required methods (meaning the
methods that do not have default implementations), as well as any required
trait items like associated types or constants. Example:
```
trait Foo {
fn foo();
}
struct Bar;
impl Foo for Bar {
fn foo() {} // ok!
}
```
"##,
E0049: r##"
This error indicates that an attempted implementation of a trait method
has the wrong number of type parameters.
For example, the trait below has a method `foo` with a type parameter `T`,
but the implementation of `foo` for the type `Bar` is missing this parameter:
```compile_fail
trait Foo {
fn foo<T: Default>(x: T) -> Self;
}
struct Bar;
// error: method `foo` has 0 type parameters but its trait declaration has 1
// type parameter
impl Foo for Bar {
fn foo(x: bool) -> Self { Bar }
}
```
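A conforming implementation declares the same type parameter as the trait:
```
trait Foo {
    fn foo<T: Default>(x: T) -> Self;
}
struct Bar;
impl Foo for Bar {
    fn foo<T: Default>(x: T) -> Self { Bar }
}
```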
"##,
E0050: r##"
This error indicates that an attempted implementation of a trait method
has the wrong number of function parameters.
For example, the trait below has a method `foo` with two function parameters
(`&self` and `u8`), but the implementation of `foo` for the type `Bar` omits
the `u8` parameter:
```compile_fail
trait Foo {
fn foo(&self, x: u8) -> bool;
}
struct Bar;
// error: method `foo` has 1 parameter but the declaration in trait `Foo::foo`
// has 2
impl Foo for Bar {
fn foo(&self) -> bool { true }
}
```
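A conforming implementation accepts the same parameters as the trait declares:
```
trait Foo {
    fn foo(&self, x: u8) -> bool;
}
struct Bar;
impl Foo for Bar {
    fn foo(&self, x: u8) -> bool { x > 0 }
}
```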
"##,
E0053: r##"
The parameters of any trait method must match between a trait implementation
and the trait definition.
Here are a couple examples of this error:
```compile_fail
trait Foo {
fn foo(x: u16);
fn bar(&self);
}
struct Bar;
impl Foo for Bar {
// error, expected u16, found i16
fn foo(x: i16) { }
// error, values differ in mutability
fn bar(&mut self) { }
}
```
"##,
E0054: r##"
It is not allowed to cast to a bool. If you are trying to cast a numeric type
to a bool, you can compare it with zero instead:
```compile_fail
let x = 5;
// Not allowed, won't compile
let x_is_nonzero = x as bool;
```
```
let x = 5;
// Ok
let x_is_nonzero = x != 0;
```
"##,
E0055: r##"
During a method call, a value is automatically dereferenced as many times as
needed to make the value's type match the method's receiver. The catch is that
the compiler will only attempt to dereference a number of times up to the
recursion limit (which can be set via the `recursion_limit` attribute).
For a somewhat artificial example:
```compile_fail,ignore
#![recursion_limit="2"]
struct Foo;
impl Foo {
fn foo(&self) {}
}
fn main() {
let foo = Foo;
let ref_foo = &&Foo;
// error, reached the recursion limit while auto-dereferencing &&Foo
ref_foo.foo();
}
```
One fix may be to increase the recursion limit. Note that it is possible to
create an infinite recursion of dereferencing, in which case the only fix is to
somehow break the recursion.
"##,
E0057: r##"
When invoking closures or other implementations of the function traits `Fn`,
`FnMut` or `FnOnce` using call notation, the number of parameters passed to the
function must match its definition.
An example using a closure:
```compile_fail
let f = |x| x * 3;
let a = f(); // invalid, too few parameters
let b = f(4); // this works!
let c = f(2, 3); // invalid, too many parameters
```
A generic function must be treated similarly:
```
fn foo<F: Fn()>(f: F) {
f(); // this is valid, but f(3) would not work
}
```
"##,
E0059: r##"
The built-in function traits are generic over a tuple of the function arguments.
If one uses angle-bracket notation (`Fn<(T,), Output=U>`) instead of parentheses
(`Fn(T) -> U`) to denote the function trait, the type parameter should be a
tuple. Otherwise function call notation cannot be used and the trait will not be
implemented by closures.
The most likely source of this error is using angle-bracket notation without
wrapping the function argument type into a tuple, for example:
```compile_fail
fn foo<F: Fn<i32>>(f: F) -> F::Output { f(3) }
```
It can be fixed by adjusting the trait bound like this:
```ignore
fn foo<F: Fn<(i32,)>>(f: F) -> F::Output { f(3) }
```
Note that `(T,)` always denotes the type of a 1-tuple containing an element of
type `T`. The comma is necessary for syntactic disambiguation.
"##,
E0060: r##"
External C functions are allowed to be variadic. However, a variadic function
takes a minimum number of arguments. For example, consider C's variadic `printf`
function:
```ignore
extern crate libc;
use libc::{ c_char, c_int };
extern "C" {
fn printf(_: *const c_char, ...) -> c_int;
}
```
Using this declaration, it must be called with at least one argument, so
simply calling `printf()` is invalid. But the following uses are allowed:
```ignore
unsafe {
use std::ffi::CString;
printf(CString::new("test\n").unwrap().as_ptr());
printf(CString::new("number = %d\n").unwrap().as_ptr(), 3);
printf(CString::new("%d, %d\n").unwrap().as_ptr(), 10, 5);
}
```
"##,
E0061: r##"
The number of arguments passed to a function must match the number of arguments
specified in the function signature.
For example, a function like:
```
fn f(a: u16, b: &str) {}
```
Must always be called with exactly two arguments, e.g. `f(2, "test")`.
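Calls with any other number of arguments are rejected:
```compile_fail
fn f(a: u16, b: &str) {}
fn main() {
    f(2); // error: this function takes 2 parameters but 1 parameter was supplied
}
```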
Note that Rust does not have a notion of optional function arguments or
variadic functions (except for its C-FFI).
"##,
E0062: r##"
This error indicates that during an attempt to build a struct or struct-like
enum variant, one of the fields was specified more than once. Erroneous code
example:
```compile_fail
struct Foo {
x: i32
}
fn main() {
let x = Foo {
x: 0,
x: 0, // error: field `x` specified more than once
};
}
```
Each field should be specified exactly one time. Example:
```
struct Foo {
x: i32
}
fn main() {
let x = Foo { x: 0 }; // ok!
}
```
"##,
E0063: r##"
This error indicates that during an attempt to build a struct or struct-like
enum variant, one of the fields was not provided. Erroneous code example:
```compile_fail
struct Foo {
x: i32,
y: i32
}
fn main() {
let x = Foo { x: 0 }; // error: missing field: `y`
}
```
Each field should be specified exactly once. Example:
```
struct Foo {
x: i32,
y: i32
}
fn main() {
let x = Foo { x: 0, y: 0 }; // ok!
}
```
"##,
E0066: r##"
Box placement expressions (like C++'s "placement new") do not yet support any
place expression except the exchange heap (i.e. `std::boxed::HEAP`).
Furthermore, the syntax is changing to use `in` instead of `box`. See [RFC 470]
and [RFC 809] for more details.
[RFC 470]: https://github.com/rust-lang/rfcs/pull/470
[RFC 809]: https://github.com/rust-lang/rfcs/pull/809
"##,
E0067: r##"
The left-hand side of a compound assignment expression must be an lvalue
expression. An lvalue expression represents a memory location and includes
item paths (i.e., namespaced variables), dereferences, indexing expressions,
and field references.
Let's start with some erroneous code examples:
```compile_fail
use std::collections::LinkedList;
// Bad: assignment to non-lvalue expression
LinkedList::new() += 1;
// ...
fn some_func(i: &mut i32) {
i += 12; // Error : '+=' operation cannot be applied on a reference !
}
```
And now some working examples:
```
let mut i : i32 = 0;
i += 12; // Good !
// ...
fn some_func(i: &mut i32) {
*i += 12; // Good !
}
```
"##,
E0069: r##"
The compiler found a function whose body contains a `return;` statement but
whose return type is not `()`. An example of this is:
```compile_fail
// error
fn foo() -> u8 {
return;
}
```
Since `return;` is just like `return ();`, there is a mismatch between the
function's return type and the value being returned.
"##,
E0070: r##"
The left-hand side of an assignment operator must be an lvalue expression. An
lvalue expression represents a memory location and can be a variable (with
optional namespacing), a dereference, an indexing expression or a field
reference.
More details can be found here:
https://doc.rust-lang.org/reference.html#lvalues-rvalues-and-temporaries
Now, we can go further. Here are some erroneous code examples:
```compile_fail
struct SomeStruct {
x: i32,
y: i32
}
const SOME_CONST : i32 = 12;
fn some_other_func() {}
fn some_function() {
SOME_CONST = 14; // error : a constant value cannot be changed!
1 = 3; // error : 1 isn't a valid lvalue!
    some_other_func() = 4; // error : we can't assign a value to a function!
    SomeStruct.x = 12; // error : SomeStruct is a structure name but it is used
                       // like a variable!
}
```
And now let's give working examples:
```
struct SomeStruct {
x: i32,
y: i32
}
let mut s = SomeStruct {x: 0, y: 0};
s.x = 3; // that's good !
// ...
fn some_func(x: &mut i32) {
*x = 12; // that's good !
}
```
"##,
E0071: r##"
You tried to use structure-literal syntax to create an item that is
not a struct-style structure or enum variant.
Example of erroneous code:
```compile_fail
enum Foo { FirstValue(i32) };
let u = Foo::FirstValue { value: 0 }; // error: Foo::FirstValue
// isn't a structure!
// or even simpler, if the name doesn't refer to a structure at all.
let t = u32 { value: 4 }; // error: `u32` does not name a structure.
```
To fix this, ensure that the name was correctly spelled, and that
the correct form of initializer was used.
For example, the code above can be fixed to:
```
enum Foo {
FirstValue(i32)
}
fn main() {
let u = Foo::FirstValue(0i32);
let t = 4;
}
```
"##,
E0073: r##"
You cannot define a struct (or enum) `Foo` that requires an instance of `Foo`
in order to make a new `Foo` value. This is because there would be no way a
first instance of `Foo` could be made to initialize another instance!
Here's an example of a struct that has this problem:
```ignore
struct Foo { x: Box<Foo> } // error
```
One fix is to use `Option`, like so:
```
struct Foo { x: Option<Box<Foo>> }
```
Now it's possible to create at least one instance of `Foo`: `Foo { x: None }`.
"##,
E0074: r##"
When using the `#[simd]` attribute on a tuple struct, the components of the
tuple struct must all be of a concrete, non-generic type so the compiler can
reason about how to use SIMD with them. This error will occur if the types
are generic.
This will cause an error:
```ignore
#![feature(repr_simd)]
#[repr(simd)]
struct Bad<T>(T, T, T);
```
This will not:
```
#![feature(repr_simd)]
#[repr(simd)]
struct Good(u32, u32, u32);
```
"##,
E0075: r##"
The `#[simd]` attribute can only be applied to non-empty tuple structs, because
it doesn't make sense to try to use SIMD operations when there are no values to
operate on.
This will cause an error:
```compile_fail
#![feature(repr_simd)]
#[repr(simd)]
struct Bad;
```
This will not:
```
#![feature(repr_simd)]
#[repr(simd)]
struct Good(u32);
```
"##,
E0076: r##"
When using the `#[simd]` attribute to automatically use SIMD operations in a
tuple struct, the fields of the struct must all have the same type, or the
compiler will trigger this error.
This will cause an error:
```compile_fail
#![feature(repr_simd)]
#[repr(simd)]
struct Bad(u16, u32, u32);
```
This will not:
```
#![feature(repr_simd)]
#[repr(simd)]
struct Good(u32, u32, u32);
```
"##,
E0077: r##"
When using the `#[simd]` attribute on a tuple struct, the elements in the tuple
must be machine types so SIMD operations can be applied to them.
This will cause an error:
```compile_fail
#![feature(repr_simd)]
#[repr(simd)]
struct Bad(String);
```
This will not:
```
#![feature(repr_simd)]
#[repr(simd)]
struct Good(u32, u32, u32);
```
"##,
E0079: r##"
Enum variants which contain no data can be given a custom integer
representation. This error indicates that the value provided is not an integer
literal and is therefore invalid.
For example, in the following code:
```compile_fail
enum Foo {
Q = "32"
}
```
We try to set the representation to a string.
There's no general fix for this; if you can work with an integer then just set
it to one:
```
enum Foo {
Q = 32
}
```
However, if you actually want a mapping between variants and non-integer
objects, it may be preferable to use a method with a match instead:
```
enum Foo { Q }
impl Foo {
fn get_str(&self) -> &'static str {
match *self {
Foo::Q => "32",
}
}
}
```
"##,
E0081: r##"
Enum discriminants are used to differentiate enum variants stored in memory.
This error indicates that the same value was used for two or more variants,
making them impossible to tell apart.
```compile_fail
// Bad.
enum Enum {
P = 3,
X = 3,
Y = 5
}
```
```
// Good.
enum Enum {
P,
X = 3,
Y = 5
}
```
Note that variants without a manually specified discriminant are numbered from
top to bottom starting from 0, so clashes can occur with seemingly unrelated
variants.
```compile_fail
enum Bad {
X,
Y = 0
}
```
Here `X` has already been assigned the discriminant 0 by the time `Y` is
encountered, so a conflict occurs.
"##,
E0082: r##"
When you specify enum discriminants with `=`, the compiler expects `isize`
values by default. Alternatively, you can add the `repr` attribute to the enum
declaration for an explicit choice of the discriminant type. In either case,
the discriminant values must fall within a valid range for the expected type;
otherwise this error is raised. For example:
```ignore
#[repr(u8)]
enum Thing {
A = 1024,
B = 5
}
```
Here, 1024 lies outside the valid range for `u8`, so the discriminant for `A` is
invalid. Here is another, more subtle example which depends on target word size:
```ignore
enum DependsOnPointerSize {
A = 1 << 32
}
```
Here, `1 << 32` is interpreted as an `isize` value. So it is invalid for a
32-bit target (`target_pointer_width = "32"`) but valid for a 64-bit target.
You may want to change representation types to fix this, or else change invalid
discriminant values so that they fit within the existing type.
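For example, widening the representation lets the first discriminant fit:
```
#[repr(u16)]
enum Thing {
    A = 1024,
    B = 5
}
```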
"##,
E0084: r##"
An unsupported representation was attempted on a zero-variant enum.
Erroneous code example:
```compile_fail
#[repr(i32)]
enum NightsWatch {} // error: unsupported representation for zero-variant enum
```
It is impossible to define an integer type to be used to represent zero-variant
enum values because there are no zero-variant enum values. There is no way to
construct an instance of such a type using only safe code. So you have
two solutions. Either you add variants in your enum:
```
#[repr(i32)]
enum NightsWatch {
JonSnow,
Commander,
}
```
or you remove the integer representation of your enum:
```
enum NightsWatch {}
```
"##,
E0087: r##"
Too many type parameters were supplied for a function. For example:
```compile_fail
fn foo<T>() {}
fn main() {
foo::<f64, bool>(); // error, expected 1 parameter, found 2 parameters
}
```
The number of supplied parameters must exactly match the number of defined type
parameters.
"##,
E0088: r##"
You gave too many lifetime parameters. Erroneous code example:
```compile_fail
fn f() {}
fn main() {
f::<'static>() // error: too many lifetime parameters provided
}
```
Please check that you give the right number of lifetime parameters. Example:
```
fn f() {}
fn main() {
f() // ok!
}
```
It's also important to note that the Rust compiler can generally
determine the lifetime by itself. Example:
```
struct Foo {
value: String
}
impl Foo {
// it can be written like this
fn get_value<'a>(&'a self) -> &'a str { &self.value }
// but the compiler works fine with this too:
fn without_lifetime(&self) -> &str { &self.value }
}
fn main() {
let f = Foo { value: "hello".to_owned() };
println!("{}", f.get_value());
println!("{}", f.without_lifetime());
}
```
"##,
E0089: r##"
Not enough type parameters were supplied for a function. For example:
```compile_fail
fn foo<T, U>() {}
fn main() {
foo::<f64>(); // error, expected 2 parameters, found 1 parameter
}
```
Note that if a function takes multiple type parameters but you want the compiler
to infer some of them, you can use type placeholders:
```compile_fail
fn foo<T, U>(x: T) {}
fn main() {
let x: bool = true;
foo::<f64>(x); // error, expected 2 parameters, found 1 parameter
foo::<_, f64>(x); // same as `foo::<bool, f64>(x)`
}
```
"##,
E0091: r##"
You gave an unnecessary type parameter in a type alias. Erroneous code
example:
```compile_fail
type Foo<T> = u32; // error: type parameter `T` is unused
// or:
type Foo<A,B> = Box<A>; // error: type parameter `B` is unused
```
Please check you didn't write too many type parameters. Example:
```
type Foo = u32; // ok!
type Foo2<A> = Box<A>; // ok!
```
"##,
E0092: r##"
You tried to declare an undefined atomic operation function.
Erroneous code example:
```compile_fail
#![feature(intrinsics)]
extern "rust-intrinsic" {
fn atomic_foo(); // error: unrecognized atomic operation
// function
}
```
Please check you didn't make a mistake in the function's name. All intrinsic
functions are defined in librustc_trans/trans/intrinsic.rs and in
libcore/intrinsics.rs in the Rust source code. Example:
```
#![feature(intrinsics)]
extern "rust-intrinsic" {
fn atomic_fence(); // ok!
}
```
"##,
E0093: r##"
You declared an unknown intrinsic function. Erroneous code example:
```compile_fail
#![feature(intrinsics)]
extern "rust-intrinsic" {
fn foo(); // error: unrecognized intrinsic function: `foo`
}
fn main() {
unsafe {
foo();
}
}
```
Please check you didn't make a mistake in the function's name. All intrinsic
functions are defined in librustc_trans/trans/intrinsic.rs and in
libcore/intrinsics.rs in the Rust source code. Example:
```
#![feature(intrinsics)]
extern "rust-intrinsic" {
fn atomic_fence(); // ok!
}
fn main() {
unsafe {
atomic_fence();
}
}
```
"##,
E0094: r##"
You gave an invalid number of type parameters to an intrinsic function.
Erroneous code example:
```compile_fail
#![feature(intrinsics)]
extern "rust-intrinsic" {
fn size_of<T, U>() -> usize; // error: intrinsic has wrong number
// of type parameters
}
```
Please check that you provided the right number of type parameters
and verify with the function declaration in the Rust source code.
Example:
```
#![feature(intrinsics)]
extern "rust-intrinsic" {
fn size_of<T>() -> usize; // ok!
}
```
"##,
E0101: r##"
You hit this error because the compiler lacks the information to
determine a type for this expression. Erroneous code example:
```compile_fail
fn main() {
let x = |_| {}; // error: cannot determine a type for this expression
}
```
You have two possibilities to solve this situation:
* Give the expression an explicit type annotation
* Use the expression in a way that lets the compiler infer its type
Examples:
```
fn main() {
let x = |_ : u32| {}; // ok!
// or:
let x = |_| {};
x(0u32);
}
```
"##,
E0102: r##"
You hit this error because the compiler lacks the information to
determine the type of this variable. Erroneous code example:
```compile_fail
fn main() {
// could be an array of anything
let x = []; // error: cannot determine a type for this local variable
}
```
To solve this situation, constrain the type of the variable.
Examples:
```
#![allow(unused_variables)]
fn main() {
let x: [u8; 0] = [];
}
```
"##,
E0106: r##"
This error indicates that a lifetime is missing from a type. If it is an error
inside a function signature, the problem may be with failing to adhere to the
lifetime elision rules (see below).
Here are some simple examples of where you'll run into this error:
```compile_fail
struct Foo { x: &bool } // error
struct Foo<'a> { x: &'a bool } // correct
enum Bar { A(u8), B(&bool), } // error
enum Bar<'a> { A(u8), B(&'a bool), } // correct
type MyStr = &str; // error
type MyStr<'a> = &'a str; // correct
```
Lifetime elision is a special, limited kind of inference for lifetimes in
function signatures which allows you to leave out lifetimes in certain cases.
For more background on lifetime elision see [the book][book-le].
The lifetime elision rules require that any function signature with an elided
output lifetime must either have
- exactly one input lifetime
- or, multiple input lifetimes, but the function must also be a method with a
`&self` or `&mut self` receiver
In the first case, the output lifetime is inferred to be the same as the unique
input lifetime. In the second case, the lifetime is instead inferred to be the
same as the lifetime on `&self` or `&mut self`.
Here are some examples of elision errors:
```compile_fail
// error, no input lifetimes
fn foo() -> &str { }
// error, `x` and `y` have distinct lifetimes inferred
fn bar(x: &str, y: &str) -> &str { }
// error, `y`'s lifetime is inferred to be distinct from `x`'s
fn baz<'a>(x: &'a str, y: &str) -> &str { }
```
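One way to fix these is to name the lifetimes explicitly. For instance, a
version of `bar` along these lines compiles (a sketch, tying both inputs and
the output to the same lifetime):
```
fn bar<'a>(x: &'a str, y: &'a str) -> &'a str {
    if x.len() > y.len() { x } else { y }
}
```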
[book-le]: https://doc.rust-lang.org/nightly/book/lifetimes.html#lifetime-elision
"##,
E0107: r##"
This error means that an incorrect number of lifetime parameters were provided
for a type (like a struct or enum) or trait.
Some basic examples include:
```compile_fail
struct Foo<'a>(&'a str);
enum Bar { A, B, C }
struct Baz<'a> {
foo: Foo, // error: expected 1, found 0
bar: Bar<'a>, // error: expected 0, found 1
}
```
Here's an example that is currently an error, but may work in a future version
of Rust:
```compile_fail
struct Foo<'a>(&'a str);
trait Quux { }
impl Quux for Foo { } // error: expected 1, found 0
```
Lifetime elision in implementation headers was part of the lifetime elision
RFC. It is, however, [currently unimplemented][iss15872].
[iss15872]: https://github.com/rust-lang/rust/issues/15872
"##,
E0116: r##"
You can only define an inherent implementation for a type in the same crate
where the type was defined. For example, an `impl` block as below is not allowed
since `Vec` is defined in the standard library:
```compile_fail
impl Vec<u8> { } // error
```
To fix this problem, you can do either of these things:
- define a trait that has the desired associated functions/types/constants and
implement the trait for the type in question
- define a new type wrapping the type and define an implementation on the new
type
Note that using the `type` keyword does not work here because `type` only
introduces a type alias:
```compile_fail
type Bytes = Vec<u8>;
impl Bytes { } // error, same as above
```
"##,
E0117: r##"
This error indicates a violation of one of Rust's orphan rules for trait
implementations. The rule prohibits any implementation of a foreign trait (a
trait defined in another crate) where both
- the type that is implementing the trait is foreign, and
- all of the parameters being passed to the trait (if there are any) are also
foreign.
Here's one example of this error:
```compile_fail
impl Drop for u32 {}
```
To avoid this kind of error, ensure that at least one local type is referenced
by the `impl`:
```ignore
pub struct Foo; // you define your type in your crate
impl Drop for Foo { // and you can implement the trait on it!
// code of trait implementation here
}
impl From<Foo> for i32 { // or you use a type from your crate as
// a type parameter
fn from(i: Foo) -> i32 {
0
}
}
```
Alternatively, define a trait locally and implement that instead:
```
trait Bar {
fn get(&self) -> usize;
}
impl Bar for u32 {
fn get(&self) -> usize { 0 }
}
```
For information on the design of the orphan rules, see [RFC 1023].
[RFC 1023]: https://github.com/rust-lang/rfcs/pull/1023
"##,
E0118: r##"
You're trying to write an inherent implementation for something which isn't a
struct nor an enum. Erroneous code example:
```compile_fail
impl (u8, u8) { // error: no base type found for inherent implementation
fn get_state(&self) -> String {
// ...
}
}
```
To fix this error, please implement a trait on the type or wrap it in a struct.
Example:
```
// we create a trait here
trait LiveLongAndProsper {
fn get_state(&self) -> String;
}
// and now you can implement it on (u8, u8)
impl LiveLongAndProsper for (u8, u8) {
fn get_state(&self) -> String {
"He's dead, Jim!".to_owned()
}
}
```
Alternatively, you can create a newtype. A newtype is a wrapping tuple-struct.
For example, `NewType` is a newtype over `Foo` in `struct NewType(Foo)`.
Example:
```
struct TypeWrapper((u8, u8));
impl TypeWrapper {
fn get_state(&self) -> String {
"Fascinating!".to_owned()
}
}
```
"##,
E0119: r##"
There are conflicting trait implementations for the same type.
Example of erroneous code:
```compile_fail
trait MyTrait {
fn get(&self) -> usize;
}
impl<T> MyTrait for T {
fn get(&self) -> usize { 0 }
}
struct Foo {
value: usize
}
impl MyTrait for Foo { // error: conflicting implementations of trait
// `MyTrait` for type `Foo`
fn get(&self) -> usize { self.value }
}
```
When looking for the implementation of the trait, the compiler finds
both the blanket `impl<T> MyTrait for T`, which applies to all types, and the
`impl MyTrait for Foo`. Since a trait cannot be implemented multiple times,
this is an error. So, when you write:
```
trait MyTrait {
fn get(&self) -> usize;
}
impl<T> MyTrait for T {
fn get(&self) -> usize { 0 }
}
```
This implements the trait for all types in scope, so if you try to
implement it for another type afterwards, the implementations will conflict.
The blanket implementation alone already covers every type. Example:
```
trait MyTrait {
fn get(&self) -> usize;
}
impl<T> MyTrait for T {
fn get(&self) -> usize { 0 }
}
struct Foo;
fn main() {
let f = Foo;
f.get(); // the trait is implemented so we can use it
}
```
"##,
E0120: r##"
An attempt was made to implement Drop on a trait, which is not allowed: only
structs and enums can implement Drop. An example causing this error:
```compile_fail
trait MyTrait {}
impl Drop for MyTrait {
fn drop(&mut self) {}
}
```
A workaround for this problem is to wrap the trait up in a struct, and implement
Drop on that. An example is shown below:
```
trait MyTrait {}
struct MyWrapper<T: MyTrait> { foo: T }
impl <T: MyTrait> Drop for MyWrapper<T> {
fn drop(&mut self) {}
}
```
Alternatively, wrapping trait objects requires something like the following:
```
trait MyTrait {}
//or Box<MyTrait>, if you wanted an owned trait object
struct MyWrapper<'a> { foo: &'a MyTrait }
impl <'a> Drop for MyWrapper<'a> {
fn drop(&mut self) {}
}
```
"##,
E0121: r##"
In order to be consistent with Rust's lack of global type inference, type
placeholders are disallowed by design in item signatures.
Examples of this error include:
```compile_fail
fn foo() -> _ { 5 } // error, explicitly write out the return type instead
static BAR: _ = "test"; // error, explicitly write out the type instead
```
"##,
E0122: r##"
An attempt was made to add a generic constraint to a type alias. While Rust will
allow this with a warning, it will not currently enforce the constraint.
Consider the example below:
```
trait Foo{}
type MyType<R: Foo> = (R, ());
fn main() {
let t: MyType<u32>;
}
```
We're able to declare a variable of type `MyType<u32>`, despite the fact that
`u32` does not implement `Foo`. As a result, one should avoid using generic
constraints in concert with type aliases.
"##,
E0124: r##"
You declared two fields of a struct with the same name. Erroneous code
example:
```compile_fail
struct Foo {
field1: i32,
field1: i32, // error: field is already declared
}
```
Please verify that the field names have been correctly spelled. Example:
```
struct Foo {
field1: i32,
field2: i32, // ok!
}
```
"##,
E0128: r##"
Type parameter defaults can only use parameters that occur before them.
Erroneous code example:
```compile_fail
struct Foo<T=U, U=()> {
field1: T,
    field2: U,
}
// error: type parameters with a default cannot use forward declared
// identifiers
```
Since type parameters are evaluated in-order, you may be able to fix this issue
by doing:
```
struct Foo<U=(), T=U> {
field1: T,
    field2: U,
}
```
Please also verify that this wasn't because of a name-clash and rename the type
parameter if so.
"##,
E0131: r##"
It is not possible to define `main` with type parameters, or even with function
parameters. When `main` is present, it must take no arguments and return `()`.
Erroneous code example:
```compile_fail
fn main<T>() { // error: main function is not allowed to have type parameters
}
```
"##,
E0132: r##"
A function with the `start` attribute was declared with type parameters.
Erroneous code example:
```compile_fail
#![feature(start)]
#[start]
fn f<T>() {}
```
It is not possible to declare type parameters on a function that has the `start`
attribute. Such a function must have the following type signature (for more
information: http://doc.rust-lang.org/stable/book/no-stdlib.html):
```ignore
fn(isize, *const *const u8) -> isize;
```
Example:
```
#![feature(start)]
#[start]
fn my_start(argc: isize, argv: *const *const u8) -> isize {
0
}
```
"##,
E0164: r##"
This error means that an attempt was made to match an enum's struct variant
using non-struct (tuple) pattern syntax:
```compile_fail
enum Foo { B { i: u32 } }
fn bar(foo: Foo) -> u32 {
match foo {
Foo::B(i) => i, // error E0164
}
}
```
Try using `{}` instead:
```
enum Foo { B { i: u32 } }
fn bar(foo: Foo) -> u32 {
match foo {
Foo::B{i} => i,
}
}
```
"##,
E0166: r##"
This error means that the compiler found a return expression in a function
marked as diverging. A function diverges if it has `!` in the place of the
return type in its signature. For example:
```compile_fail
fn foo() -> ! { return; } // error
```
For a function that diverges, every control path in the function must never
return, for example with a `loop` that never breaks or a call to another
diverging function (such as `panic!()`).
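By contrast, a diverging function that genuinely never returns compiles fine,
for example one that loops forever:
```
fn foo() -> ! {
    loop {}
}
```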
"##,
E0172: r##"
This error means that an attempt was made to specify the type of a variable with
a combination of a concrete type and a trait. Consider the following example:
```compile_fail
fn foo(bar: i32+std::fmt::Display) {}
```
The code is trying to specify that we want to receive a signed 32-bit integer
which also implements `Display`. This doesn't make sense: when we pass `i32`, a
concrete type, it implicitly includes all of the traits that it implements.
This includes `Display`, `Debug`, `Clone`, and a host of others.
If `i32` implements the trait we desire, there's no need to specify the trait
separately. If it does not, then we need to `impl` the trait for `i32` before
passing it into `foo`. Either way, a fixed definition for `foo` will look like
the following:
```
fn foo(bar: i32) {}
```
To learn more about traits, take a look at the Book:
https://doc.rust-lang.org/book/traits.html
"##,
E0178: r##"
In types, the `+` type operator has low precedence, so it is often necessary
to use parentheses.
For example:
```compile_fail
trait Foo {}
struct Bar<'a> {
w: &'a Foo + Copy, // error, use &'a (Foo + Copy)
x: &'a Foo + 'a, // error, use &'a (Foo + 'a)
y: &'a mut Foo + 'a, // error, use &'a mut (Foo + 'a)
z: fn() -> Foo + 'a, // error, use fn() -> (Foo + 'a)
}
```
More details can be found in [RFC 438].
[RFC 438]: https://github.com/rust-lang/rfcs/pull/438
"##,
E0184: r##"
Explicitly implementing both Drop and Copy for a type is currently disallowed.
This feature can make some sense in theory, but the current implementation is
incorrect and can lead to memory unsafety (see [issue #20126][iss20126]), so
it has been disabled for now.
[iss20126]: https://github.com/rust-lang/rust/issues/20126
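For example, a minimal sketch that runs into this error (the exact error
wording may differ):
```compile_fail
#[derive(Clone)]
struct Foo;

impl Drop for Foo {
    fn drop(&mut self) {}
}

impl Copy for Foo {} // error: the type `Foo` has a destructor, so it cannot
                     //        implement `Copy`
```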
"##,
E0185: r##"
An associated function for a trait was defined to be static, but an
implementation of the trait declared the same function to be a method (i.e. to
take a `self` parameter).
Here's an example of this error:
```compile_fail
trait Foo {
fn foo();
}
struct Bar;
impl Foo for Bar {
// error, method `foo` has a `&self` declaration in the impl, but not in
// the trait
fn foo(&self) {}
}
```
"##,
E0186: r##"
An associated function for a trait was defined to be a method (i.e. to take a
`self` parameter), but an implementation of the trait declared the same function
to be static.
Here's an example of this error:
```compile_fail
trait Foo {
fn foo(&self);
}
struct Bar;
impl Foo for Bar {
// error, method `foo` has a `&self` declaration in the trait, but not in
// the impl
fn foo() {}
}
```
"##,
E0191: r##"
Trait objects need to have all associated types specified. Erroneous code
example:
```compile_fail
trait Trait {
type Bar;
}
type Foo = Trait; // error: the value of the associated type `Bar` (from
// the trait `Trait`) must be specified
```
Please verify you specified all associated types of the trait and that you
used the right trait. Example:
```
trait Trait {
type Bar;
}
type Foo = Trait<Bar=i32>; // ok!
```
"##,
E0192: r##"
Negative impls are only allowed for traits with default impls. For more
information see the
[opt-in builtin traits RFC](https://github.com/rust-lang/rfcs/blob/master/text/0019-opt-in-builtin-traits.md).
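A minimal sketch of code that hits this error, assuming the
`optin_builtin_traits` feature used elsewhere in this document:
```compile_fail
#![feature(optin_builtin_traits)]

trait MyTrait {
    fn foo(&self);
}

struct Foo;

// error: `MyTrait` has no default impl, so it cannot be negatively implemented
impl !MyTrait for Foo { }
```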
"##,
E0193: r##"
`where` clauses must use generic type parameters: it does not make sense to use
them otherwise. An example causing this error:
```ignore
trait Foo {
fn bar(&self);
}
#[derive(Copy,Clone)]
struct Wrapper<T> {
Wrapped: T
}
impl Foo for Wrapper<u32> where Wrapper<u32>: Clone {
fn bar(&self) { }
}
```
This use of a `where` clause is strange - a more common usage would look
something like the following:
```
trait Foo {
fn bar(&self);
}
#[derive(Copy,Clone)]
struct Wrapper<T> {
Wrapped: T
}
impl <T> Foo for Wrapper<T> where Wrapper<T>: Clone {
fn bar(&self) { }
}
```
Here, we're saying that the implementation exists on Wrapper only when the
wrapped type `T` implements `Clone`. The `where` clause is important because
some types will not implement `Clone`, and thus will not get this method.
In our erroneous example, however, we're referencing a single concrete type.
Since we know for certain that `Wrapper<u32>` implements `Clone`, there's no
reason to also specify it in a `where` clause.
"##,
E0194: r##"
A type parameter was declared which shadows an existing one. An example of this
error:
```compile_fail
trait Foo<T> {
fn do_something(&self) -> T;
fn do_something_else<T: Clone>(&self, bar: T);
}
```
In this example, the trait `Foo` and the trait method `do_something_else` both
define a type parameter `T`. This is not allowed: if the method wishes to
define a type parameter, it must use a different name for it.
"##,
E0195: r##"
Your method's lifetime parameters do not match the trait declaration.
Erroneous code example:
```compile_fail
trait Trait {
fn bar<'a,'b:'a>(x: &'a str, y: &'b str);
}
struct Foo;
impl Trait for Foo {
fn bar<'a,'b>(x: &'a str, y: &'b str) {
// error: lifetime parameters or bounds on method `bar`
// do not match the trait declaration
}
}
```
The lifetime bound on `'b` in the `bar()` implementation does not match the
trait declaration. Ensure that lifetime declarations match exactly in both the
trait declaration and the implementation. Example:
```
trait Trait {
fn t<'a,'b:'a>(x: &'a str, y: &'b str);
}
struct Foo;
impl Trait for Foo {
fn t<'a,'b:'a>(x: &'a str, y: &'b str) { // ok!
}
}
```
"##,
E0197: r##"
Inherent implementations (implementations that do not implement a trait but
provide methods associated with a type) are always safe because they are not
implementing an unsafe trait. Removing the `unsafe` keyword from the inherent
implementation will resolve this error.
```compile_fail
struct Foo;
// this will cause this error
unsafe impl Foo { }
// converting it to this will fix it
impl Foo { }
```
"##,
E0198: r##"
A negative implementation is one that excludes a type from implementing a
particular trait. Not being able to use a trait is always a safe operation,
so negative implementations are always safe and never need to be marked as
unsafe.
```compile_fail
#![feature(optin_builtin_traits)]
struct Foo;
// unsafe is unnecessary
unsafe impl !Clone for Foo { }
```
This will compile:
```
#![feature(optin_builtin_traits)]
struct Foo;
trait Enterprise {}
impl Enterprise for .. { }
impl !Enterprise for Foo { }
```
Please note that negative impls are only allowed for traits with default impls.
"##,
E0199: r##"
Safe traits should not have unsafe implementations, therefore marking an
implementation for a safe trait unsafe will cause a compiler error. Removing
the unsafe marker on the trait noted in the error will resolve this problem.
```compile_fail
struct Foo;
trait Bar { }
// this won't compile because Bar is safe
unsafe impl Bar for Foo { }
// this will compile
impl Bar for Foo { }
```
"##,
E0200: r##"
Unsafe traits must have unsafe implementations. This error occurs when an
implementation for an unsafe trait isn't marked as unsafe. This may be resolved
by marking the unsafe implementation as unsafe.
```compile_fail
struct Foo;
unsafe trait Bar { }
// this won't compile because Bar is unsafe and impl isn't unsafe
impl Bar for Foo { }
// this will compile
unsafe impl Bar for Foo { }
```
"##,
E0201: r##"
It is an error to define two associated items (like methods, associated types,
associated functions, etc.) with the same identifier.
For example:
```compile_fail
struct Foo(u8);
impl Foo {
fn bar(&self) -> bool { self.0 > 5 }
fn bar() {} // error: duplicate associated function
}
trait Baz {
type Quux;
fn baz(&self) -> bool;
}
impl Baz for Foo {
type Quux = u32;
fn baz(&self) -> bool { true }
// error: duplicate method
fn baz(&self) -> bool { self.0 > 5 }
// error: duplicate associated type
type Quux = u32;
}
```
Note, however, that items with the same name are allowed for inherent `impl`
blocks that don't overlap:
```
struct Foo<T>(T);
impl Foo<u8> {
fn bar(&self) -> bool { self.0 > 5 }
}
impl Foo<bool> {
fn bar(&self) -> bool { self.0 }
}
```
"##,
E0202: r##"
Inherent associated types were part of [RFC 195] but are not yet implemented.
See [the tracking issue][iss8995] for the status of this implementation.
[RFC 195]: https://github.com/rust-lang/rfcs/pull/195
[iss8995]: https://github.com/rust-lang/rust/issues/8995
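For reference, a sketch of code that currently produces this error:
```compile_fail
struct Foo;

impl Foo {
    type Bar = u32; // error: associated types are not allowed in inherent impls
}
```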
"##,
E0204: r##"
An attempt to implement the `Copy` trait for a struct failed because one of the
fields does not implement `Copy`. To fix this, you must implement `Copy` for the
mentioned field. Note that this may not be possible, as in the example of
```compile_fail
struct Foo {
foo : Vec<u32>,
}
impl Copy for Foo { }
```
This fails because `Vec<T>` does not implement `Copy` for any `T`.
Here's another example that will fail:
```compile_fail
#[derive(Copy)]
struct Foo<'a> {
ty: &'a mut bool,
}
```
This fails because `&mut T` is not `Copy`, even when `T` is `Copy` (this
differs from the behavior for `&T`, which is always `Copy`).
"##,
E0205: r##"
An attempt to implement the `Copy` trait for an enum failed because one of the
variants does not implement `Copy`. To fix this, you must implement `Copy` for
the mentioned variant. Note that this may not be possible, as in the example of
```compile_fail
enum Foo {
Bar(Vec<u32>),
Baz,
}
impl Copy for Foo { }
```
This fails because `Vec<T>` does not implement `Copy` for any `T`.
Here's another example that will fail:
```compile_fail
#[derive(Copy)]
enum Foo<'a> {
Bar(&'a mut bool),
Baz
}
```
This fails because `&mut T` is not `Copy`, even when `T` is `Copy` (this
differs from the behavior for `&T`, which is always `Copy`).
"##,
E0206: r##"
You can only implement `Copy` for a struct or enum. Both of the following
examples will fail, because neither `i32` (primitive type) nor `&'static Bar`
(reference to `Bar`) is a struct or enum:
```compile_fail
type Foo = i32;
impl Copy for Foo { } // error
#[derive(Copy, Clone)]
struct Bar;
impl Copy for &'static Bar { } // error
```
"##,
E0207: r##"
Any type parameter or lifetime parameter of an `impl` must meet at least one of
the following criteria:
- it appears in the self type of the impl
- for a trait impl, it appears in the trait reference
- it is bound as an associated type
### Error example 1
Suppose we have a struct `Foo` and we would like to define some methods for it.
The following definition leads to a compiler error:
```compile_fail
struct Foo;
impl<T: Default> Foo {
// error: the type parameter `T` is not constrained by the impl trait, self
// type, or predicates [E0207]
fn get(&self) -> T {
<T as Default>::default()
}
}
```
The problem is that the parameter `T` does not appear in the self type (`Foo`)
of the impl. In this case, we can fix the error by moving the type parameter
from the `impl` to the method `get`:
```
struct Foo;
// Move the type parameter from the impl to the method
impl Foo {
fn get<T: Default>(&self) -> T {
<T as Default>::default()
}
}
```
### Error example 2
As another example, suppose we have a `Maker` trait and want to establish a
type `FooMaker` that makes `Foo`s:
```compile_fail
trait Maker {
type Item;
fn make(&mut self) -> Self::Item;
}
struct Foo<T> {
foo: T
}
struct FooMaker;
impl<T: Default> Maker for FooMaker {
// error: the type parameter `T` is not constrained by the impl trait, self
// type, or predicates [E0207]
type Item = Foo<T>;
fn make(&mut self) -> Foo<T> {
Foo { foo: <T as Default>::default() }
}
}
```
This fails to compile because `T` does not appear in the trait or in the
implementing type.
One way to work around this is to introduce a phantom type parameter into
`FooMaker`, like so:
```
use std::marker::PhantomData;
trait Maker {
type Item;
fn make(&mut self) -> Self::Item;
}
struct Foo<T> {
foo: T
}
// Add a type parameter to `FooMaker`
struct FooMaker<T> {
phantom: PhantomData<T>,
}
impl<T: Default> Maker for FooMaker<T> {
type Item = Foo<T>;
fn make(&mut self) -> Foo<T> {
Foo {
foo: <T as Default>::default(),
}
}
}
```
Another way is to do away with the associated type in `Maker` and use an input
type parameter instead:
```
// Use a type parameter instead of an associated type here
trait Maker<Item> {
fn make(&mut self) -> Item;
}
struct Foo<T> {
foo: T
}
struct FooMaker;
impl<T: Default> Maker<Foo<T>> for FooMaker {
fn make(&mut self) -> Foo<T> {
Foo { foo: <T as Default>::default() }
}
}
```
### Additional information
For more information, please see [RFC 447].
[RFC 447]: https://github.com/rust-lang/rfcs/blob/master/text/0447-no-unused-impl-parameters.md
"##,
E0210: r##"
This error indicates a violation of one of Rust's orphan rules for trait
implementations. The rule concerns the use of type parameters in an
implementation of a foreign trait (a trait defined in another crate), and
states that type parameters must be "covered" by a local type. To understand
what this means, it is perhaps easiest to consider a few examples.
If `ForeignTrait` is a trait defined in some external crate `foo`, then the
following trait `impl` is an error:
```compile_fail
extern crate foo;
use foo::ForeignTrait;
impl<T> ForeignTrait for T { } // error
```
To work around this, it can be covered with a local type, `MyType`:
```ignore
struct MyType<T>(T);
impl<T> ForeignTrait for MyType<T> { } // Ok
```
Please note that a type alias is not sufficient.
For another example of an error, suppose there's another trait defined in `foo`
named `ForeignTrait2` that takes two type parameters. Then this `impl` results
in the same rule violation:
```compile_fail
struct MyType2;
impl<T> ForeignTrait2<T, MyType<T>> for MyType2 { } // error
```
The reason for this is that there are two appearances of type parameter `T` in
the `impl` header, both as parameters for `ForeignTrait2`. The first appearance
is uncovered, and so runs afoul of the orphan rule.
Consider one more example:
```ignore
impl<T> ForeignTrait2<MyType<T>, T> for MyType2 { } // Ok
```
This only differs from the previous `impl` in that the parameters `T` and
`MyType<T>` for `ForeignTrait2` have been swapped. This example does *not*
violate the orphan rule; it is permitted.
To see why that last example was allowed, you need to understand the general
rule. Unfortunately this rule is a bit tricky to state. Consider an `impl`:
```ignore
impl<P1, ..., Pm> ForeignTrait<T1, ..., Tn> for T0 { ... }
```
where `P1, ..., Pm` are the type parameters of the `impl` and `T0, ..., Tn`
are types. One of the types `T0, ..., Tn` must be a local type (this is another
orphan rule, see the explanation for E0117). Let `i` be the smallest integer
such that `Ti` is a local type. Then no type parameter can appear in any of the
`Tj` for `j < i`.
For information on the design of the orphan rules, see [RFC 1023].
[RFC 1023]: https://github.com/rust-lang/rfcs/pull/1023
"##,
/*
E0211: r##"
You used a function or type which doesn't fit the requirements for where it was
used. Erroneous code examples:
```compile_fail
#![feature(intrinsics)]
extern "rust-intrinsic" {
fn size_of<T>(); // error: intrinsic has wrong type
}
// or:
fn main() -> i32 { 0 }
// error: main function expects type: `fn() {main}`: expected (), found i32
// or:
let x = 1u8;
match x {
0u8...3i8 => (),
// error: mismatched types in range: expected u8, found i8
_ => ()
}
// or:
use std::rc::Rc;
struct Foo;
impl Foo {
fn x(self: Rc<Foo>) {}
// error: mismatched self type: expected `Foo`: expected struct
// `Foo`, found struct `alloc::rc::Rc`
}
```
For the first code example, please check the function definition. Example:
```
#![feature(intrinsics)]
extern "rust-intrinsic" {
fn size_of<T>() -> usize; // ok!
}
```
The second example is a bit particular: the main function must always
have this definition:
```compile_fail
fn main();
```
It never takes parameters and always returns `()`.
For the third example, when you match, all patterns must have the same type
as the type you're matching on. Example:
```
let x = 1u8;
match x {
0u8...3u8 => (), // ok!
_ => ()
}
```
And finally, for the last example, only `Box<Self>`, `&Self`, `Self`,
or `&mut Self` work as explicit self parameters. Example:
```
struct Foo;
impl Foo {
fn x(self: Box<Foo>) {} // ok!
}
```
"##,
*/
E0214: r##"
A generic type was described using parentheses rather than angle brackets. For
example:
```compile_fail
fn main() {
let v: Vec(&str) = vec!["foo"];
}
```
This is not currently supported: `v` should be defined as `Vec<&str>`.
Parentheses are currently only used with generic types when defining parameters
for `Fn`-family traits.
"##,
E0220: r##"
You used an associated type which isn't defined in the trait.
Erroneous code example:
```compile_fail
trait T1 {
type Bar;
}
type Foo = T1<F=i32>; // error: associated type `F` not found for `T1`
// or:
trait T2 {
type Bar;
// error: Baz is used but not declared
fn return_bool(&self, &Self::Bar, &Self::Baz) -> bool;
}
```
Make sure that you have defined the associated type in the trait body.
Also, verify that you used the right trait and that you didn't misspell the
associated type name. Example:
```
trait T1 {
type Bar;
}
type Foo = T1<Bar=i32>; // ok!
// or:
trait T2 {
type Bar;
type Baz; // we declare `Baz` in our trait.
// and now we can use it here:
fn return_bool(&self, &Self::Bar, &Self::Baz) -> bool;
}
```
"##,
E0221: r##"
An attempt was made to retrieve an associated type, but the type was ambiguous.
For example:
```compile_fail
trait T1 {}
trait T2 {}
trait Foo {
type A: T1;
}
trait Bar : Foo {
type A: T2;
fn do_something() {
let _: Self::A;
}
}
```
In this example, `Foo` defines an associated type `A`. `Bar` inherits that type
from `Foo`, and defines another associated type of the same name. As a result,
when we attempt to use `Self::A`, it's ambiguous whether we mean the `A` defined
by `Foo` or the one defined by `Bar`.
There are two options to work around this issue. The first is simply to rename
one of the types. Alternatively, one can specify the intended type using the
following syntax:
```
trait T1 {}
trait T2 {}
trait Foo {
type A: T1;
}
trait Bar : Foo {
type A: T2;
fn do_something() {
let _: <Self as Bar>::A;
}
}
```
"##,
E0223: r##"
An attempt was made to retrieve an associated type, but the type was ambiguous.
For example:
```compile_fail
trait MyTrait {type X; }
fn main() {
let foo: MyTrait::X;
}
```
The problem here is that we're attempting to take the type of `X` from
`MyTrait`. Unfortunately, the type of `X` is not defined, because it's only
made concrete in implementations of the trait. A working version of this code
might look like:
```
trait MyTrait {type X; }
struct MyStruct;
impl MyTrait for MyStruct {
type X = u32;
}
fn main() {
let foo: <MyStruct as MyTrait>::X;
}
```
This syntax specifies that we want the X type from MyTrait, as made concrete in
MyStruct. The reason that we cannot simply use `MyStruct::X` is that MyStruct
might implement two different traits with identically-named associated types.
This syntax allows disambiguation between the two.
"##,
E0225: r##"
You attempted to use multiple types as bounds for a closure or trait object.
Rust does not currently support this. A simple example that causes this error:
```compile_fail
fn main() {
let _: Box<std::io::Read + std::io::Write>;
}
```
Builtin traits are an exception to this rule: it's possible to have bounds of
one non-builtin type, plus any number of builtin types. For example, the
following compiles correctly:
```
fn main() {
let _: Box<std::io::Read + Send + Sync>;
}
```
"##,
E0232: r##"
The attribute must have a value. Erroneous code example:
```compile_fail
#![feature(on_unimplemented)]
#[rustc_on_unimplemented] // error: this attribute must have a value
trait Bar {}
```
Please supply the missing value of the attribute. Example:
```
#![feature(on_unimplemented)]
#[rustc_on_unimplemented = "foo"] // ok!
trait Bar {}
```
"##,
E0243: r##"
This error indicates that not enough type parameters were found in a type or
trait.
For example, the `Foo` struct below is defined to be generic in `T`, but the
type parameter is missing in the definition of `Bar`:
```compile_fail
struct Foo<T> { x: T }
struct Bar { x: Foo }
```
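Supplying the missing type parameter fixes it, for instance:
```
struct Foo<T> { x: T }
struct Bar { x: Foo<i32> } // ok: `Foo` is given its type parameter
```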
"##,
E0244: r##"
This error indicates that too many type parameters were found in a type or
trait.
For example, the `Foo` struct below has no type parameters, but is supplied
with two in the definition of `Bar`:
```compile_fail
struct Foo { x: bool }
struct Bar<S, T> { x: Foo<S, T> }
```
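Dropping the extra parameters fixes it:
```
struct Foo { x: bool }
struct Bar { x: Foo } // ok: `Foo` takes no type parameters
```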
"##,
E0248: r##"
This error indicates an attempt to use a value where a type is expected. For
example:
```compile_fail
enum Foo {
Bar(u32)
}
fn do_something(x: Foo::Bar) { }
```
In this example, we're attempting to use `Foo::Bar` as a type in the
`do_something` function. This is not legal: `Foo::Bar` is a value of type `Foo`,
not a distinct static type. Likewise, it's not legal to attempt to
`impl Foo::Bar`: instead, you must `impl Foo` and then pattern match to specify
behavior for specific enum variants.
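One way to fix the example is to accept the enum type and match on the
variant, for instance:
```
enum Foo {
    Bar(u32)
}

fn do_something(x: Foo) {
    match x {
        Foo::Bar(n) => println!("{}", n),
    }
}
```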
"##,
E0318: r##"
Default impls for a trait must be located in the same crate where the trait was
defined. For more information see the
[opt-in builtin traits RFC](https://github.com/rust-lang/rfcs/blob/master/text/0019-opt-in-builtin-traits.md).
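A sketch of the kind of code that triggers this error, using the old
`impl Trait for .. { }` default-impl syntax on a trait defined in another
crate:
```ignore
#![feature(optin_builtin_traits)]

// `Send` is defined in the standard library, not in this crate:
impl Send for .. { } // error: default impls must be in the trait's crate
```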
"##,
E0321: r##"
A cross-crate opt-out trait was implemented on something which wasn't a struct
or enum type. Erroneous code example:
```compile_fail
#![feature(optin_builtin_traits)]
struct Foo;
impl !Sync for Foo {}
unsafe impl Send for &'static Foo { }
// error: cross-crate traits with a default impl, like `core::marker::Send`,
//        can only be implemented for a struct/enum type, not
//        `&'static Foo`
```
Only structs and enums are permitted to impl `Send`, `Sync`, and other opt-out
traits, and the struct or enum must be local to the current crate. So, for
example, `unsafe impl Send for Rc<Foo>` is not allowed.
"##,
E0322: r##"
The `Sized` trait is a special trait built-in to the compiler for types with a
constant size known at compile-time. This trait is automatically implemented
for types as needed by the compiler, and it is currently disallowed to
explicitly implement it for a type.
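For example:
```compile_fail
struct Foo;

impl Sized for Foo {} // error: explicit impls for the `Sized` trait are not
                      //        permitted
```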
"##,
E0323: r##"
An associated const was implemented when another trait item was expected.
Erroneous code example:
```compile_fail
#![feature(associated_consts)]
trait Foo {
type N;
}
struct Bar;
impl Foo for Bar {
const N : u32 = 0;
// error: item `N` is an associated const, which doesn't match its
// trait `<Bar as Foo>`
}
```
Please verify that the associated const wasn't misspelled and the correct trait
was implemented. Example:
```
struct Bar;
trait Foo {
type N;
}
impl Foo for Bar {
type N = u32; // ok!
}
```
Or:
```
#![feature(associated_consts)]
struct Bar;
trait Foo {
const N : u32;
}
impl Foo for Bar {
const N : u32 = 0; // ok!
}
```
"##,
E0324: r##"
A method was implemented when another trait item was expected. Erroneous
code example:
```compile_fail
struct Bar;
trait Foo {
const N : u32;
fn M();
}
impl Foo for Bar {
fn N() {}
// error: item `N` is an associated method, which doesn't match its
// trait `<Bar as Foo>`
}
```
To fix this error, please verify that the method name wasn't misspelled and
verify that you are indeed implementing the correct trait items. Example:
```
#![feature(associated_consts)]
struct Bar;
trait Foo {
const N : u32;
fn M();
}
impl Foo for Bar {
const N : u32 = 0;
fn M() {} // ok!
}
```
"##,
E0325: r##"
An associated type was implemented when another trait item was expected.
Erroneous code example:
```compile_fail
struct Bar;
trait Foo {
const N : u32;
}
impl Foo for Bar {
type N = u32;
// error: item `N` is an associated type, which doesn't match its
// trait `<Bar as Foo>`
}
```
Please verify that the associated type name wasn't misspelled and your
implementation corresponds to the trait definition. Example:
```
struct Bar;
trait Foo {
type N;
}
impl Foo for Bar {
type N = u32; // ok!
}
```
Or:
```
#![feature(associated_consts)]
struct Bar;
trait Foo {
const N : u32;
}
impl Foo for Bar {
const N : u32 = 0; // ok!
}
```
"##,
E0326: r##"
The types of any associated constants in a trait implementation must match the
types in the trait definition. This error indicates that there was a mismatch.
Here's an example of this error:
```compile_fail
trait Foo {
const BAR: bool;
}
struct Bar;
impl Foo for Bar {
const BAR: u32 = 5; // error, expected bool, found u32
}
```
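Matching the type in the trait definition fixes it (using the
`associated_consts` feature, as in the examples above):
```
#![feature(associated_consts)]

trait Foo {
    const BAR: bool;
}

struct Bar;

impl Foo for Bar {
    const BAR: bool = true; // ok!
}
```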
"##,
E0329: r##"
An attempt was made to access an associated constant through either a generic
type parameter or `Self`. This is not supported yet. An example causing this
error is shown below:
```ignore
#![feature(associated_consts)]
trait Foo {
const BAR: f64;
}
struct MyStruct;
impl Foo for MyStruct {
const BAR: f64 = 0f64;
}
fn get_bar_bad<F: Foo>(t: F) -> f64 {
F::BAR
}
```
Currently, the value of `BAR` for a particular type can only be accessed
through a concrete type, as shown below:
```ignore
#![feature(associated_consts)]
trait Foo {
const BAR: f64;
}
struct MyStruct;
fn get_bar_good() -> f64 {
<MyStruct as Foo>::BAR
}
```
"##,
E0366: r##"
An attempt was made to implement `Drop` on a concrete specialization of a
generic type. An example is shown below:
```compile_fail
struct Foo<T> {
t: T
}
impl Drop for Foo<u32> {
fn drop(&mut self) {}
}
```
This code is not legal: it is not possible to specialize `Drop` to a subset of
implementations of a generic type. One workaround for this is to wrap the
generic type, as shown below:
```
struct Foo<T> {
t: T
}
struct Bar {
t: Foo<u32>
}
impl Drop for Bar {
fn drop(&mut self) {}
}
```
"##,
E0367: r##"
An attempt was made to implement `Drop` on a specialization of a generic type.
An example is shown below:
```compile_fail
trait Foo{}
struct MyStruct<T> {
t: T
}
impl<T: Foo> Drop for MyStruct<T> {
fn drop(&mut self) {}
}
```
This code is not legal: it is not possible to specialize `Drop` to a subset of
implementations of a generic type. In order for this code to work, `MyStruct`
must also require that `T` implements `Foo`. Alternatively, another option is
to wrap the generic type in another that specializes appropriately:
```
trait Foo{}
struct MyStruct<T> {
t: T
}
struct MyStructWrapper<T: Foo> {
t: MyStruct<T>
}
impl <T: Foo> Drop for MyStructWrapper<T> {
fn drop(&mut self) {}
}
```
"##,
E0368: r##"
This error indicates that a binary assignment operator like `+=` or `^=` was
applied to a type that doesn't support it. For example:
```compile_fail
let mut x = 12f32; // error: binary operation `<<` cannot be applied to
// type `f32`
x <<= 2;
```
To fix this error, please check that this type implements this binary
operation. Example:
```
let mut x = 12u32; // the `u32` type does implement the `ShlAssign` trait
x <<= 2; // ok!
```
It is also possible to overload most operators for your own type by
implementing the `[OP]Assign` traits from `std::ops`.
Another problem you might be facing is this: suppose you've overloaded the `+`
operator for some type `Foo` by implementing the `std::ops::Add` trait for
`Foo`, but you find that using `+=` does not work, as in this example:
```compile_fail
use std::ops::Add;
struct Foo(u32);
impl Add for Foo {
type Output = Foo;
fn add(self, rhs: Foo) -> Foo {
Foo(self.0 + rhs.0)
}
}
fn main() {
let mut x: Foo = Foo(5);
    x += Foo(7); // error: binary assignment operation `+=` cannot be applied
                 //        to type `Foo`
}
```
This is because `AddAssign` is not automatically implemented, so you need to
manually implement it for your type.
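A sketch of such a manual implementation:
```
use std::ops::AddAssign;

struct Foo(u32);

impl AddAssign for Foo {
    fn add_assign(&mut self, rhs: Foo) {
        self.0 += rhs.0;
    }
}

fn main() {
    let mut x = Foo(5);
    x += Foo(7); // ok!
}
```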
"##,
E0369: r##"
A binary operation was attempted on a type which doesn't support it.
Erroneous code example:
```compile_fail
let x = 12f32; // error: binary operation `<<` cannot be applied to
// type `f32`
x << 2;
```
To fix this error, please check that this type implements this binary
operation. Example:
```
let x = 12u32; // the `u32` type does implement it:
// https://doc.rust-lang.org/stable/std/ops/trait.Shl.html
x << 2; // ok!
```
It is also possible to overload most operators for your own type by
implementing traits from `std::ops`.
"##,
E0370: r##"
The discriminant of an enum variant reached the maximum value of its type, so
the next variant's value cannot be set automatically. Erroneous code example:
```compile_fail
#[deny(overflowing_literals)]
enum Foo {
X = 0x7fffffffffffffff,
Y, // error: enum discriminant overflowed on value after
// 9223372036854775807: i64; set explicitly via
// Y = -9223372036854775808 if that is desired outcome
}
```
To fix this, please set the next enum value manually, or put the enum variant
with the maximum value at the end of the enum. Examples:
```
enum Foo {
X = 0x7fffffffffffffff,
Y = 0, // ok!
}
```
Or:
```
enum Foo {
Y = 0, // ok!
X = 0x7fffffffffffffff,
}
```
"##,
E0371: r##"
When `Trait2` is a subtrait of `Trait1` (for example, when `Trait2` has a
definition like `trait Trait2: Trait1 { ... }`), it is not allowed to implement
`Trait1` for `Trait2`. This is because `Trait2` already implements `Trait1` by
definition, so it is not useful to do this.
Example:
```compile_fail
trait Foo { fn foo(&self) { } }
trait Bar: Foo { }
trait Baz: Bar { }
impl Bar for Baz { } // error, `Baz` implements `Bar` by definition
impl Foo for Baz { } // error, `Baz` implements `Bar` which implements `Foo`
impl Baz for Baz { } // error, `Baz` (trivially) implements `Baz`
impl Baz for Bar { } // Note: This is OK
```
"##,
E0374: r##"
A struct without a field containing an unsized type cannot implement
`CoerceUnsized`. An
[unsized type](https://doc.rust-lang.org/book/unsized-types.html)
is any type that the compiler doesn't know the length or alignment of at
compile time. Any struct containing an unsized type is also unsized.
Example of erroneous code:
```compile_fail
#![feature(coerce_unsized)]
use std::ops::CoerceUnsized;
struct Foo<T: ?Sized> {
a: i32,
}
// error: Struct `Foo` has no unsized fields that need `CoerceUnsized`.
impl<T, U> CoerceUnsized<Foo<U>> for Foo<T>
where T: CoerceUnsized<U> {}
```
`CoerceUnsized` is used to coerce one struct containing an unsized type
into another struct containing a different unsized type. If the struct
doesn't have any fields of unsized types then you don't need explicit
coercion to get the types you want. To fix this you can either
not try to implement `CoerceUnsized` or you can add a field that is
unsized to the struct.
Example:
```
#![feature(coerce_unsized)]
use std::ops::CoerceUnsized;
// We don't need to impl `CoerceUnsized` here.
struct Foo {
a: i32,
}
// We add the unsized type field to the struct.
struct Bar<T: ?Sized> {
a: i32,
b: T,
}
// The struct has an unsized field so we can implement
// `CoerceUnsized` for it.
impl<T, U> CoerceUnsized<Bar<U>> for Bar<T>
where T: CoerceUnsized<U> {}
```
Note that `CoerceUnsized` is mainly used by smart pointers like `Box`, `Rc`
and `Arc` to be able to mark that they can coerce unsized types that they
are pointing at.
"##,
E0375: r##"
A struct with more than one field containing an unsized type cannot implement
`CoerceUnsized`. This only occurs when you are trying to coerce one of the
types in your struct to another type in the struct. In this case we try to
impl `CoerceUnsized` from `T` to `U` which are both types that the struct
takes. An [unsized type](https://doc.rust-lang.org/book/unsized-types.html)
is any type that the compiler doesn't know the length or alignment of at
compile time. Any struct containing an unsized type is also unsized.
Example of erroneous code:
```compile_fail
#![feature(coerce_unsized)]
use std::ops::CoerceUnsized;
struct Foo<T: ?Sized, U: ?Sized> {
a: i32,
b: T,
c: U,
}
// error: Struct `Foo` has more than one unsized field.
impl<T, U> CoerceUnsized<Foo<U, T>> for Foo<T, U> {}
```
`CoerceUnsized` only allows for coercion from a structure with a single
unsized type field to another struct with a single unsized type field.
In fact Rust only allows a struct to have at most one unsized field, and that
field must be the last one in the struct, so having two unsized types in a
single struct is not allowed by the compiler. To fix this, use only one field
containing an unsized type in the struct, and then use multiple structs to
manage each unsized field you need.
Example:
```
#![feature(coerce_unsized)]
use std::ops::CoerceUnsized;
struct Foo<T: ?Sized> {
a: i32,
b: T,
}
impl <T, U> CoerceUnsized<Foo<U>> for Foo<T>
where T: CoerceUnsized<U> {}
fn coerce_foo<T: CoerceUnsized<U>, U>(t: T) -> Foo<U> {
Foo { a: 12i32, b: t } // we use coercion to get the `Foo<U>` type we need
}
```
"##,
E0376: r##"
The type you are trying to impl `CoerceUnsized` for is not a struct.
`CoerceUnsized` can only be implemented for a struct. Unsized types are
already able to be coerced without an implementation of `CoerceUnsized`,
whereas a struct containing an unsized type needs to know that the unsized
field it contains can be coerced. An
[unsized type](https://doc.rust-lang.org/book/unsized-types.html)
is any type that the compiler doesn't know the length or alignment of at
compile time. Any struct containing an unsized type is also unsized.
Example of erroneous code:
```compile_fail
#![feature(coerce_unsized)]
use std::ops::CoerceUnsized;
struct Foo<T: ?Sized> {
a: T,
}
// error: The type `U` is not a struct
impl<T, U> CoerceUnsized<U> for Foo<T> {}
```
The `CoerceUnsized` trait takes a struct type. Make sure the type you are
providing to `CoerceUnsized` is a struct with only the last field containing an
unsized type.
Example:
```
#![feature(coerce_unsized)]
use std::ops::CoerceUnsized;
struct Foo<T> {
a: T,
}
// The `Foo<U>` is a struct so `CoerceUnsized` can be implemented
impl<T, U> CoerceUnsized<Foo<U>> for Foo<T> where T: CoerceUnsized<U> {}
```
Note that in Rust, a struct may contain at most one unsized field, and that
field must be the last field in the struct.
"##,
E0379: r##"
Trait methods cannot be declared `const` by design. For more information, see
[RFC 911].
[RFC 911]: https://github.com/rust-lang/rfcs/pull/911
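A sketch of code that runs into this error (assuming the `const_fn` feature
gate):
```compile_fail
#![feature(const_fn)]

trait Foo {
    const fn bar() -> u32; // error: trait fns cannot be declared const
}
```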
"##,
E0380: r##"
Default impls are only allowed for traits with no methods or associated items.
For more information see the
[opt-in builtin traits RFC](https://github.com/rust-lang/rfcs/blob/master/text/0019-opt-in-builtin-traits.md).
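A sketch of code that triggers this error, again assuming the
`optin_builtin_traits` feature:
```compile_fail
#![feature(optin_builtin_traits)]

trait Foo {
    fn bar(&self);
}

// error: default impls are only allowed on traits without items
impl Foo for .. { }
```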
"##,
E0390: r##"
You tried to implement methods for a primitive type. Erroneous code example:
```compile_fail
struct Foo {
x: i32
}
impl *mut Foo {}
// error: only a single inherent implementation marked with
// `#[lang = "mut_ptr"]` is allowed for the `*mut T` primitive
```
This isn't allowed, but using a trait to implement a method is a good solution.
Example:
```
struct Foo {
x: i32
}
trait Bar {
fn bar();
}
impl Bar for *mut Foo {
fn bar() {} // ok!
}
```
"##,
E0391: r##"
This error indicates that some types or traits depend on each other
and therefore cannot be constructed.
The following example contains a circular dependency between two traits:
```compile_fail
trait FirstTrait : SecondTrait {
}
trait SecondTrait : FirstTrait {
}
```
"##,
E0392: r##"
This error indicates that a type or lifetime parameter has been declared
but not actually used. Here is an example that demonstrates the error:
```compile_fail
enum Foo<T> {
Bar
}
```
If the type parameter was included by mistake, this error can be fixed
by simply removing the type parameter, as shown below:
```
enum Foo {
Bar
}
```
Alternatively, if the type parameter was intentionally inserted, it must be
used. A simple fix is shown below:
```
enum Foo<T> {
Bar(T)
}
```
This error may also commonly be found when working with unsafe code. For
example, when using raw pointers one may wish to specify the lifetime for
which the pointed-at data is valid. An initial attempt (below) causes this
error:
```compile_fail
struct Foo<'a, T> {
x: *const T
}
```
We want to express the constraint that `Foo` should not outlive `'a`, because
the data pointed to by `T` is only valid for that lifetime. The problem is
that there are no actual uses of `'a`. It's possible to work around this
by adding a `PhantomData` type to the struct, using it to tell the compiler
to act as if the struct contained a borrowed reference `&'a T`:
```
use std::marker::PhantomData;
struct Foo<'a, T: 'a> {
x: *const T,
phantom: PhantomData<&'a T>
}
```
PhantomData can also be used to express information about unused type
parameters. You can read more about it in the API documentation:
https://doc.rust-lang.org/std/marker/struct.PhantomData.html
"##,
E0393: r##"
A type parameter which references `Self` in its default value was not specified.
Example of erroneous code:
```compile_fail
trait A<T=Self> {}
fn together_we_will_rule_the_galaxy(son: &A) {}
// error: the type parameter `T` must be explicitly specified in an
// object type because its default value `Self` references the
// type `Self`
```
A trait object is defined over a single, fully-defined trait. With a regular
default parameter, this parameter can just be substituted in. However, if the
default parameter is `Self`, the trait changes for each concrete type; i.e.
`i32` will be expected to implement `A<i32>`, `bool` will be expected to
implement `A<bool>`, etc... These types will not share an implementation of a
fully-defined trait; instead they share implementations of a trait with
different parameters substituted in for each implementation. This is
irreconcilable with what we need to make a trait object work, and is thus
disallowed. Making the trait concrete by explicitly specifying the value of the
defaulted parameter will fix this issue. Fixed example:
```
trait A<T=Self> {}
fn together_we_will_rule_the_galaxy(son: &A<i32>) {} // Ok!
```
"##,
E0439: r##"
The array length of the platform intrinsic `simd_shuffle` wasn't specified in
the function name. Erroneous code example:
```compile_fail
#![feature(platform_intrinsics)]
extern "platform-intrinsic" {
fn simd_shuffle<A,B>(a: A, b: A, c: [u32; 8]) -> B;
// error: invalid `simd_shuffle`, needs length: `simd_shuffle`
}
```
The `simd_shuffle` function needs the length of the array passed as its last
parameter to be encoded in its name. Example:
```
#![feature(platform_intrinsics)]
extern "platform-intrinsic" {
fn simd_shuffle8<A,B>(a: A, b: A, c: [u32; 8]) -> B;
}
```
"##,
E0440: r##"
A platform-specific intrinsic function has the wrong number of type
parameters. Erroneous code example:
```compile_fail
#![feature(repr_simd)]
#![feature(platform_intrinsics)]
#[repr(simd)]
struct f64x2(f64, f64);
extern "platform-intrinsic" {
fn x86_mm_movemask_pd<T>(x: f64x2) -> i32;
// error: platform-specific intrinsic has wrong number of type
// parameters
}
```
Please refer to the function declaration to see if it corresponds
with yours. Example:
```
#![feature(repr_simd)]
#![feature(platform_intrinsics)]
#[repr(simd)]
struct f64x2(f64, f64);
extern "platform-intrinsic" {
fn x86_mm_movemask_pd(x: f64x2) -> i32;
}
```
"##,
E0441: r##"
An unknown platform-specific intrinsic function was used. Erroneous
code example:
```compile_fail
#![feature(repr_simd)]
#![feature(platform_intrinsics)]
#[repr(simd)]
struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16);
extern "platform-intrinsic" {
fn x86_mm_adds_ep16(x: i16x8, y: i16x8) -> i16x8;
// error: unrecognized platform-specific intrinsic function
}
```
Please verify that the function name wasn't misspelled, and ensure
that it is declared in the rust source code (in the file
src/librustc_platform_intrinsics/x86.rs). Example:
```
#![feature(repr_simd)]
#![feature(platform_intrinsics)]
#[repr(simd)]
struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16);
extern "platform-intrinsic" {
fn x86_mm_adds_epi16(x: i16x8, y: i16x8) -> i16x8; // ok!
}
```
"##,
E0442: r##"
Intrinsic argument(s) and/or return value have the wrong type.
Erroneous code example:
```compile_fail
#![feature(repr_simd)]
#![feature(platform_intrinsics)]
#[repr(simd)]
struct i8x16(i8, i8, i8, i8, i8, i8, i8, i8,
i8, i8, i8, i8, i8, i8, i8, i8);
#[repr(simd)]
struct i32x4(i32, i32, i32, i32);
#[repr(simd)]
struct i64x2(i64, i64);
extern "platform-intrinsic" {
fn x86_mm_adds_epi16(x: i8x16, y: i32x4) -> i64x2;
// error: intrinsic arguments/return value have wrong type
}
```
To fix this error, please refer to the function declaration to give
it the expected types. Example:
```
#![feature(repr_simd)]
#![feature(platform_intrinsics)]
#[repr(simd)]
struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16);
extern "platform-intrinsic" {
fn x86_mm_adds_epi16(x: i16x8, y: i16x8) -> i16x8; // ok!
}
```
"##,
E0443: r##"
Intrinsic argument(s) and/or return value have the wrong type.
Erroneous code example:
```compile_fail
#![feature(repr_simd)]
#![feature(platform_intrinsics)]
#[repr(simd)]
struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16);
#[repr(simd)]
struct i64x8(i64, i64, i64, i64, i64, i64, i64, i64);
extern "platform-intrinsic" {
fn x86_mm_adds_epi16(x: i16x8, y: i16x8) -> i64x8;
// error: intrinsic argument/return value has wrong type
}
```
To fix this error, please refer to the function declaration to give
it the expected types. Example:
```
#![feature(repr_simd)]
#![feature(platform_intrinsics)]
#[repr(simd)]
struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16);
extern "platform-intrinsic" {
fn x86_mm_adds_epi16(x: i16x8, y: i16x8) -> i16x8; // ok!
}
```
"##,
E0444: r##"
A platform-specific intrinsic function has the wrong number of arguments.
Erroneous code example:
```compile_fail
#![feature(repr_simd)]
#![feature(platform_intrinsics)]
#[repr(simd)]
struct f64x2(f64, f64);
extern "platform-intrinsic" {
fn x86_mm_movemask_pd(x: f64x2, y: f64x2, z: f64x2) -> i32;
// error: platform-specific intrinsic has invalid number of arguments
}
```
Please refer to the function declaration to see if it corresponds
with yours. Example:
```
#![feature(repr_simd)]
#![feature(platform_intrinsics)]
#[repr(simd)]
struct f64x2(f64, f64);
extern "platform-intrinsic" {
fn x86_mm_movemask_pd(x: f64x2) -> i32; // ok!
}
```
"##,
E0516: r##"
The `typeof` keyword is currently reserved but unimplemented.
Erroneous code example:
```compile_fail
fn main() {
let x: typeof(92) = 92;
}
```
Try using type inference instead. Example:
```
fn main() {
let x = 92;
}
```
"##,
E0520: r##"
A non-default implementation was already made on this type, so it cannot be
specialized further. Erroneous code example:
```compile_fail
#![feature(specialization)]
trait SpaceLlama {
fn fly(&self);
}
// applies to all T
impl<T> SpaceLlama for T {
default fn fly(&self) {}
}
// non-default impl
// applies to all `Clone` T and overrides the previous impl
impl<T: Clone> SpaceLlama for T {
fn fly(&self) {}
}
// since `i32` implements `Clone`, this conflicts with the previous implementation
impl SpaceLlama for i32 {
default fn fly(&self) {}
// error: item `fly` is provided by an `impl` that specializes
// another, but the item in the parent `impl` is not marked
// `default` and so it cannot be specialized.
}
```
Specialization only allows you to override `default` functions in
implementations.
To fix this error, you need to mark all the parent implementations as default.
Example:
```
#![feature(specialization)]
trait SpaceLlama {
fn fly(&self);
}
// applies to all T
impl<T> SpaceLlama for T {
default fn fly(&self) {} // This is a parent implementation.
}
// applies to all `Clone` T; overrides the previous impl
impl<T: Clone> SpaceLlama for T {
default fn fly(&self) {} // This is a parent implementation but was
// previously not a default one, causing the error
}
// applies to i32, overrides the previous two impls
impl SpaceLlama for i32 {
fn fly(&self) {} // And now that's ok!
}
```
"##,
E0527: r##"
The number of elements in an array or slice pattern differed from the number of
elements in the array being matched.
Example of erroneous code:
```compile_fail,E0527
#![feature(slice_patterns)]
let r = &[1, 2, 3, 4];
match r {
&[a, b] => { // error: pattern requires 2 elements but array
// has 4
println!("a={}, b={}", a, b);
}
}
```
Ensure that the pattern is consistent with the size of the matched
array. Additional elements can be matched with `..`:
```
#![feature(slice_patterns)]
let r = &[1, 2, 3, 4];
match r {
&[a, b, ..] => { // ok!
println!("a={}, b={}", a, b);
}
}
```
"##,
E0528: r##"
An array or slice pattern required more elements than were present in the
matched array.
Example of erroneous code:
```compile_fail,E0528
#![feature(slice_patterns)]
let r = &[1, 2];
match r {
&[a, b, c, rest..] => { // error: pattern requires at least 3
// elements but array has 2
println!("a={}, b={}, c={} rest={:?}", a, b, c, rest);
}
}
```
Ensure that the matched array has at least as many elements as the pattern
requires. You can match an arbitrary number of remaining elements with `..`:
```
#![feature(slice_patterns)]
let r = &[1, 2, 3, 4, 5];
match r {
&[a, b, c, rest..] => { // ok!
// prints `a=1, b=2, c=3 rest=[4, 5]`
println!("a={}, b={}, c={} rest={:?}", a, b, c, rest);
}
}
```
"##,
E0529: r##"
An array or slice pattern was matched against some other type.
Example of erroneous code:
```compile_fail,E0529
#![feature(slice_patterns)]
let r: f32 = 1.0;
match r {
[a, b] => { // error: expected an array or slice, found `f32`
println!("a={}, b={}", a, b);
}
}
```
Ensure that the pattern and the expression being matched on are of consistent
types:
```
#![feature(slice_patterns)]
let r = [1.0, 2.0];
match r {
[a, b] => { // ok!
println!("a={}, b={}", a, b);
}
}
```
"##,
E0559: r##"
An unknown field was specified in an enum's struct variant.
Erroneous code example:
```compile_fail,E0559
enum Field {
Fool { x: u32 },
}
let s = Field::Fool { joke: 0 };
// error: struct variant `Field::Fool` has no field named `joke`
```
Verify that you didn't misspell the field's name and that the field exists. Example:
```
enum Field {
Fool { joke: u32 },
}
let s = Field::Fool { joke: 0 }; // ok!
```
"##,
E0560: r##"
An unknown field was specified in a structure.
Erroneous code example:
```compile_fail,E0560
struct Simba {
mother: u32,
}
let s = Simba { mother: 1, father: 0 };
// error: structure `Simba` has no field named `father`
```
Verify that you didn't misspell the field's name and that the field exists. Example:
```
struct Simba {
mother: u32,
father: u32,
}
let s = Simba { mother: 1, father: 0 }; // ok!
```
"##,
}
register_diagnostics! {
// E0068,
// E0085,
// E0086,
E0090,
E0103, // @GuillaumeGomez: I was unable to get this error, try your best!
E0104,
// E0123,
// E0127,
// E0129,
// E0141,
// E0159, // use of trait `{}` as struct constructor
// E0163, // merged into E0071
E0167,
// E0168,
// E0173, // manual implementations of unboxed closure traits are experimental
// E0174,
E0182,
E0183,
// E0187, // can't infer the kind of the closure
// E0188, // cannot cast an immutable reference to a mutable pointer
// E0189, // deprecated: can only cast a boxed pointer to a boxed object
// E0190, // deprecated: can only cast a &-pointer to an &-object
E0196, // cannot determine a type for this closure
E0203, // type parameter has more than one relaxed default bound,
// and only one is supported
E0208,
// E0209, // builtin traits can only be implemented on structs or enums
E0212, // cannot extract an associated type from a higher-ranked trait bound
// E0213, // associated types are not accepted in this context
// E0215, // angle-bracket notation is not stable with `Fn`
// E0216, // parenthetical notation is only stable with `Fn`
// E0217, // ambiguous associated type, defined in multiple supertraits
// E0218, // no associated type defined
// E0219, // associated type defined in higher-ranked supertrait
// E0222, // Error code E0045 (variadic function must have C calling
// convention) duplicate
E0224, // at least one non-builtin trait is required for an object type
E0226, // only a single explicit lifetime bound is permitted
E0227, // ambiguous lifetime bound, explicit lifetime bound required
E0228, // explicit lifetime bound required
E0230, // there is no type parameter on trait
E0231, // only named substitution parameters are allowed
// E0233,
// E0234,
// E0235, // structure constructor specifies a structure of type but
// E0236, // no lang item for range syntax
// E0237, // no lang item for range syntax
E0238, // parenthesized parameters may only be used with a trait
// E0239, // `next` method of `Iterator` trait has unexpected type
// E0240,
// E0241,
// E0242,
E0245, // not a trait
// E0246, // invalid recursive type
// E0247,
// E0249,
// E0319, // trait impls for defaulted traits allowed just for structs/enums
E0320, // recursive overflow during dropck
E0328, // cannot implement Unsize explicitly
// E0372, // coherence not object safe
E0377, // the trait `CoerceUnsized` may only be implemented for a coercion
// between structures with the same definition
E0399, // trait items need to be implemented because the associated
// type `{}` was overridden
E0436, // functional record update requires a struct
E0513, // no type for local variable ..
E0521, // redundant default implementations of trait
E0533, // `{}` does not name a unit variant, unit struct or a constant
}
| 21.956703 | 95 | 0.673403 |
71e18d2b6f9499c7199df549adb1d5ea0861d1b1 | 10,975 | use rustc::traits::{
GoalKind,
Clause,
ProgramClause,
ProgramClauseCategory,
};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::subst::{Kind, InternalSubsts, Subst};
use rustc::hir;
use rustc::hir::def_id::DefId;
use crate::lowering::Lower;
use crate::generic_types;
/// Returns a predicate of the form
/// `Implemented(ty: Trait) :- Implemented(nested: Trait)...`
/// where `Trait` is specified by `trait_def_id`.
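/// For example (illustrative), the tuple arm of `assemble_builtin_sized_impls`
/// below yields, for a 2-tuple:
/// `Implemented((T0, T1): Sized) :- Implemented(T0: Sized), Implemented(T1: Sized).`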
fn builtin_impl_clause(
tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
nested: &[Kind<'tcx>],
trait_def_id: DefId,
) -> ProgramClause<'tcx> {
ProgramClause {
goal: ty::TraitPredicate {
trait_ref: ty::TraitRef {
def_id: trait_def_id,
substs: tcx.mk_substs_trait(ty, &[]),
},
}.lower(),
hypotheses: tcx.mk_goals(
nested.iter()
.cloned()
.map(|nested_ty| ty::TraitRef {
def_id: trait_def_id,
substs: tcx.mk_substs_trait(nested_ty.expect_ty(), &[]),
})
.map(|trait_ref| ty::TraitPredicate { trait_ref })
.map(|pred| GoalKind::DomainGoal(pred.lower()))
.map(|goal_kind| tcx.mk_goal(goal_kind))
),
category: ProgramClauseCategory::Other,
}
}
crate fn assemble_builtin_unsize_impls<'tcx>(
tcx: TyCtxt<'tcx>,
unsize_def_id: DefId,
source: Ty<'tcx>,
target: Ty<'tcx>,
clauses: &mut Vec<Clause<'tcx>>,
) {
match (&source.sty, &target.sty) {
(ty::Dynamic(data_a, ..), ty::Dynamic(data_b, ..)) => {
if data_a.principal_def_id() != data_b.principal_def_id()
|| data_b.auto_traits().any(|b| data_a.auto_traits().all(|a| a != b))
{
return;
}
// FIXME: rules for trait upcast
}
(_, &ty::Dynamic(..)) => {
// FIXME: basically, we should have something like:
// ```
// forall<T> {
// Implemented(T: Unsize< for<...> dyn Trait<...> >) :-
// for<...> Implemented(T: Trait<...>).
// }
// ```
// The question is: how to correctly handle the higher-ranked
// `for<...>` binder in order to have a generic rule?
// (Having generic rules is useful for caching, as we may be able
// to turn this function and others into tcx queries later on).
}
(ty::Array(_, length), ty::Slice(_)) => {
let ty_param = generic_types::bound(tcx, 0);
let array_ty = tcx.mk_ty(ty::Array(ty_param, length));
let slice_ty = tcx.mk_ty(ty::Slice(ty_param));
// `forall<T> { Implemented([T; N]: Unsize<[T]>). }`
let clause = ProgramClause {
goal: ty::TraitPredicate {
trait_ref: ty::TraitRef {
def_id: unsize_def_id,
substs: tcx.mk_substs_trait(array_ty, &[slice_ty.into()])
},
}.lower(),
hypotheses: ty::List::empty(),
category: ProgramClauseCategory::Other,
};
clauses.push(Clause::ForAll(ty::Binder::bind(clause)));
}
(ty::Infer(ty::TyVar(_)), _) | (_, ty::Infer(ty::TyVar(_))) => {
// FIXME: ambiguous
}
(ty::Adt(def_id_a, ..), ty::Adt(def_id_b, ..)) => {
if def_id_a != def_id_b {
return;
}
// FIXME: rules for struct unsizing
}
(&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => {
if tys_a.len() != tys_b.len() {
return;
}
// FIXME: rules for tuple unsizing
}
_ => (),
}
}
crate fn assemble_builtin_sized_impls<'tcx>(
tcx: TyCtxt<'tcx>,
sized_def_id: DefId,
ty: Ty<'tcx>,
clauses: &mut Vec<Clause<'tcx>>,
) {
let mut push_builtin_impl = |ty: Ty<'tcx>, nested: &[Kind<'tcx>]| {
let clause = builtin_impl_clause(tcx, ty, nested, sized_def_id);
// Bind innermost bound vars that may exist in `ty` and `nested`.
clauses.push(Clause::ForAll(ty::Binder::bind(clause)));
};
match &ty.sty {
// Non parametric primitive types.
ty::Bool |
ty::Char |
ty::Int(..) |
ty::Uint(..) |
ty::Float(..) |
ty::Infer(ty::IntVar(_)) |
ty::Infer(ty::FloatVar(_)) |
ty::Error |
ty::Never => push_builtin_impl(ty, &[]),
// These ones are always `Sized`.
&ty::Array(_, length) => {
push_builtin_impl(tcx.mk_ty(ty::Array(generic_types::bound(tcx, 0), length)), &[]);
}
ty::RawPtr(ptr) => {
push_builtin_impl(generic_types::raw_ptr(tcx, ptr.mutbl), &[]);
}
&ty::Ref(_, _, mutbl) => {
push_builtin_impl(generic_types::ref_ty(tcx, mutbl), &[]);
}
ty::FnPtr(fn_ptr) => {
let fn_ptr = fn_ptr.skip_binder();
let fn_ptr = generic_types::fn_ptr(
tcx,
fn_ptr.inputs_and_output.len(),
fn_ptr.c_variadic,
fn_ptr.unsafety,
fn_ptr.abi
);
push_builtin_impl(fn_ptr, &[]);
}
&ty::FnDef(def_id, ..) => {
push_builtin_impl(generic_types::fn_def(tcx, def_id), &[]);
}
&ty::Closure(def_id, ..) => {
push_builtin_impl(generic_types::closure(tcx, def_id), &[]);
}
&ty::Generator(def_id, ..) => {
push_builtin_impl(generic_types::generator(tcx, def_id), &[]);
}
// `Sized` if the last type is `Sized` (because else we will get a WF error anyway).
&ty::Tuple(type_list) => {
let type_list = generic_types::type_list(tcx, type_list.len());
push_builtin_impl(tcx.mk_ty(ty::Tuple(type_list)), &type_list);
}
// Struct def
ty::Adt(adt_def, _) => {
let substs = InternalSubsts::bound_vars_for_item(tcx, adt_def.did);
let adt = tcx.mk_ty(ty::Adt(adt_def, substs));
let sized_constraint = adt_def.sized_constraint(tcx)
.iter()
.map(|ty| Kind::from(ty.subst(tcx, substs)))
.collect::<Vec<_>>();
push_builtin_impl(adt, &sized_constraint);
}
// Artificially trigger an ambiguity by adding two possible types to
// unify against.
ty::Infer(ty::TyVar(_)) => {
push_builtin_impl(tcx.types.i32, &[]);
push_builtin_impl(tcx.types.f32, &[]);
}
ty::Projection(_projection_ty) => {
// FIXME: add builtin impls from the associated type values found in
// trait impls of `projection_ty.trait_ref(tcx)`.
}
// The `Sized` bound can only come from the environment.
ty::Param(..) |
ty::Placeholder(..) |
ty::UnnormalizedProjection(..) => (),
// Definitely not `Sized`.
ty::Foreign(..) |
ty::Str |
ty::Slice(..) |
ty::Dynamic(..) |
ty::Opaque(..) => (),
ty::Bound(..) |
ty::GeneratorWitness(..) |
ty::Infer(ty::FreshTy(_)) |
ty::Infer(ty::FreshIntTy(_)) |
ty::Infer(ty::FreshFloatTy(_)) => bug!("unexpected type {:?}", ty),
}
}
crate fn assemble_builtin_copy_clone_impls<'tcx>(
tcx: TyCtxt<'tcx>,
trait_def_id: DefId,
ty: Ty<'tcx>,
clauses: &mut Vec<Clause<'tcx>>,
) {
let mut push_builtin_impl = |ty: Ty<'tcx>, nested: &[Kind<'tcx>]| {
let clause = builtin_impl_clause(tcx, ty, nested, trait_def_id);
// Bind innermost bound vars that may exist in `ty` and `nested`.
clauses.push(Clause::ForAll(ty::Binder::bind(clause)));
};
match &ty.sty {
// Implementations provided in libcore.
ty::Bool |
ty::Char |
ty::Int(..) |
ty::Uint(..) |
ty::Float(..) |
ty::RawPtr(..) |
ty::Never |
ty::Ref(_, _, hir::MutImmutable) => (),
// Non parametric primitive types.
ty::Infer(ty::IntVar(_)) |
ty::Infer(ty::FloatVar(_)) |
ty::Error => push_builtin_impl(ty, &[]),
// These implement `Copy`/`Clone` if their element types do.
&ty::Array(_, length) => {
let element_ty = generic_types::bound(tcx, 0);
push_builtin_impl(
tcx.mk_ty(ty::Array(element_ty, length)),
&[Kind::from(element_ty)],
);
}
&ty::Tuple(type_list) => {
let type_list = generic_types::type_list(tcx, type_list.len());
push_builtin_impl(tcx.mk_ty(ty::Tuple(type_list)), &**type_list);
}
&ty::Closure(def_id, ..) => {
let closure_ty = generic_types::closure(tcx, def_id);
let upvar_tys: Vec<_> = match &closure_ty.sty {
ty::Closure(_, substs) => {
substs.upvar_tys(def_id, tcx).map(|ty| Kind::from(ty)).collect()
},
_ => bug!(),
};
push_builtin_impl(closure_ty, &upvar_tys);
}
// These ones are always `Clone`.
ty::FnPtr(fn_ptr) => {
let fn_ptr = fn_ptr.skip_binder();
let fn_ptr = generic_types::fn_ptr(
tcx,
fn_ptr.inputs_and_output.len(),
fn_ptr.c_variadic,
fn_ptr.unsafety,
fn_ptr.abi
);
push_builtin_impl(fn_ptr, &[]);
}
&ty::FnDef(def_id, ..) => {
push_builtin_impl(generic_types::fn_def(tcx, def_id), &[]);
}
// These depend on whatever user-defined impls might exist.
ty::Adt(_, _) => (),
// Artificially trigger an ambiguity by adding two possible types to
// unify against.
ty::Infer(ty::TyVar(_)) => {
push_builtin_impl(tcx.types.i32, &[]);
push_builtin_impl(tcx.types.f32, &[]);
}
ty::Projection(_projection_ty) => {
// FIXME: add builtin impls from the associated type values found in
// trait impls of `projection_ty.trait_ref(tcx)`.
}
// The `Copy`/`Clone` bound can only come from the environment.
ty::Param(..) |
ty::Placeholder(..) |
ty::UnnormalizedProjection(..) |
ty::Opaque(..) => (),
// Definitely not `Copy`/`Clone`.
ty::Dynamic(..) |
ty::Foreign(..) |
ty::Generator(..) |
ty::Str |
ty::Slice(..) |
ty::Ref(_, _, hir::MutMutable) => (),
ty::Bound(..) |
ty::GeneratorWitness(..) |
ty::Infer(ty::FreshTy(_)) |
ty::Infer(ty::FreshIntTy(_)) |
ty::Infer(ty::FreshFloatTy(_)) => bug!("unexpected type {:?}", ty),
}
}
| 33.460366 | 95 | 0.505057 |
fe5b1cdaec7eeb2ddf574d958ffac6fd4ff542aa | 3,726 | use crate::*;
use holochain_zome_types::prelude::*;
use std::sync::Arc;
/// Extend holo_hash::AgentPubKey with additional signature functionality
/// from Keystore.
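///
/// A minimal usage sketch (not a doctest; `keystore` is assumed to be a
/// connected `KeystoreSender`, and `payload` any value that is `Serialize`
/// and `TryInto<SerializedBytes>`):
///
/// ```ignore
/// let agent = holo_hash::AgentPubKey::new_from_pure_entropy(&keystore).await?;
/// let sig = agent.sign(&keystore, &payload).await?;
/// assert!(agent.verify_signature(&sig, payload).await?);
/// ```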
pub trait AgentPubKeyExt {
/// create a new agent keypair in given keystore, returning the AgentPubKey
fn new_from_pure_entropy(
keystore: &KeystoreSender,
) -> KeystoreApiFuture<holo_hash::AgentPubKey>
where
Self: Sized;
/// sign some arbitrary data
fn sign<S>(&self, keystore: &KeystoreSender, data: S) -> KeystoreApiFuture<Signature>
where
S: Serialize;
/// sign some arbitrary raw bytes
fn sign_raw(&self, keystore: &KeystoreSender, data: &[u8]) -> KeystoreApiFuture<Signature>;
/// verify a signature for given data with this agent public_key is valid
fn verify_signature<D>(&self, signature: &Signature, data: D) -> KeystoreApiFuture<bool>
where
D: TryInto<SerializedBytes, Error = SerializedBytesError>;
/// verify a signature for given raw bytes with this agent public_key is valid
fn verify_signature_raw(&self, signature: &Signature, data: &[u8]) -> KeystoreApiFuture<bool>;
}
impl AgentPubKeyExt for holo_hash::AgentPubKey {
fn new_from_pure_entropy(keystore: &KeystoreSender) -> KeystoreApiFuture<holo_hash::AgentPubKey>
where
Self: Sized,
{
let f = keystore.generate_sign_keypair_from_pure_entropy();
ghost_actor::dependencies::must_future::MustBoxFuture::new(async move { f.await })
}
fn sign<S>(&self, keystore: &KeystoreSender, input: S) -> KeystoreApiFuture<Signature>
where
S: Serialize,
{
use ghost_actor::dependencies::futures::future::FutureExt;
let keystore = keystore.clone();
let maybe_data: Result<Vec<u8>, SerializedBytesError> =
holochain_serialized_bytes::encode(&input);
let key = self.clone();
async move {
let data = maybe_data?;
keystore.sign(Sign { key, data }).await
}
.boxed()
.into()
}
fn sign_raw(&self, keystore: &KeystoreSender, data: &[u8]) -> KeystoreApiFuture<Signature> {
use ghost_actor::dependencies::futures::future::FutureExt;
let keystore = keystore.clone();
let input = Sign::new_raw(self.clone(), data.to_vec());
async move { keystore.sign(input).await }.boxed().into()
}
fn verify_signature<D>(&self, signature: &Signature, data: D) -> KeystoreApiFuture<bool>
where
D: TryInto<SerializedBytes, Error = SerializedBytesError>,
{
use ghost_actor::dependencies::futures::future::FutureExt;
let pub_key: lair_keystore_api::internal::sign_ed25519::SignEd25519PubKey =
self.get_raw_32().to_vec().into();
let sig: lair_keystore_api::internal::sign_ed25519::SignEd25519Signature =
signature.0.to_vec().into();
let data: Result<SerializedBytes, SerializedBytesError> = data.try_into();
async move {
let data = Arc::new(data?.bytes().to_vec());
Ok(pub_key.verify(data, sig).await?)
}
.boxed()
.into()
}
fn verify_signature_raw(&self, signature: &Signature, data: &[u8]) -> KeystoreApiFuture<bool> {
use ghost_actor::dependencies::futures::future::FutureExt;
let data = Arc::new(data.to_vec());
let pub_key: lair_keystore_api::internal::sign_ed25519::SignEd25519PubKey =
self.get_raw_32().to_vec().into();
let sig: lair_keystore_api::internal::sign_ed25519::SignEd25519Signature =
signature.0.to_vec().into();
async move { Ok(pub_key.verify(data, sig).await?) }
.boxed()
.into()
}
}
| 37.26 | 100 | 0.649222 |
ab2ed010180da06c0dd6ff2914ff7f8b30f529ae | 7,647 | use std::old_io::File;
use std::old_io::SeekStyle::SeekSet;
use std::old_io::IoErrorKind::EndOfFile;
use elem::{PerElem, Molecule};
use error::{CTError, CTResult};
use error::CTErrorKind::{InputError, DatabaseError};
macro_rules! read_err (
() => (Err(CTError {
kind: DatabaseError,
desc: "Error reading the database".to_string(),
pos: None
}));
);
#[derive(Debug, PartialEq)]
pub struct ElemData {
pub short_name: String,
pub long_name: String,
pub mass: f64,
pub atomic_num: u16,
}
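// Each line in the database file is semicolon-separated:
//   <short_name>;<mass>;<long_name>;<atomic_number>
// e.g. `H;1.008;Hydrogen;1` (illustrative element; see `decode_line` below).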
pub struct ElemDatabase {
db: File,
}
impl ElemDatabase {
/// Try to make the database with the file at the given oath
pub fn open(path: &Path) -> CTResult<ElemDatabase> {
match File::open(path) {
Ok(db_file) => Ok(ElemDatabase { db: db_file }),
Err(_) => Err(CTError {
kind: DatabaseError,
desc: format!("Could not open database file. Expected at: {:?}",
path.as_str().unwrap_or("same directory as the program")),
pos: None,
}),
}
}
/// Try to get the data matching the given PerElem.
///
/// This function errors if the PerElem could not be found, or the database
/// could not be read.
pub fn get_single_data(&mut self, elem: &PerElem) -> CTResult<ElemData> {
// since the elements should be sorted before we get their data from the database
// we should never have to seek back to the beginning of the file
if let Ok(data) = self.do_data_search(elem) {
Ok(data)
} else {
// but in case they weren't, we return to the beginning of the underlying file, since
// the data might lie on a line we have previously read past
self.db.seek(0, SeekSet).ok().expect("Internal error reading database");
self.do_data_search(elem)
}
}
/// Try to get the data for all the provided PerElems.
///
/// This function errors if one of the PerElem could not be found, or the
/// database could not be read.
pub fn get_data(&mut self, elems: &Molecule) -> CTResult<Vec<ElemData>> {
let mut out = Vec::new();
for elem in elems.iter() {
match self.get_single_data(elem) {
Ok(data) => out.push(data),
Err(e) => return Err(e),
}
}
Ok(out)
}
fn do_data_search(&mut self, elem: &PerElem) -> CTResult<ElemData> {
loop {
// TODO: make it so this function returns the 'not found' error
let line = try!(self.read_line(elem));
if line.starts_with(elem.name.as_slice()) {
return decode_line(&line);
}
}
}
fn read_line(&mut self, elem: &PerElem) -> CTResult<String> {
// we know that no line in the database is more than 30 characters long
let mut buf = Vec::with_capacity(30);
loop {
match self.db.read_byte() {
Ok(b) if b == b'\n' => break,
Ok(b) => buf.push(b),
Err(ref e) if e.kind == EndOfFile => return Err(CTError {
kind: InputError,
desc: format!("Could not find element: {:?}", elem.name),
pos: Some((elem.pos, elem.len)),
}),
Err(_) => return read_err!()
}
}
String::from_utf8(buf).or_else(|_| read_err!())
}
}
fn decode_line(line: &String) -> CTResult<ElemData> {
let data: Vec<&str> = line.trim().split(';').collect();
if data.len() < 4 {
Err(CTError {
kind: DatabaseError,
desc: "Missing field in database".to_string(),
pos: None
})
} else {
let mass = data[1].parse::<f64>();
let atomic_num = data[3].parse::<u16>();
if let (Ok(m), Ok(an)) = (mass, atomic_num) {
Ok(ElemData {
short_name: data[0].to_string(),
long_name: data[2].to_string(),
mass: m,
atomic_num: an,
})
} else {
Err(CTError {
kind: DatabaseError,
desc: "Field in database corrupted".to_string(),
pos: None,
})
}
}
}
#[cfg(test)]
mod test {
use super::*;
use std::old_io::File;
use std::old_io::fs;
use elem::PerElem;
fn make_dummy_db(name: &str, contents: &str) -> ElemDatabase {
if let Err(e) = File::create(&Path::new(name)).and_then(|mut f| f.write_str(contents)) {
// if we can't make the database we can't test, so just abort here
panic!("Could not create dummy database: {:?}", e.desc);
}
ElemDatabase::open(&Path::new(name)).unwrap()
}
fn remove_dummy_db(name: &str) {
if let Err(e) = fs::unlink(&Path::new(name)) {
// if we can't remove the database something is wrong, and we abort the test
panic!("Could not remove dummy database: {:?}", e.desc);
}
}
#[test]
fn multiple_elems() {
let db_name = "multiple_elems_db";
let mut db = make_dummy_db(db_name,
"A;1;Abba;2\n\
B;3;Beta;4\n");
let raw_result = db.get_data(&vec!(
PerElem { name: "B".to_string(), coef: 1, pos: 0, len: 1 },
PerElem { name: "A".to_string(), coef: 1, pos: 1, len: 1 }
));
let expected = vec!(
ElemData {
short_name: "B".to_string(),
long_name: "Beta".to_string(),
mass: 3.0,
atomic_num: 4,
},
ElemData {
short_name: "A".to_string(),
long_name: "Abba".to_string(),
mass: 1.0,
atomic_num: 2,
}
);
remove_dummy_db(db_name);
assert_eq!(Ok(expected), raw_result);
}
#[test]
fn find_elem() {
let db_name = "find_elem_db";
let mut db = make_dummy_db(db_name,
"A;0;Abba;0\n\
B;123.456789;Beta;12\n\
C;0;Coop;0\n");
let raw_result = db.get_single_data(
&PerElem { name: "B".to_string(), coef: 1, pos: 0, len: 2 }
);
let expected = ElemData {
short_name: "B".to_string(),
long_name: "Beta".to_string(),
mass: 123.456789,
atomic_num: 12,
};
remove_dummy_db(db_name);
assert_eq!(Ok(expected), raw_result);
}
#[test]
fn missing_elem() {
let db_name = "missing_elem_db";
let mut db = make_dummy_db(db_name, "A;123.456789;Abba;12\n");
let result = db.get_single_data(
&PerElem { name: "B".to_string(), coef: 1, pos: 0, len: 2 }
);
remove_dummy_db(db_name);
assert!(result.is_err());
}
#[test]
fn missing_field() {
let db_name = "missing_field_db";
let mut db = make_dummy_db(db_name, "A;");
let result = db.get_single_data(
&PerElem { name: "A".to_string(), coef: 1, pos: 0, len: 2 }
);
remove_dummy_db(db_name);
assert!(result.is_err());
}
#[test]
fn field_corrupted() {
let db_name = "field_corrupted_db";
let mut db = make_dummy_db(db_name, "A;not a number;Abba;12\n");
let result = db.get_single_data(
&PerElem { name: "A".to_string(), coef: 1, pos: 0, len: 2 }
);
remove_dummy_db(db_name);
assert!(result.is_err());
}
} | 32.679487 | 97 | 0.522689 |
b96271dc41dd06fa11dc43f2a06372ee1ccd99ca | 7,450 | use crate::{format_recursive, FormattingOptions};
use expect_test::{expect, Expect};
fn check(before: &str, after: Expect) {
check_with_options(before, after, &FormattingOptions::default())
}
fn check_with_options(before: &str, after: Expect, options: &FormattingOptions) {
let syntax = syntax::parse(before.trim_start())
.syntax()
.clone_for_update();
format_recursive(syntax.clone(), options);
eprintln!("{:#?}", syntax);
let new = syntax.to_string();
after.assert_eq(&new);
}
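// Each test below parses `before`, runs `format_recursive` over the syntax
// tree in place, and asserts the printed result against the inline
// `expect![[...]]` snapshot.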
#[test]
fn format_empty() {
check("", expect![[""]]);
}
#[test]
fn format_fn_header() {
check(
"fn main ( a : b ) -> f32 {}",
expect![[r#"fn main(a: b) -> f32 {}"#]],
);
}
#[test]
fn format_fn_header_2() {
check(
"fn main ( a : b, c : d ) -> f32 {}",
expect![[r#"fn main(a: b, c: d) -> f32 {}"#]],
);
}
#[test]
fn format_fn_header_comma_oneline() {
check(
"fn main(a: b , c: d ,) -> f32 {}",
expect![[r#"fn main(a: b, c: d) -> f32 {}"#]],
);
}
#[test]
fn format_fn_header_comma_multiline() {
check(
"fn main(
a: b , c: d ,) -> f32 {}",
expect![[r#"
fn main(
a: b,
c: d,
) -> f32 {}"#]],
);
}
#[test]
fn format_fn_header_missing_comma() {
check(
"fn main(a: b c: d) {}",
expect![[r#"fn main(a: b, c: d) {}"#]],
);
}
#[test]
fn format_fn_header_no_ws() {
check(
"fn main(a:b)->f32{}",
expect![[r#"fn main(a: b) -> f32 {}"#]],
);
}
#[test]
fn format_fn_newline() {
check(
"fn main(
a:b
)->f32{}",
expect![[r#"
fn main(
a: b
) -> f32 {}"#]],
);
}
#[test]
fn format_fn_newline_2() {
check(
"fn main(
a:b, c:d)->f32{}",
expect![[r#"
fn main(
a: b,
c: d
) -> f32 {}"#]],
);
}
#[test]
fn format_fn_newline_3() {
check(
"fn main(
a:b,
c:d
)->f32{}",
expect![[r#"
fn main(
a: b,
c: d
) -> f32 {}"#]],
);
}
#[test]
fn format_multiple_fns() {
check(
"
fn main( a: b ) -> f32 {}
fn main( a: b ) -> f32 {}
",
expect![[r#"
fn main(a: b) -> f32 {}
fn main(a: b) -> f32 {}
"#]],
);
}
#[test]
fn format_struct() {
check(
"
struct Test {}
",
expect![[r#"
struct Test {}
"#]],
);
}
#[test]
fn format_bevy_function() {
check(
"fn directional_light(light: DirectionalLight, roughness: f32, NdotV: f32, normal: vec3<f32>, view: vec3<f32>, R: vec3<f32>, F0: vec3<f32>, diffuseColor: vec3<f32>) -> vec3<f32> {}",
expect![["fn directional_light(light: DirectionalLight, roughness: f32, NdotV: f32, normal: vec3<f32>, view: vec3<f32>, R: vec3<f32>, F0: vec3<f32>, diffuseColor: vec3<f32>) -> vec3<f32> {}"]],
)
}
#[test]
fn format_bevy_function_2() {
check(
"fn specular(f0: vec3<f32>, roughness: f32, h: vec3<f32>, NoV: f32, NoL: f32,
NoH: f32, LoH: f32, specularIntensity: f32) -> vec3<f32> {",
expect![["fn specular(f0: vec3<f32>, roughness: f32, h: vec3<f32>, NoV: f32, NoL: f32, NoH: f32, LoH: f32, specularIntensity: f32) -> vec3<f32> {"]],
)
}
#[test]
fn format_if() {
check(
"fn main() {
if(x < 1){}
if ( x < 1 ) {}
}",
expect![[r#"
fn main() {
if (x < 1) {}
if (x < 1) {}
}"#]],
);
}
#[test]
fn format_if_2() {
check(
"fn main() {
if(x < 1){}
else{
let a = 3;
}else if( x > 2 ){}
}",
expect![[r#"
fn main() {
if (x < 1) {} else {
let a = 3;
} else if (x > 2) {}
}"#]],
);
}
#[test]
fn format_for() {
check(
"fn main() {
for( var i = 0;i < 100; i = i + 1 ){}
}",
expect![[r#"
fn main() {
for (var i = 0; i < 100; i = i + 1) {}
}"#]],
);
}
#[test]
fn format_function_call() {
check(
"fn main() {
min ( x,y );
}",
expect![[r#"
fn main() {
min(x, y);
}"#]],
);
}
#[test]
fn format_function_call_newline() {
check(
"fn main() {
min (
x,y );
}",
expect![[r#"
fn main() {
min(
x,
y
);
}"#]],
);
}
#[test]
fn format_function_call_newline_indent() {
check(
"fn main() {
if (false) {
min (
x,y );
}
}",
expect![[r#"
fn main() {
if (false) {
min(
x,
y
);
}
}"#]],
);
}
#[test]
fn format_function_call_newline_nested() {
check(
"fn main() {
min(
min(
1,
2,
)
)
}",
expect![[r#"
fn main() {
min(
min(
1,
2,
)
)
}"#]],
);
}
#[test]
fn format_function_call_2() {
check(
"fn main() {
vec3 <f32> ( x,y,z );
}",
expect![[r#"
fn main() {
vec3<f32>(x, y, z);
}"#]],
);
}
#[test]
fn format_infix_expr() {
check(
"fn main() {
x+y*z;
}",
expect![[r#"
fn main() {
x + y * z;
}"#]],
);
}
#[test]
fn format_assignment() {
check(
"fn main() {
x=0;
y += x + y;
}",
expect![[r#"
fn main() {
x = 0;
y += x + y;
}"#]],
);
}
#[test]
fn format_variable() {
check(
"fn main() {
var x=0;
}",
expect![[r#"
fn main() {
var x = 0;
}"#]],
);
}
#[test]
fn format_statement_indent() {
check(
"fn main() {
var x=0;
}",
expect![[r#"
fn main() {
var x = 0;
}"#]],
);
}
#[test]
fn format_statement_indent_nested() {
check(
"fn main() {
for() {
if() {
var x = 0;
}
}
}",
expect![[r#"
fn main() {
for () {
if () {
var x = 0;
}
}
}"#]],
);
}
#[test]
fn format_statements_newline() {
check(
"fn main() {
let x = 3;
let y = 4;
}",
expect![[r#"
fn main() {
let x = 3;
let y = 4;
}"#]],
);
}
#[test]
fn format_expr_shift_right() {
check(
"fn main() { let x = 1u >> 3u; }",
expect![[r#"fn main() { let x = 1u >> 3u; }"#]],
);
}
#[test]
fn format_expr_shift_left() {
check(
"fn main() { let x = 1u << 3u; }",
expect![[r#"fn main() { let x = 1u << 3u; }"#]],
);
}
| 18.625 | 205 | 0.363087 |
64dcc0337ade6929234a9aa08d3ab9357b1390e6 | 2,274 | //! Group 7: cutter radius compensation
use crate::Span;
use crate::{value::Value, word::parse_word};
use nom::{
branch::alt, bytes::complete::tag_no_case, character::complete::space0, combinator::map,
combinator::opt, sequence::separated_pair, IResult,
};
#[derive(Debug, PartialEq, Clone)]
pub enum CutterCompensation<'a> {
/// `G40`
Off,
/// `G41`
Left {
/// Optional tool number to read offset from.
tool_number: Option<Value<'a>>,
},
/// `G42`
Right {
/// Optional tool number to read offset from.
tool_number: Option<Value<'a>>,
},
}
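// Example inputs (mirrored in the tests below): `G40` parses to `Off`,
// `G41 D13` to `Left { tool_number: Some(13) }`, and `G42` to
// `Right { tool_number: None }`.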
impl<'a> CutterCompensation<'a> {
pub fn parse(i: Span<'a>) -> IResult<Span<'a>, Self> {
let tool_number = |tag: &'static str| {
map(
separated_pair(tag_no_case(tag), space0, opt(parse_word(tag_no_case("D")))),
|(_, d)| d.map(|(_c, value)| value),
)
};
alt((
map(tag_no_case("G40"), |_| CutterCompensation::Off),
map(tool_number("G41"), |tool_number| CutterCompensation::Left {
tool_number,
}),
map(tool_number("G42"), |tool_number| {
CutterCompensation::Right { tool_number }
}),
))(i)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::assert_parse;
#[test]
fn no_d() {
assert_parse!(
CutterCompensation::parse,
"G41;",
(";", CutterCompensation::Left { tool_number: None })
);
assert_parse!(
CutterCompensation::parse,
"G42;",
(";", CutterCompensation::Right { tool_number: None })
);
}
#[test]
fn with_d() {
assert_parse!(
CutterCompensation::parse,
"G41 D13;",
(
";",
CutterCompensation::Left {
tool_number: Some(13.into())
}
)
);
assert_parse!(
CutterCompensation::parse,
"G42 D1;",
(
";",
CutterCompensation::Right {
tool_number: Some(1.into())
}
)
);
}
}
| 24.717391 | 92 | 0.478452 |
18b54abef11a8ba3882a728fc5a2c853856ffdba | 260 | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
pub mod components;
pub mod manifests;
pub mod package_list;
pub mod packages;
pub mod routes;
| 26 | 73 | 0.765385 |
eb7919e2cffd19576af08acac154837da6700b6e | 1,900 | use crate::internal::*;
#[macro_use]
pub mod helpers;
#[macro_use]
pub mod rules;
mod analyser;
mod fact;
mod factoid;
mod model;
mod ops;
mod optim;
pub use tract_core::dim::TDim;
pub use self::fact::InferenceFact;
pub use self::factoid::*;
pub use self::model::InferenceModelExt;
pub use self::ops::InferenceOp;
pub use self::rules::expr::IntoExp;
pub use self::rules::expr::ToDimExp;
pub use self::rules::InferenceResult;
pub use self::rules::InferenceRulesOp;
pub use self::rules::Solver;
pub use self::rules::TensorProxy;
pub use wrap;
pub fn check_input_arity(inputs: &[TensorProxy], expected: usize) -> TractResult<()> {
if inputs.len() != expected {
bail!("Wrong input number. Rules expect {}, node has {}.", expected, inputs.len())
} else {
Ok(())
}
}
pub fn check_output_arity(outputs: &[TensorProxy], expected: usize) -> TractResult<()> {
if outputs.len() != expected {
bail!("Wrong output number. Rules expect {}, node has {}.", expected, outputs.len())
} else {
Ok(())
}
}
/// A model with partially-determined types and shapes, as produced by parsing
/// ONNX or TensorFlow graphs.
pub type InferenceModel = Graph<InferenceFact, Box<dyn InferenceOp>>;
/// Node for InferenceModel graph
pub type InferenceNode = Node<InferenceFact, Box<dyn InferenceOp>>;
/// A ModelPatch for InferenceModel.
pub type InferenceModelPatch = ModelPatch<InferenceFact, Box<dyn InferenceOp>>;
/// An execution plan for InferenceModel.
pub type InferenceSimplePlan<M> = SimplePlan<InferenceFact, Box<dyn InferenceOp>, M>;
/// An execution state for InferenceModel.
pub type InferenceSimpleState<M, P> = SimpleState<InferenceFact, Box<dyn InferenceOp>, M, P>;
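// A typical flow (sketch; `load_model` is a hypothetical frontend call, and
// any ONNX/TensorFlow parser yielding an `InferenceModel` would do):
//
//   let model: InferenceModel = load_model("net.onnx")?;
//   let plan: InferenceSimplePlan<_> = SimplePlan::new(model)?;
//   let outputs = plan.run(tvec!(input_tensor))?;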
impl<'a> From<&'a Box<dyn InferenceOp>> for Box<dyn InferenceOp> {
fn from(it: &'a Box<dyn InferenceOp>) -> Box<dyn InferenceOp> {
tract_core::dyn_clone::clone_box(it.as_ref())
}
}
| 30.645161 | 93 | 0.698947 |
0a3e31ec4f83e37125fa29af4a2ca871d3fd5394 | 1,291 | use std::fmt;
/// Formats a string as an escaped string.
///
/// This writes double quote delimiters and escapes two characters: the double quote (`"`) and
/// backslash (`\`).
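///
/// For example, `noodles-"vcf"` is written as `"noodles-\"vcf\""` (see the
/// tests below).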
pub(crate) fn write_escaped_string(f: &mut fmt::Formatter<'_>, s: &str) -> fmt::Result {
f.write_str("\"")?;
for c in s.chars() {
if matches!(c, '"' | '\\') {
f.write_str("\\")?;
}
write!(f, "{}", c)?;
}
f.write_str("\"")?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
struct EscapedStringFormat(&'static str);
impl fmt::Display for EscapedStringFormat {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write_escaped_string(f, self.0)
}
}
#[test]
fn test_write_escaped_string() {
assert_eq!(
EscapedStringFormat(r#"noodles"#).to_string(),
r#""noodles""#
);
assert_eq!(
EscapedStringFormat("noodles=🍜").to_string(),
r#""noodles=🍜""#
);
assert_eq!(
EscapedStringFormat(r#"noodles-"vcf""#).to_string(),
r#""noodles-\"vcf\"""#
);
assert_eq!(
EscapedStringFormat(r#"noodles\vcf"#).to_string(),
r#""noodles\\vcf""#
);
}
}
| 22.258621 | 94 | 0.502711 |
fca8510d04a2ec71a9f81aab3df7ae64bee54f20 | 8,088 | use hyper::{Chunk, StatusCode};
use serde_json::{Error as JsonError, Value as JsonValue};
use std::error::Error as ErrorTrait;
use std::fmt::{Display, Formatter};
use std::net::IpAddr;
/// Errors related to blocking protocol as defined by consul
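/// (A Consul blocking query is expected to answer `200 OK` with
/// `Content-Type: application/json` and an `X-Consul-Index` header; the
/// variants below cover the ways that contract can break, plus local
/// connection and stream failures.)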
#[derive(Debug, Copy, Clone)]
pub enum ProtocolError {
/// Consul did not reply with X-Consul-Index header
BlockingMissing,
/// Consul did not reply with Content-Type: application/json
ContentTypeNotJson,
/// Consul did not reply with 200 Ok status
NonOkResult(StatusCode),
/// connection refused to consul
ConnectionRefused,
/// we had an error, and consumer resetted the stream
StreamRestarted,
}
impl Display for ProtocolError {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
match *self {
ProtocolError::BlockingMissing => write!(f, "{}", self.description()),
ProtocolError::ContentTypeNotJson => write!(f, "{}", self.description()),
ProtocolError::NonOkResult(ref status) => {
write!(f, "Non ok result from consul: {}", status)
}
ProtocolError::ConnectionRefused => write!(f, "connection refused to consul"),
ProtocolError::StreamRestarted => write!(f, "consumer restarted the stream"),
}
}
}
impl ErrorTrait for ProtocolError {
fn description(&self) -> &str {
match *self {
ProtocolError::BlockingMissing => "X-Consul-Index missing from response",
ProtocolError::ContentTypeNotJson => "Consul replied with a non-json content",
ProtocolError::NonOkResult(_) => "Non ok result from consul",
ProtocolError::ConnectionRefused => "connection refused to consul",
ProtocolError::StreamRestarted => "consumer restarted the stream",
}
}
}
/// Error that Watch may yield *in the stream*
#[derive(Debug)]
pub enum ParseError {
/// Consul protocol error (missing header, unknown return format)
Protocol(ProtocolError),
/// Json result does not fit expected format
UnexpectedJsonFormat,
/// The data is not in json format
BodyParsing(JsonError),
}
impl Display for ParseError {
fn fmt(&self, f: &mut Formatter) -> Result<(), std::fmt::Error> {
match *self {
ParseError::Protocol(ref pe) => write!(f, "Protocol error: {}", pe),
ParseError::UnexpectedJsonFormat => write!(f, "{}", self.description()),
ParseError::BodyParsing(ref je) => write!(f, "Data not in json format: {}", je),
}
}
}
impl ErrorTrait for ParseError {
fn description(&self) -> &str {
match *self {
ParseError::Protocol(_) => "Protocol error",
ParseError::UnexpectedJsonFormat => "Unexpected json format",
ParseError::BodyParsing(_) => "Data not in json format",
}
}
}
impl From<ProtocolError> for ParseError {
fn from(e: ProtocolError) -> ParseError {
ParseError::Protocol(e)
}
}
/// Trait for parsing types out of consul json replies
pub trait ConsulReply {
/// The kind of replies this parser yields
type Reply;
/// Parse an http body and give back a result
fn parse(buf: &Chunk) -> Result<Self::Reply, ParseError>;
}
pub struct ServiceNameTag {
pub name: String,
pub tags: Vec<String>,
}
fn from_json_to_services_tags(value: &JsonValue) -> Result<Vec<ServiceNameTag>, ParseError> {
if let JsonValue::Object(ref map) = value {
let mut out = Vec::with_capacity(map.len());
for (k, v) in map.iter() {
if v.is_array() {
if k != "consul" {
let tag_vec: Vec<String> =
serde_json::from_value(v.to_owned()).map_err(ParseError::BodyParsing)?;
out.push(ServiceNameTag {
name: k.to_string(),
tags: tag_vec,
})
}
} else {
return Err(ParseError::UnexpectedJsonFormat);
}
}
Ok(out)
} else {
Err(ParseError::UnexpectedJsonFormat)
}
}
/// Parse services list in consul
#[derive(Debug)]
pub struct Services {}
impl ConsulReply for Services {
type Reply = Vec<ServiceNameTag>;
fn parse(buf: &Chunk) -> Result<Self::Reply, ParseError> {
let v: JsonValue = serde_json::from_slice(&buf).map_err(ParseError::BodyParsing)?;
from_json_to_services_tags(&v)
}
}
/// Parse node list from services in consul
#[derive(Debug)]
pub struct HealthyServiceNodes {}
#[derive(Deserialize)]
pub struct JsonUpperNode {
#[serde(rename = "Node")]
pub node: JsonInnerNode,
#[serde(rename = "Service")]
pub service: JsonInnerService,
}
/// Node hosting services
#[derive(Debug, Deserialize, PartialEq, Clone)]
pub struct JsonInnerNode {
/// Node name
#[serde(rename = "Node")]
pub name: String,
/// Node address
#[serde(rename = "Address")]
pub address: IpAddr,
}
/// Service information
#[derive(Debug, Deserialize, PartialEq, Clone)]
pub struct JsonInnerService {
/// Node name
#[serde(rename = "Service")]
pub name: String,
/// Node address
#[serde(rename = "Port")]
pub port: u16,
}
impl ConsulReply for HealthyServiceNodes {
type Reply = Vec<JsonUpperNode>;
fn parse(buf: &Chunk) -> Result<Self::Reply, ParseError> {
let v: Vec<JsonUpperNode> =
serde_json::from_slice(&buf).map_err(ParseError::BodyParsing)?;
Ok(v)
}
}
#[derive(Deserialize)]
struct JsonAgent {
#[serde(rename = "Member")]
member: JsonMember,
}
#[derive(Deserialize)]
struct JsonMember {
#[serde(rename = "Addr")]
addr: IpAddr,
}
/// Parse node list from services in consul
#[derive(Debug)]
pub struct Agent {
/// public ip address used by this address
pub member_address: IpAddr,
}
impl ConsulReply for Agent {
type Reply = Agent;
fn parse(buf: &Chunk) -> Result<Self::Reply, ParseError> {
let agent: JsonAgent = serde_json::from_slice(&buf).map_err(ParseError::BodyParsing)?;
Ok(Agent {
member_address: agent.member.addr,
})
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_from_json_to_services_tags() {
let json = json!( {
"ais-ps-debugger": [
"trv-env-dev",
"trv-net-dev-internal",
"ais-ps-debugger"
],
"ais-reports-prod": [
"trv-net-dev-internal",
"trv-env-dev",
"ais-reports-prod"
],
});
let result = from_json_to_services_tags(&json);
assert!(result.is_ok());
let result = result.unwrap();
assert!(result.iter().any(|x| x.name == "ais-ps-debugger"));
assert!(result
.iter()
.find(|&x| x.name == "ais-ps-debugger")
.unwrap()
.tags
.iter()
.any(|t| t == "ais-ps-debugger"));
assert!(result
.iter()
.find(|&x| x.name == "ais-ps-debugger")
.unwrap()
.tags
.iter()
.any(|t| t == "trv-net-dev-internal"));
assert!(result
.iter()
.find(|&x| x.name == "ais-ps-debugger")
.unwrap()
.tags
.iter()
.any(|t| t == "trv-env-dev"));
assert!(result.iter().any(|x| x.name == "ais-reports-prod"));
assert!(result
.iter()
.find(|&x| x.name == "ais-reports-prod")
.unwrap()
.tags
.iter()
.any(|t| t == "trv-env-dev"));
assert!(result
.iter()
.find(|&x| x.name == "ais-reports-prod")
.unwrap()
.tags
.iter()
.any(|t| t == "trv-env-dev"));
assert!(result
.iter()
.find(|&x| x.name == "ais-reports-prod")
.unwrap()
.tags
.iter()
.any(|t| t == "trv-env-dev"));
}
}
| 29.304348 | 95 | 0.566642 |
e5644f823114d93aa7382e5ec7363c95fa612936 | 7,999 | // Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use std::time::Duration;
use std::time::Instant;
use crate::cc;
use crate::recovery::Sent;
/// Reno congestion control implementation.
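///
/// Window dynamics, as implemented below (sketch):
/// - slow start (`cwnd < ssthresh`): `cwnd` grows by each acked packet's size;
/// - congestion avoidance: `cwnd` grows by `MAX_DATAGRAM_SIZE * acked_size /
///   cwnd`, i.e. roughly one datagram per window's worth of acked bytes;
/// - congestion event: `cwnd` is scaled by `cc::LOSS_REDUCTION_FACTOR`,
///   clamped to at least `cc::MINIMUM_WINDOW`, and `ssthresh` is set to the
///   new `cwnd`.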
pub struct Reno {
congestion_window: usize,
bytes_in_flight: usize,
congestion_recovery_start_time: Option<Instant>,
ssthresh: usize,
/* TODO: ECN is not implemented.
* ecn_ce_counters: [usize; packet::EPOCH_COUNT] */
}
impl cc::CongestionControl for Reno {
fn new() -> Self
where
Self: Sized,
{
Reno {
congestion_window: cc::INITIAL_WINDOW,
bytes_in_flight: 0,
congestion_recovery_start_time: None,
ssthresh: std::usize::MAX,
/* TODO: ECN is not implemented.
* ecn_ce_counters: [0; packet::EPOCH_COUNT], */
}
}
fn cwnd(&self) -> usize {
self.congestion_window
}
fn collapse_cwnd(&mut self) {
self.congestion_window = cc::MINIMUM_WINDOW;
}
fn bytes_in_flight(&self) -> usize {
self.bytes_in_flight
}
fn decrease_bytes_in_flight(&mut self, bytes_in_flight: usize) {
self.bytes_in_flight =
self.bytes_in_flight.saturating_sub(bytes_in_flight);
}
fn congestion_recovery_start_time(&self) -> Option<Instant> {
self.congestion_recovery_start_time
}
fn on_packet_sent_cc(
&mut self, bytes_sent: usize, _now: Instant, _trace_id: &str,
) {
self.bytes_in_flight += bytes_sent;
}
fn on_packet_acked_cc(
&mut self, packet: &Sent, _srtt: Duration, _min_rtt: Duration,
app_limited: bool, _now: Instant, _trace_id: &str,
) {
self.bytes_in_flight -= packet.size;
if self.in_congestion_recovery(packet.time) {
return;
}
if app_limited {
return;
}
if self.congestion_window < self.ssthresh {
// Slow start.
self.congestion_window += packet.size;
} else {
// Congestion avoidance.
self.congestion_window +=
(cc::MAX_DATAGRAM_SIZE * packet.size) / self.congestion_window;
}
}
fn congestion_event(
&mut self, time_sent: Instant, now: Instant, _trace_id: &str,
) {
// Start a new congestion event if packet was sent after the
// start of the previous congestion recovery period.
if !self.in_congestion_recovery(time_sent) {
self.congestion_recovery_start_time = Some(now);
self.congestion_window = (self.congestion_window as f64 *
cc::LOSS_REDUCTION_FACTOR)
as usize;
self.congestion_window =
std::cmp::max(self.congestion_window, cc::MINIMUM_WINDOW);
self.ssthresh = self.congestion_window;
}
}
}
impl std::fmt::Debug for Reno {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"cwnd={} ssthresh={} bytes_in_flight={}",
self.congestion_window, self.ssthresh, self.bytes_in_flight,
)
}
}
#[cfg(test)]
mod tests {
use super::*;
const TRACE_ID: &str = "test_id";
#[test]
fn reno_init() {
let cc = cc::new_congestion_control(cc::Algorithm::Reno);
assert!(cc.cwnd() > 0);
assert_eq!(cc.bytes_in_flight(), 0);
}
#[test]
fn reno_send() {
let mut cc = cc::new_congestion_control(cc::Algorithm::Reno);
let now = Instant::now();
cc.on_packet_sent_cc(1000, now, TRACE_ID);
assert_eq!(cc.bytes_in_flight(), 1000);
}
#[test]
fn reno_slow_start() {
let mut cc = cc::new_congestion_control(cc::Algorithm::Reno);
let now = Instant::now();
let p = Sent {
pkt_num: 0,
frames: vec![],
time: now,
size: 5000,
ack_eliciting: true,
in_flight: true,
delivered: 0,
delivered_time: std::time::Instant::now(),
recent_delivered_packet_sent_time: std::time::Instant::now(),
is_app_limited: false,
};
// Send 5k x 4 = 20k, more than the default cwnd (~15k),
// so the connection is no longer app-limited.
cc.on_packet_sent_cc(p.size, now, TRACE_ID);
cc.on_packet_sent_cc(p.size, now, TRACE_ID);
cc.on_packet_sent_cc(p.size, now, TRACE_ID);
cc.on_packet_sent_cc(p.size, now, TRACE_ID);
let cwnd_prev = cc.cwnd();
cc.on_packet_acked_cc(
&p,
Duration::new(0, 1),
Duration::new(0, 1),
false,
now,
TRACE_ID,
);
// Check if cwnd increased by packet size (slow start).
assert_eq!(cc.cwnd(), cwnd_prev + p.size);
}
#[test]
fn reno_congestion_event() {
let mut cc = cc::new_congestion_control(cc::Algorithm::Reno);
let prev_cwnd = cc.cwnd();
let now = Instant::now();
cc.congestion_event(now, now, TRACE_ID);
// In Reno, after congestion event, cwnd will be cut in half.
assert_eq!(prev_cwnd / 2, cc.cwnd());
}
#[test]
fn reno_congestion_avoidance() {
let mut cc = cc::new_congestion_control(cc::Algorithm::Reno);
let prev_cwnd = cc.cwnd();
let now = Instant::now();
// Send 20K bytes.
cc.on_packet_sent_cc(20000, now, TRACE_ID);
cc.congestion_event(now, now, TRACE_ID);
// In Reno, after congestion event, cwnd will be cut in half.
assert_eq!(prev_cwnd / 2, cc.cwnd());
let p = Sent {
pkt_num: 0,
frames: vec![],
time: now,
size: 5000,
ack_eliciting: true,
in_flight: true,
delivered: 0,
delivered_time: std::time::Instant::now(),
recent_delivered_packet_sent_time: std::time::Instant::now(),
is_app_limited: false,
};
let prev_cwnd = cc.cwnd();
// Ack 5000 bytes.
cc.on_packet_acked_cc(
&p,
Duration::new(0, 1),
Duration::new(0, 1),
false,
now,
TRACE_ID,
);
// Check if cwnd increase is smaller than a packet size (congestion
// avoidance).
assert!(cc.cwnd() < prev_cwnd + 1111);
}
#[test]
fn reno_collapse_cwnd() {
let mut cc = cc::new_congestion_control(cc::Algorithm::Reno);
// cwnd will be reset
cc.collapse_cwnd();
assert_eq!(cc.cwnd(), cc::MINIMUM_WINDOW);
}
}
| 29.408088 | 79 | 0.595199 |
f94084efc02112ef23690b656e87d47f4f01f926 | 13,184 | #[macro_use] extern crate serde;
extern crate serde_json;
extern crate reqwest;
extern crate oping;
extern crate config;
use std::collections::HashMap;
use std::thread;
use std::time;
use std::sync;
use std::time::{SystemTime, UNIX_EPOCH};
use config::{Config, File, Environment};
const MAIN_LOOP_MSECS : u64 = 1000;
const PING_LOOP_MSECS : u64 = 1000;
const PING_TIMEOUT : f64 = 1.0;
const PING_HYST_LOOP_MSECS : u64 = 100;
const PING_HYST_LIMIT : u8 = 10;
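// Hysteresis: once a host starts to flip state, it is re-probed every
// PING_HYST_LOOP_MSECS (100 ms) and must hold the new state for
// PING_HYST_LIMIT (10) consecutive probes before the change is reported
// upstream.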
struct PingThreadInfo {
state_id: i64,
thd: thread::JoinHandle<()>,
running: sync::Arc<sync::atomic::AtomicBool>,
finished_signal: sync::mpsc::Receiver<bool>
}
struct PingAccountingInfo {
responsive: Option<bool>,
hysteresis_responsive: u8,
hysteresis_unresponsive: u8
}
#[derive(Serialize,Deserialize,Debug)]
#[serde(rename_all = "camelCase")]
struct DeviceInfo {
fqdn: String,
up: Option<bool>,
}
#[derive(Deserialize,Debug)]
#[serde(rename_all = "camelCase")]
struct DeviceInfoResponse {
state_id: i64,
devices: Vec<DeviceInfo>,
}
fn get_devices(source_url : &String) -> Result<(HashMap<String, DeviceInfo>, i64), String> {
let mut devices : HashMap<String, DeviceInfo> = HashMap::new();
let response = reqwest::blocking::get(source_url);
match response {
Ok(response) => {
let resp_json : Result<DeviceInfoResponse, _> = response.json();
match resp_json {
Ok(device_info_response) => {
for device_info in device_info_response.devices.iter() {
devices.insert(
device_info.fqdn.clone(),
DeviceInfo {
fqdn: device_info.fqdn.clone(),
up: device_info.up
}
);
}
return Ok((devices, device_info_response.state_id));
},
Err(_) => {
return Err("Unable to parse DeviceInfoResponse JSON".to_string())
}
}
},
Err(e) => {
let status_code = e.status();
let error = format!("HTTP Error (status={:?}) while trying to get DeviceInfoResponse", status_code);
return Err(error);
}
}
}
fn pinger_prepare_instance(host : &String) -> Result<oping::Ping, oping::PingError> {
let mut oping_instance = oping::Ping::new();
match oping_instance.set_timeout(PING_TIMEOUT) {
Ok(_) => {},
Err(e) => {
return Err(e);
}
}
match oping_instance.add_host(host.as_str()) {
Ok(_) => {},
Err(e) => {
return Err(e);
}
}
return Ok(oping_instance);
}
fn send_status_update(source_url: &String, host : &String, ping_accounting_info : &mut PingAccountingInfo) {
let client = reqwest::blocking::Client::new();
let resp_obj = DeviceInfo {
up: ping_accounting_info.responsive,
fqdn: host.clone()
};
let response = client.request(reqwest::Method::PUT, source_url)
.json(&resp_obj)
.send();
match response {
Ok(_) => {},
Err(_) => {}
}
}
fn pinger_handle_host_drop(source_url: &String, host : &String, ping_accounting_info : &mut PingAccountingInfo) {
let responsive;
match ping_accounting_info.responsive {
Some(value) => { responsive = value; },
None => {
// Initial state is down!
ping_accounting_info.responsive = Some(false);
send_status_update(source_url, host, ping_accounting_info);
return;
}
}
if !responsive {
ping_accounting_info.hysteresis_responsive = 0;
ping_accounting_info.hysteresis_unresponsive = 0;
return;
} else {
ping_accounting_info.hysteresis_responsive = 0;
}
ping_accounting_info.hysteresis_unresponsive += 1;
if ping_accounting_info.hysteresis_unresponsive >= PING_HYST_LIMIT {
ping_accounting_info.responsive = Some(false);
send_status_update(source_url, host, ping_accounting_info);
println!("[{}] -> DOWN", host);
} else {
println!("[{}] <hyst> not responding ({}/{})", host, ping_accounting_info.hysteresis_unresponsive, PING_HYST_LIMIT);
}
}
fn pinger_handle_host_resp(source_url: &String, host : &String, ping_accounting_info : &mut PingAccountingInfo) {
let responsive;
match ping_accounting_info.responsive {
Some(value) => { responsive = value; },
None => {
// Initial state is up!
ping_accounting_info.responsive = Some(true);
send_status_update(source_url, host, ping_accounting_info);
return;
}
}
if responsive {
ping_accounting_info.hysteresis_responsive = 0;
ping_accounting_info.hysteresis_unresponsive = 0;
return;
} else {
ping_accounting_info.hysteresis_unresponsive = 0;
}
ping_accounting_info.hysteresis_responsive += 1;
if ping_accounting_info.hysteresis_responsive >= PING_HYST_LIMIT {
ping_accounting_info.responsive = Some(true);
send_status_update(source_url, host, ping_accounting_info);
println!("[{}] -> OK", host);
} else {
println!("[{}] <hyst> responding ({}/{})", host, ping_accounting_info.hysteresis_responsive, PING_HYST_LIMIT);
}
}
fn is_responding(ping_item : &oping::PingItem) -> bool {
if ping_item.dropped > 0 || ping_item.latency_ms < 0.0 { return false; }
return true;
}
fn pinger_process_ping_result(source_url: &String, host : &String, mut ping_accounting_info : &mut PingAccountingInfo, ping_item : oping::PingItem) {
if is_responding(&ping_item) {
pinger_handle_host_resp(source_url, host, &mut ping_accounting_info);
} else {
pinger_handle_host_drop(source_url, host, &mut ping_accounting_info);
}
}
fn pinger_perform_ping(source_url: &String, host : &String, mut ping_accounting_info : &mut PingAccountingInfo, oping_instance : oping::Ping) {
match oping_instance.send() {
Ok(oping_result) => {
match oping_result.last() {
Some(ping_result) => {
pinger_process_ping_result(source_url, host, &mut ping_accounting_info, ping_result);
},
None => {}
}
},
Err(e) => {
println!("[{}] ping error: {:?}", host, e);
}
}
}
fn get_time_msecs() -> u64 {
let start = SystemTime::now();
match start.duration_since(UNIX_EPOCH) {
Ok(unix_time) => {
let in_ms = unix_time.as_secs() * 1000 + unix_time.subsec_nanos() as u64 / 1_000_000;
return in_ms;
},
Err(_) => { return 0; }
}
}
fn pinger(source_url : String, device_info : DeviceInfo, running : sync::Arc<sync::atomic::AtomicBool>, done : sync::mpsc::Sender<bool>) {
let mut ping_accounting_info : PingAccountingInfo = PingAccountingInfo {
responsive: device_info.up,
hysteresis_responsive: 0,
hysteresis_unresponsive: 0
};
println!("[{}] start monitoring", device_info.fqdn);
while running.load(std::sync::atomic::Ordering::Relaxed) {
let start = get_time_msecs();
match pinger_prepare_instance(&device_info.fqdn) {
Ok(oping_instance) => {
pinger_perform_ping(&source_url, &device_info.fqdn, &mut ping_accounting_info, oping_instance);
},
Err(e) => {
println!("[{}] ping instance creation error: {:?}", device_info.fqdn, e);
}
}
let diff = get_time_msecs() - start;
let mut loop_time = PING_LOOP_MSECS;
if ping_accounting_info.hysteresis_responsive > 0 || ping_accounting_info.hysteresis_unresponsive > 0 {
loop_time = PING_HYST_LOOP_MSECS;
}
if diff <= loop_time {
thread::sleep(time::Duration::from_millis(loop_time - diff));
}
}
done.send(true).unwrap();
println!("[{}] stop monitoring", device_info.fqdn);
}
fn start_ping_worker(source_url: &String, device_info: &DeviceInfo, ping_workers: &mut HashMap<String, PingThreadInfo>, state_id: &i64) {
let running = std::sync::Arc::new(sync::atomic::AtomicBool::new(true));
let running_ptit = running.clone();
let (tx, rx) = sync::mpsc::channel();
let device_info_copy = DeviceInfo {
up: device_info.up,
fqdn: device_info.fqdn.clone()
};
let source_url_copy = source_url.clone();
ping_workers.insert(
device_info.fqdn.clone(),
PingThreadInfo {
state_id: *state_id,
thd: thread::spawn(|| {
pinger(source_url_copy, device_info_copy, running_ptit, tx)
}),
running: running,
finished_signal: rx
}
);
}
fn reap_finished_threads(reap_threads: &mut Vec<PingThreadInfo>) {
loop {
let mut reap : bool = false;
let mut idx = 0;
for reap_thread in reap_threads.iter() {
match reap_thread.finished_signal.try_recv() {
Ok(_) => {
reap = true;
break;
},
Err(etype) => {
match etype {
std::sync::mpsc::TryRecvError::Empty => {
idx += 1;
},
std::sync::mpsc::TryRecvError::Disconnected => {
reap = true;
break;
}
}
}
}
}
if reap {
let reaped_pti : PingThreadInfo = reap_threads.swap_remove(idx);
reaped_pti.thd.join().unwrap();
} else {
break;
}
}
}
fn prepare_expired_fqdns_for_reap(ping_workers : &mut HashMap<String, PingThreadInfo>, expired_fqdns : &Vec<String>, reap_threads : &mut Vec<PingThreadInfo>) {
for expired_fqdn in expired_fqdns.iter() {
match ping_workers.remove(expired_fqdn) {
Some(ping_worker) => {
reap_threads.push(ping_worker);
},
None => {}
}
}
}
fn check_expired_fqdn_workers(devices : &HashMap<String, DeviceInfo>, ping_workers : &HashMap<String, PingThreadInfo>, expired_fqdns : &mut Vec<String>, state_id: &i64) {
for (fqdn, ping_worker) in ping_workers.iter() {
if ping_worker.state_id != *state_id {
ping_worker.running.store(false, sync::atomic::Ordering::Relaxed);
expired_fqdns.push(fqdn.clone());
} else {
match devices.get(fqdn) {
Some(_) => {},
None => {
ping_worker.running.store(false, sync::atomic::Ordering::Relaxed);
expired_fqdns.push(fqdn.clone());
}
}
}
}
}
fn check_if_worker_needed(source_url : &String, devices : &HashMap<String, DeviceInfo>, mut ping_workers : &mut HashMap<String, PingThreadInfo>, state_id: &i64) {
for (fqdn, device_info) in devices.iter() {
if ping_workers.contains_key(&*fqdn) {
continue;
}
start_ping_worker(source_url, &device_info, &mut ping_workers, state_id);
}
}
fn main() {
let mut c = Config::new();
let source_url;
let mut ping_workers : HashMap<String, PingThreadInfo> = HashMap::new();
let mut reap_threads : Vec<PingThreadInfo> = Vec::new();
let mut devices : HashMap<String, DeviceInfo> = HashMap::new();
let mut state_id : i64 = 0;
c.merge(File::with_name("/etc/jaspy/pinger.yml").required(false)).unwrap()
.merge(File::with_name("~/.config/jaspy/pinger.yml").required(false)).unwrap()
.merge(Environment::with_prefix("JASPY")).unwrap();
match c.get_str("source_url") {
Ok(v) => { source_url = v },
Err(_) => {
if let Some(argv1) = std::env::args().nth(1) {
source_url = argv1;
} else {
println!("SOURCE_URL not defined!");
return;
}
},
}
loop {
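        // Each pass: refresh the device list, start workers for new FQDNs,
        // flag workers whose device vanished or whose state_id went stale,
        // then join workers that have signalled completion.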
match get_devices(&source_url) {
Ok(received_devices) => {
if state_id != received_devices.1 {
println!("detect state_id change {} -> {}", state_id, received_devices.1)
}
devices = received_devices.0;
state_id = received_devices.1;
},
Err(error) => {
println!("Failed to get new device listing: {}", error);
}
}
let mut expired_fqdns : Vec<String> = Vec::new();
check_if_worker_needed(&source_url, &devices, &mut ping_workers, &state_id);
check_expired_fqdn_workers(&devices, &ping_workers, &mut expired_fqdns, &state_id);
prepare_expired_fqdns_for_reap(&mut ping_workers, &expired_fqdns, &mut reap_threads);
reap_finished_threads(&mut reap_threads);
thread::sleep(time::Duration::from_millis(MAIN_LOOP_MSECS));
}
}
| 35.157333 | 170 | 0.583207 |
760940ca463b7c8e0b5c0555deb8d8bdcb055eea | 2,037 | #[doc = "Reader of register SCGC2"]
pub type R = crate::R<u32, super::SCGC2>;
#[doc = "Reader of field `GPIOA`"]
pub type GPIOA_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIOB`"]
pub type GPIOB_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIOC`"]
pub type GPIOC_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIOD`"]
pub type GPIOD_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIOE`"]
pub type GPIOE_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIOF`"]
pub type GPIOF_R = crate::R<bool, bool>;
#[doc = "Reader of field `UDMA`"]
pub type UDMA_R = crate::R<bool, bool>;
#[doc = "Reader of field `USB0`"]
pub type USB0_R = crate::R<bool, bool>;
impl R {
#[doc = "Bit 0 - Port A Clock Gating Control"]
#[inline(always)]
pub fn gpioa(&self) -> GPIOA_R {
GPIOA_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Port B Clock Gating Control"]
#[inline(always)]
pub fn gpiob(&self) -> GPIOB_R {
GPIOB_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - Port C Clock Gating Control"]
#[inline(always)]
pub fn gpioc(&self) -> GPIOC_R {
GPIOC_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - Port D Clock Gating Control"]
#[inline(always)]
pub fn gpiod(&self) -> GPIOD_R {
GPIOD_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - Port E Clock Gating Control"]
#[inline(always)]
pub fn gpioe(&self) -> GPIOE_R {
GPIOE_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 5 - Port F Clock Gating Control"]
#[inline(always)]
pub fn gpiof(&self) -> GPIOF_R {
GPIOF_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 13 - Micro-DMA Clock Gating Control"]
#[inline(always)]
pub fn udma(&self) -> UDMA_R {
UDMA_R::new(((self.bits >> 13) & 0x01) != 0)
}
#[doc = "Bit 16 - USB0 Clock Gating Control"]
#[inline(always)]
pub fn usb0(&self) -> USB0_R {
USB0_R::new(((self.bits >> 16) & 0x01) != 0)
}
}
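// Illustrative read-side usage (a sketch, assuming the usual svd2rust
// peripheral-access pattern; the exact peripheral path depends on the
// generated crate):
//
//     let p = unsafe { &*tm4c123x::SYSCTL::ptr() };
//     let port_f_clocked = p.scgc2.read().gpiof().bit_is_set();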
| 33.393443 | 54 | 0.559156 |
0e9635b5ba50a8ed4f6b5dcd7c96973f60307e73 | 2,345 | use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::{test_rng, Field, One, PrimeField, UniformRand};
use rand::Rng;
use crate::*;
use ark_curve_tests::{curves::*, groups::*};
#[test]
fn test_g1_projective_curve() {
curve_tests::<G1Projective>();
sw_tests::<g1::Parameters>();
}
#[test]
fn test_g1_projective_group() {
let mut rng = test_rng();
let a: G1Projective = rng.gen();
let b: G1Projective = rng.gen();
group_test(a, b);
}
#[test]
fn test_g1_generator() {
let generator = G1Affine::prime_subgroup_generator();
assert!(generator.is_on_curve());
assert!(generator.is_in_correct_subgroup_assuming_on_curve());
}
#[test]
fn test_g2_projective_curve() {
curve_tests::<G2Projective>();
sw_tests::<g2::Parameters>();
}
#[test]
fn test_g2_projective_group() {
let mut rng = test_rng();
let a: G2Projective = rng.gen();
let b: G2Projective = rng.gen();
group_test(a, b);
}
#[test]
fn test_g2_generator() {
let generator = G2Affine::prime_subgroup_generator();
assert!(generator.is_on_curve());
assert!(generator.is_in_correct_subgroup_assuming_on_curve());
}
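// The bilinearity test below checks the defining pairing identity: for s in
// Fr, e(s*a, b) = e(a, s*b) = e(a, b)^s, and because the pairing output lies
// in the subgroup of order r = char(Fr), raising any of these values to r
// must give the identity element.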
#[test]
fn test_bilinearity() {
let mut rng = test_rng();
let a: G1Projective = rng.gen();
let b: G2Projective = rng.gen();
let s: Fr = rng.gen();
let sa = a.mul(s.into_repr());
let sb = b.mul(s.into_repr());
let ans1 = MNT6_298::pairing(sa, b);
let ans2 = MNT6_298::pairing(a, sb);
let ans3 = MNT6_298::pairing(a, b).pow(s.into_repr());
assert_eq!(ans1, ans2);
assert_eq!(ans2, ans3);
assert_ne!(ans1, Fq6::one());
assert_ne!(ans2, Fq6::one());
assert_ne!(ans3, Fq6::one());
assert_eq!(ans1.pow(Fr::characteristic()), Fq6::one());
assert_eq!(ans2.pow(Fr::characteristic()), Fq6::one());
assert_eq!(ans3.pow(Fr::characteristic()), Fq6::one());
}
#[test]
fn test_product_of_pairings() {
let rng = &mut test_rng();
let a = G1Projective::rand(rng).into_affine();
let b = G2Projective::rand(rng).into_affine();
let c = G1Projective::rand(rng).into_affine();
let d = G2Projective::rand(rng).into_affine();
let ans1 = MNT6_298::pairing(a, b) * &MNT6_298::pairing(c, d);
let ans2 = MNT6_298::product_of_pairings(&[(a.into(), b.into()), (c.into(), d.into())]);
assert_eq!(ans1, ans2);
}
| 25.769231 | 92 | 0.643923 |
bb1b8065eced613bc65f7d160570b775ad024298 | 20,331 | use crate::parser::hir::syntax_shape::{
expand_syntax, expression::expand_file_path, parse_single_node, BarePathShape,
BarePatternShape, ExpandContext, UnitShape,
};
use crate::parser::{
hir,
hir::{Expression, RawNumber, TokensIterator},
parse::flag::{Flag, FlagKind},
DelimitedNode, Delimiter, FlatShape, RawToken, TokenNode, Unit,
};
use crate::prelude::*;
use crate::{Span, Spanned};
#[derive(Debug)]
pub enum AtomicToken<'tokens> {
Eof {
span: Span,
},
Error {
error: Spanned<ShellError>,
},
Number {
number: RawNumber,
},
Size {
number: Spanned<RawNumber>,
unit: Spanned<Unit>,
},
String {
body: Span,
},
ItVariable {
name: Span,
},
Variable {
name: Span,
},
ExternalCommand {
command: Span,
},
ExternalWord {
text: Span,
},
GlobPattern {
pattern: Span,
},
FilePath {
path: Span,
},
Word {
text: Span,
},
SquareDelimited {
spans: (Span, Span),
nodes: &'tokens Vec<TokenNode>,
},
ParenDelimited {
span: (Span, Span),
nodes: &'tokens Vec<TokenNode>,
},
BraceDelimited {
spans: (Span, Span),
nodes: &'tokens Vec<TokenNode>,
},
Pipeline {
pipe: Option<Span>,
elements: Spanned<&'tokens Vec<TokenNode>>,
},
ShorthandFlag {
name: Span,
},
LonghandFlag {
name: Span,
},
Dot {
text: Span,
},
Operator {
text: Span,
},
Whitespace {
text: Span,
},
}
pub type SpannedAtomicToken<'tokens> = Spanned<AtomicToken<'tokens>>;
impl<'tokens> SpannedAtomicToken<'tokens> {
pub fn into_hir(
&self,
context: &ExpandContext,
expected: &'static str,
) -> Result<hir::Expression, ShellError> {
Ok(match &self.item {
AtomicToken::Eof { .. } => {
return Err(ShellError::type_error(
expected,
"eof atomic token".tagged(self.span),
))
}
AtomicToken::Error { .. } => {
return Err(ShellError::type_error(
expected,
"eof atomic token".tagged(self.span),
))
}
AtomicToken::Operator { .. } => {
return Err(ShellError::type_error(
expected,
"operator".tagged(self.span),
))
}
AtomicToken::ShorthandFlag { .. } => {
return Err(ShellError::type_error(
expected,
"shorthand flag".tagged(self.span),
))
}
AtomicToken::LonghandFlag { .. } => {
return Err(ShellError::type_error(expected, "flag".tagged(self.span)))
}
AtomicToken::Whitespace { .. } => {
return Err(ShellError::unimplemented("whitespace in AtomicToken"))
}
AtomicToken::Dot { .. } => {
return Err(ShellError::type_error(expected, "dot".tagged(self.span)))
}
AtomicToken::Number { number } => {
Expression::number(number.to_number(context.source), self.span)
}
AtomicToken::FilePath { path } => Expression::file_path(
expand_file_path(path.slice(context.source), context),
self.span,
),
AtomicToken::Size { number, unit } => {
Expression::size(number.to_number(context.source), **unit, self.span)
}
AtomicToken::String { body } => Expression::string(*body, self.span),
AtomicToken::ItVariable { name } => Expression::it_variable(*name, self.span),
AtomicToken::Variable { name } => Expression::variable(*name, self.span),
AtomicToken::ExternalCommand { command } => {
Expression::external_command(*command, self.span)
}
AtomicToken::ExternalWord { text } => Expression::string(*text, self.span),
AtomicToken::GlobPattern { pattern } => Expression::pattern(*pattern),
AtomicToken::Word { text } => Expression::string(*text, *text),
AtomicToken::SquareDelimited { .. } => unimplemented!("into_hir"),
AtomicToken::ParenDelimited { .. } => unimplemented!("into_hir"),
AtomicToken::BraceDelimited { .. } => unimplemented!("into_hir"),
AtomicToken::Pipeline { .. } => unimplemented!("into_hir"),
})
}
pub fn spanned_type_name(&self) -> Spanned<&'static str> {
match &self.item {
AtomicToken::Eof { .. } => "eof",
AtomicToken::Error { .. } => "error",
AtomicToken::Operator { .. } => "operator",
AtomicToken::ShorthandFlag { .. } => "shorthand flag",
AtomicToken::LonghandFlag { .. } => "flag",
AtomicToken::Whitespace { .. } => "whitespace",
AtomicToken::Dot { .. } => "dot",
AtomicToken::Number { .. } => "number",
AtomicToken::FilePath { .. } => "file path",
AtomicToken::Size { .. } => "size",
AtomicToken::String { .. } => "string",
AtomicToken::ItVariable { .. } => "$it",
AtomicToken::Variable { .. } => "variable",
AtomicToken::ExternalCommand { .. } => "external command",
AtomicToken::ExternalWord { .. } => "external word",
AtomicToken::GlobPattern { .. } => "file pattern",
AtomicToken::Word { .. } => "word",
AtomicToken::SquareDelimited { .. } => "array literal",
AtomicToken::ParenDelimited { .. } => "parenthesized expression",
AtomicToken::BraceDelimited { .. } => "block",
AtomicToken::Pipeline { .. } => "pipeline",
}
.spanned(self.span)
}
pub fn tagged_type_name(&self) -> Tagged<&'static str> {
match &self.item {
AtomicToken::Eof { .. } => "eof",
AtomicToken::Error { .. } => "error",
AtomicToken::Operator { .. } => "operator",
AtomicToken::ShorthandFlag { .. } => "shorthand flag",
AtomicToken::LonghandFlag { .. } => "flag",
AtomicToken::Whitespace { .. } => "whitespace",
AtomicToken::Dot { .. } => "dot",
AtomicToken::Number { .. } => "number",
AtomicToken::FilePath { .. } => "file path",
AtomicToken::Size { .. } => "size",
AtomicToken::String { .. } => "string",
AtomicToken::ItVariable { .. } => "$it",
AtomicToken::Variable { .. } => "variable",
AtomicToken::ExternalCommand { .. } => "external command",
AtomicToken::ExternalWord { .. } => "external word",
AtomicToken::GlobPattern { .. } => "file pattern",
AtomicToken::Word { .. } => "word",
AtomicToken::SquareDelimited { .. } => "array literal",
AtomicToken::ParenDelimited { .. } => "parenthesized expression",
AtomicToken::BraceDelimited { .. } => "block",
AtomicToken::Pipeline { .. } => "pipeline",
}
.tagged(self.span)
}
pub(crate) fn color_tokens(&self, shapes: &mut Vec<Spanned<FlatShape>>) {
match &self.item {
AtomicToken::Eof { .. } => {}
AtomicToken::Error { .. } => return shapes.push(FlatShape::Error.spanned(self.span)),
AtomicToken::Operator { .. } => {
return shapes.push(FlatShape::Operator.spanned(self.span));
}
AtomicToken::ShorthandFlag { .. } => {
return shapes.push(FlatShape::ShorthandFlag.spanned(self.span));
}
AtomicToken::LonghandFlag { .. } => {
return shapes.push(FlatShape::Flag.spanned(self.span));
}
AtomicToken::Whitespace { .. } => {
return shapes.push(FlatShape::Whitespace.spanned(self.span));
}
AtomicToken::FilePath { .. } => return shapes.push(FlatShape::Path.spanned(self.span)),
AtomicToken::Dot { .. } => return shapes.push(FlatShape::Dot.spanned(self.span)),
AtomicToken::Number {
number: RawNumber::Decimal(_),
} => {
return shapes.push(FlatShape::Decimal.spanned(self.span));
}
AtomicToken::Number {
number: RawNumber::Int(_),
} => {
return shapes.push(FlatShape::Int.spanned(self.span));
}
AtomicToken::Size { number, unit } => {
return shapes.push(
FlatShape::Size {
number: number.span,
unit: unit.span,
}
.spanned(self.span),
);
}
AtomicToken::String { .. } => return shapes.push(FlatShape::String.spanned(self.span)),
AtomicToken::ItVariable { .. } => {
return shapes.push(FlatShape::ItVariable.spanned(self.span))
}
AtomicToken::Variable { .. } => {
return shapes.push(FlatShape::Variable.spanned(self.span))
}
AtomicToken::ExternalCommand { .. } => {
return shapes.push(FlatShape::ExternalCommand.spanned(self.span));
}
AtomicToken::ExternalWord { .. } => {
return shapes.push(FlatShape::ExternalWord.spanned(self.span))
}
AtomicToken::GlobPattern { .. } => {
return shapes.push(FlatShape::GlobPattern.spanned(self.span))
}
AtomicToken::Word { .. } => return shapes.push(FlatShape::Word.spanned(self.span)),
_ => return shapes.push(FlatShape::Error.spanned(self.span)),
}
}
}
#[derive(Debug)]
pub enum WhitespaceHandling {
#[allow(unused)]
AllowWhitespace,
RejectWhitespace,
}
#[derive(Debug)]
pub struct ExpansionRule {
pub(crate) allow_external_command: bool,
pub(crate) allow_external_word: bool,
pub(crate) allow_operator: bool,
pub(crate) allow_eof: bool,
pub(crate) treat_size_as_word: bool,
pub(crate) commit_errors: bool,
pub(crate) whitespace: WhitespaceHandling,
}
impl ExpansionRule {
pub fn new() -> ExpansionRule {
ExpansionRule {
allow_external_command: false,
allow_external_word: false,
allow_operator: false,
allow_eof: false,
treat_size_as_word: false,
commit_errors: false,
whitespace: WhitespaceHandling::RejectWhitespace,
}
}
/// The intent of permissive mode is to return an atomic token for every possible
/// input token. This is important for error-correcting parsing, such as the
/// syntax highlighter.
pub fn permissive() -> ExpansionRule {
ExpansionRule {
allow_external_command: true,
allow_external_word: true,
allow_operator: true,
allow_eof: true,
treat_size_as_word: false,
commit_errors: true,
whitespace: WhitespaceHandling::AllowWhitespace,
}
}
#[allow(unused)]
pub fn allow_external_command(mut self) -> ExpansionRule {
self.allow_external_command = true;
self
}
#[allow(unused)]
pub fn allow_operator(mut self) -> ExpansionRule {
self.allow_operator = true;
self
}
#[allow(unused)]
pub fn no_operator(mut self) -> ExpansionRule {
self.allow_operator = false;
self
}
#[allow(unused)]
pub fn no_external_command(mut self) -> ExpansionRule {
self.allow_external_command = false;
self
}
#[allow(unused)]
pub fn allow_external_word(mut self) -> ExpansionRule {
self.allow_external_word = true;
self
}
#[allow(unused)]
pub fn no_external_word(mut self) -> ExpansionRule {
self.allow_external_word = false;
self
}
#[allow(unused)]
pub fn treat_size_as_word(mut self) -> ExpansionRule {
self.treat_size_as_word = true;
self
}
#[allow(unused)]
pub fn commit_errors(mut self) -> ExpansionRule {
self.commit_errors = true;
self
}
#[allow(unused)]
pub fn allow_whitespace(mut self) -> ExpansionRule {
self.whitespace = WhitespaceHandling::AllowWhitespace;
self
}
#[allow(unused)]
pub fn reject_whitespace(mut self) -> ExpansionRule {
self.whitespace = WhitespaceHandling::RejectWhitespace;
self
}
}
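// A hypothetical builder chain (illustrative only, not taken from any call
// site): start from the strict defaults and selectively relax them, e.g.
//
//     let rule = ExpansionRule::new()
//         .allow_external_word()
//         .treat_size_as_word();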
/// If the caller of expand_atom throws away the returned atomic token, it
/// must use a checkpoint to roll it back.
pub fn expand_atom<'me, 'content>(
token_nodes: &'me mut TokensIterator<'content>,
expected: &'static str,
context: &ExpandContext,
rule: ExpansionRule,
) -> Result<SpannedAtomicToken<'content>, ShellError> {
if token_nodes.at_end() {
match rule.allow_eof {
true => {
return Ok(AtomicToken::Eof {
span: Span::unknown(),
}
.spanned(Span::unknown()))
}
false => return Err(ShellError::unexpected_eof("anything", Tag::unknown())),
}
}
// First, we'll need to handle the situation where more than one token corresponds
// to a single atomic token
// If treat_size_as_word, don't try to parse the head of the token stream
// as a size.
match rule.treat_size_as_word {
true => {}
false => match expand_syntax(&UnitShape, token_nodes, context) {
// If the head of the stream isn't a valid unit, we'll try to parse
// it again next as a word
Err(_) => {}
// But if it was a valid unit, we're done here
Ok(Spanned {
item: (number, unit),
span,
}) => return Ok(AtomicToken::Size { number, unit }.spanned(span)),
},
}
// Try to parse the head of the stream as a bare path. A bare path includes
// words as well as `.`s, connected together without whitespace.
match expand_syntax(&BarePathShape, token_nodes, context) {
// If we didn't find a bare path
Err(_) => {}
Ok(span) => {
let next = token_nodes.peek_any();
match next.node {
Some(token) if token.is_pattern() => {
// if the very next token is a pattern, we're looking at a glob, not a
// word, and we should try to parse it as a glob next
}
_ => return Ok(AtomicToken::Word { text: span }.spanned(span)),
}
}
}
// Try to parse the head of the stream as a pattern. A pattern includes
// words, words with `*` as well as `.`s, connected together without whitespace.
match expand_syntax(&BarePatternShape, token_nodes, context) {
// If we didn't find a bare path
Err(_) => {}
Ok(span) => return Ok(AtomicToken::GlobPattern { pattern: span }.spanned(span)),
}
// The next token corresponds to at most one atomic token
// We need to `peek` because `parse_single_node` doesn't cover all of the
// cases that `expand_atom` covers. We should probably collapse the two
// if possible.
let peeked = token_nodes.peek_any().not_eof(expected)?;
match peeked.node {
TokenNode::Token(_) => {
// handle this next
}
TokenNode::Error(error) => {
peeked.commit();
return Ok(AtomicToken::Error {
error: error.clone(),
}
.spanned(error.span));
}
// [ ... ]
TokenNode::Delimited(Spanned {
item:
DelimitedNode {
delimiter: Delimiter::Square,
spans,
children,
},
span,
}) => {
peeked.commit();
let span = *span;
return Ok(AtomicToken::SquareDelimited {
nodes: children,
spans: *spans,
}
.spanned(span));
}
TokenNode::Flag(Spanned {
item:
Flag {
kind: FlagKind::Shorthand,
name,
},
span,
}) => {
peeked.commit();
return Ok(AtomicToken::ShorthandFlag { name: *name }.spanned(*span));
}
TokenNode::Flag(Spanned {
item:
Flag {
kind: FlagKind::Longhand,
name,
},
span,
}) => {
peeked.commit();
            return Ok(AtomicToken::LonghandFlag { name: *name }.spanned(*span));
}
// If we see whitespace, process the whitespace according to the whitespace
// handling rules
TokenNode::Whitespace(span) => match rule.whitespace {
// if whitespace is allowed, return a whitespace token
WhitespaceHandling::AllowWhitespace => {
peeked.commit();
return Ok(AtomicToken::Whitespace { text: *span }.spanned(*span));
}
// if whitespace is disallowed, return an error
WhitespaceHandling::RejectWhitespace => {
return Err(ShellError::syntax_error("Unexpected whitespace".tagged(
Tag {
span: *span,
anchor: None,
},
)))
}
},
other => {
let span = peeked.node.span();
peeked.commit();
return Ok(AtomicToken::Error {
error: ShellError::type_error("token", other.tagged_type_name()).spanned(span),
}
.spanned(span));
}
}
parse_single_node(token_nodes, expected, |token, token_span, err| {
Ok(match token {
// First, the error cases. Each error case corresponds to a expansion rule
// flag that can be used to allow the case
// rule.allow_operator
RawToken::Operator(_) if !rule.allow_operator => return Err(err.error()),
// rule.allow_external_command
RawToken::ExternalCommand(_) if !rule.allow_external_command => {
return Err(ShellError::type_error(
expected,
token.type_name().tagged(Tag {
span: token_span,
anchor: None,
}),
))
}
// rule.allow_external_word
RawToken::ExternalWord if !rule.allow_external_word => {
return Err(ShellError::invalid_external_word(Tag {
span: token_span,
anchor: None,
}))
}
RawToken::Number(number) => AtomicToken::Number { number }.spanned(token_span),
RawToken::Operator(_) => AtomicToken::Operator { text: token_span }.spanned(token_span),
RawToken::String(body) => AtomicToken::String { body }.spanned(token_span),
RawToken::Variable(name) if name.slice(context.source) == "it" => {
AtomicToken::ItVariable { name }.spanned(token_span)
}
RawToken::Variable(name) => AtomicToken::Variable { name }.spanned(token_span),
RawToken::ExternalCommand(command) => {
AtomicToken::ExternalCommand { command }.spanned(token_span)
}
RawToken::ExternalWord => {
AtomicToken::ExternalWord { text: token_span }.spanned(token_span)
}
RawToken::GlobPattern => AtomicToken::GlobPattern {
pattern: token_span,
}
.spanned(token_span),
RawToken::Bare => AtomicToken::Word { text: token_span }.spanned(token_span),
})
})
}
| 34.993115 | 100 | 0.525552 |
50aa1550fb768c3539ad13d0484d99141997075a | 21,025 | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use borrow_check::borrow_set::BorrowSet;
use borrow_check::location::LocationTable;
use borrow_check::{JustWrite, WriteAndRead};
use borrow_check::{ShallowOrDeep, Deep, Shallow};
use borrow_check::{ReadOrWrite, Activation, Read, Reservation, Write};
use borrow_check::{Context, ContextKind};
use borrow_check::{LocalMutationIsAllowed, MutateMode};
use borrow_check::ArtificialField;
use borrow_check::{ReadKind, WriteKind};
use borrow_check::nll::facts::AllFacts;
use borrow_check::path_utils::*;
use dataflow::move_paths::indexes::BorrowIndex;
use rustc::hir::def_id::DefId;
use rustc::infer::InferCtxt;
use rustc::mir::visit::Visitor;
use rustc::mir::{BasicBlock, Location, Mir, Place, Rvalue, Local};
use rustc::mir::{Statement, StatementKind};
use rustc::mir::{Terminator, TerminatorKind};
use rustc::mir::{Field, Operand, BorrowKind};
use rustc::ty::{self, ParamEnv};
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::control_flow_graph::dominators::Dominators;
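/// Walks the MIR of `mir_def_id` and records `invalidates(point, loan)` facts
/// into `all_facts` for the Polonius-style solver; a no-op when fact
/// generation is disabled (`all_facts` is `None`).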
pub(super) fn generate_invalidates<'cx, 'gcx, 'tcx>(
infcx: &InferCtxt<'cx, 'gcx, 'tcx>,
all_facts: &mut Option<AllFacts>,
location_table: &LocationTable,
mir: &Mir<'tcx>,
mir_def_id: DefId,
borrow_set: &BorrowSet<'tcx>,
) {
    if all_facts.is_none() {
// Nothing to do if we don't have any facts
return;
}
let param_env = infcx.tcx.param_env(mir_def_id);
if let Some(all_facts) = all_facts {
let dominators = mir.dominators();
let mut ig = InvalidationGenerator {
all_facts,
borrow_set,
infcx,
location_table,
mir,
dominators,
param_env,
};
ig.visit_mir(mir);
}
}
/// 'cg = the duration of the constraint generation process itself.
struct InvalidationGenerator<'cg, 'cx: 'cg, 'tcx: 'cx, 'gcx: 'tcx> {
infcx: &'cg InferCtxt<'cx, 'gcx, 'tcx>,
all_facts: &'cg mut AllFacts,
location_table: &'cg LocationTable,
mir: &'cg Mir<'tcx>,
dominators: Dominators<BasicBlock>,
borrow_set: &'cg BorrowSet<'tcx>,
param_env: ParamEnv<'gcx>,
}
/// Visits the whole MIR and generates invalidates() facts
/// Most of the code implementing this was stolen from borrow_check/mod.rs
impl<'cg, 'cx, 'tcx, 'gcx> Visitor<'tcx> for InvalidationGenerator<'cg, 'cx, 'tcx, 'gcx> {
fn visit_statement(&mut self,
block: BasicBlock,
statement: &Statement<'tcx>,
location: Location) {
match statement.kind {
StatementKind::Assign(ref lhs, ref rhs) => {
self.consume_rvalue(
ContextKind::AssignRhs.new(location),
rhs,
);
self.mutate_place(
ContextKind::AssignLhs.new(location),
lhs,
Shallow(None),
JustWrite
);
}
StatementKind::SetDiscriminant {
ref place,
variant_index: _,
} => {
self.mutate_place(
ContextKind::SetDiscrim.new(location),
place,
Shallow(Some(ArtificialField::Discriminant)),
JustWrite,
);
}
StatementKind::InlineAsm {
ref asm,
ref outputs,
ref inputs,
} => {
let context = ContextKind::InlineAsm.new(location);
for (o, output) in asm.outputs.iter().zip(outputs) {
if o.is_indirect {
// FIXME(eddyb) indirect inline asm outputs should
                        // be encoded through MIR place derefs instead.
self.access_place(
context,
output,
(Deep, Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
);
} else {
self.mutate_place(
context,
output,
if o.is_rw { Deep } else { Shallow(None) },
if o.is_rw { WriteAndRead } else { JustWrite },
);
}
}
for input in inputs {
self.consume_operand(context, input);
}
}
// EndRegion matters to older NLL/MIR AST borrowck, not to alias NLL
StatementKind::EndRegion(..) |
StatementKind::Nop |
StatementKind::UserAssertTy(..) |
StatementKind::Validate(..) |
StatementKind::StorageLive(..) => {
// `Nop`, `UserAssertTy`, `Validate`, and `StorageLive` are irrelevant
// to borrow check.
}
StatementKind::StorageDead(local) => {
self.access_place(
ContextKind::StorageDead.new(location),
&Place::Local(local),
(Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
);
}
}
self.super_statement(block, statement, location);
}
fn visit_terminator(
&mut self,
block: BasicBlock,
terminator: &Terminator<'tcx>,
location: Location
) {
match terminator.kind {
TerminatorKind::SwitchInt {
ref discr,
switch_ty: _,
values: _,
targets: _,
} => {
self.consume_operand(ContextKind::SwitchInt.new(location), discr);
}
TerminatorKind::Drop {
location: ref drop_place,
target: _,
unwind: _,
} => {
let tcx = self.infcx.tcx;
let gcx = tcx.global_tcx();
let drop_place_ty = drop_place.ty(self.mir, tcx);
let drop_place_ty = tcx.erase_regions(&drop_place_ty).to_ty(tcx);
let drop_place_ty = gcx.lift(&drop_place_ty).unwrap();
self.visit_terminator_drop(location, terminator, drop_place, drop_place_ty);
}
TerminatorKind::DropAndReplace {
location: ref drop_place,
value: ref new_value,
target: _,
unwind: _,
} => {
self.mutate_place(
ContextKind::DropAndReplace.new(location),
drop_place,
Deep,
JustWrite,
);
self.consume_operand(
ContextKind::DropAndReplace.new(location),
new_value,
);
}
TerminatorKind::Call {
ref func,
ref args,
ref destination,
cleanup: _,
} => {
self.consume_operand(ContextKind::CallOperator.new(location), func);
for arg in args {
self.consume_operand(ContextKind::CallOperand.new(location), arg);
}
if let Some((ref dest, _ /*bb*/)) = *destination {
self.mutate_place(
ContextKind::CallDest.new(location),
dest,
Deep,
JustWrite,
);
}
}
TerminatorKind::Assert {
ref cond,
expected: _,
ref msg,
target: _,
cleanup: _,
} => {
self.consume_operand(ContextKind::Assert.new(location), cond);
use rustc::mir::interpret::EvalErrorKind::BoundsCheck;
if let BoundsCheck { ref len, ref index } = *msg {
self.consume_operand(ContextKind::Assert.new(location), len);
self.consume_operand(ContextKind::Assert.new(location), index);
}
}
TerminatorKind::Yield {
ref value,
resume,
drop: _,
} => {
self.consume_operand(ContextKind::Yield.new(location), value);
// Invalidate all borrows of local places
let borrow_set = self.borrow_set.clone();
let resume = self.location_table.start_index(resume.start_location());
for i in borrow_set.borrows.indices() {
if borrow_of_local_data(&borrow_set.borrows[i].borrowed_place) {
self.all_facts.invalidates.push((resume, i));
}
}
}
TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
// Invalidate all borrows of local places
let borrow_set = self.borrow_set.clone();
let start = self.location_table.start_index(location);
for i in borrow_set.borrows.indices() {
if borrow_of_local_data(&borrow_set.borrows[i].borrowed_place) {
self.all_facts.invalidates.push((start, i));
}
}
}
TerminatorKind::Goto { target: _ }
| TerminatorKind::Abort
| TerminatorKind::Unreachable
| TerminatorKind::FalseEdges {
real_target: _,
imaginary_targets: _,
}
| TerminatorKind::FalseUnwind {
real_target: _,
unwind: _,
} => {
// no data used, thus irrelevant to borrowck
}
}
self.super_terminator(block, terminator, location);
}
}
impl<'cg, 'cx, 'tcx, 'gcx> InvalidationGenerator<'cg, 'cx, 'tcx, 'gcx> {
/// Simulates dropping of a variable
fn visit_terminator_drop(
&mut self,
loc: Location,
term: &Terminator<'tcx>,
drop_place: &Place<'tcx>,
erased_drop_place_ty: ty::Ty<'gcx>,
) {
let gcx = self.infcx.tcx.global_tcx();
let drop_field = |
ig: &mut InvalidationGenerator<'cg, 'cx, 'gcx, 'tcx>,
(index, field): (usize, ty::Ty<'gcx>),
| {
let field_ty = gcx.normalize_erasing_regions(ig.param_env, field);
let place = drop_place.clone().field(Field::new(index), field_ty);
ig.visit_terminator_drop(loc, term, &place, field_ty);
};
match erased_drop_place_ty.sty {
// When a struct is being dropped, we need to check
// whether it has a destructor, if it does, then we can
// call it, if it does not then we need to check the
// individual fields instead. This way if `foo` has a
// destructor but `bar` does not, we will only check for
// borrows of `x.foo` and not `x.bar`. See #47703.
ty::TyAdt(def, substs) if def.is_struct() && !def.has_dtor(self.infcx.tcx) => {
def.all_fields()
.map(|field| field.ty(gcx, substs))
.enumerate()
.for_each(|field| drop_field(self, field));
}
// Same as above, but for tuples.
ty::TyTuple(tys) => {
tys.iter().cloned().enumerate()
.for_each(|field| drop_field(self, field));
}
// Closures and generators also have disjoint fields, but they are only
// directly accessed in the body of the closure/generator.
ty::TyGenerator(def, substs, ..)
if *drop_place == Place::Local(Local::new(1)) && !self.mir.upvar_decls.is_empty()
=> {
substs.upvar_tys(def, self.infcx.tcx).enumerate()
.for_each(|field| drop_field(self, field));
}
ty::TyClosure(def, substs)
if *drop_place == Place::Local(Local::new(1)) && !self.mir.upvar_decls.is_empty()
=> {
substs.upvar_tys(def, self.infcx.tcx).enumerate()
.for_each(|field| drop_field(self, field));
}
_ => {
// We have now refined the type of the value being
// dropped (potentially) to just the type of a
// subfield; so check whether that field's type still
// "needs drop". If so, we assume that the destructor
// may access any data it likes (i.e., a Deep Write).
if erased_drop_place_ty.needs_drop(gcx, self.param_env) {
self.access_place(
ContextKind::Drop.new(loc),
drop_place,
(Deep, Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
);
}
}
}
}
/// Simulates mutation of a place
fn mutate_place(
&mut self,
context: Context,
place: &Place<'tcx>,
kind: ShallowOrDeep,
_mode: MutateMode,
) {
self.access_place(
context,
place,
(kind, Write(WriteKind::Mutate)),
LocalMutationIsAllowed::ExceptUpvars,
);
}
/// Simulates consumption of an operand
fn consume_operand(
&mut self,
context: Context,
operand: &Operand<'tcx>,
) {
match *operand {
Operand::Copy(ref place) => {
self.access_place(
context,
place,
(Deep, Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
);
}
Operand::Move(ref place) => {
self.access_place(
context,
place,
(Deep, Write(WriteKind::Move)),
LocalMutationIsAllowed::Yes,
);
}
Operand::Constant(_) => {}
}
}
    /// Simulates consumption of an rvalue
fn consume_rvalue(
&mut self,
context: Context,
rvalue: &Rvalue<'tcx>,
) {
match *rvalue {
Rvalue::Ref(_ /*rgn*/, bk, ref place) => {
let access_kind = match bk {
BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
BorrowKind::Unique | BorrowKind::Mut { .. } => {
let wk = WriteKind::MutableBorrow(bk);
if allow_two_phase_borrow(&self.infcx.tcx, bk) {
(Deep, Reservation(wk))
} else {
(Deep, Write(wk))
}
}
};
self.access_place(
context,
place,
access_kind,
LocalMutationIsAllowed::No,
);
}
Rvalue::Use(ref operand)
| Rvalue::Repeat(ref operand, _)
| Rvalue::UnaryOp(_ /*un_op*/, ref operand)
| Rvalue::Cast(_ /*cast_kind*/, ref operand, _ /*ty*/) => {
self.consume_operand(context, operand)
}
Rvalue::Len(ref place) | Rvalue::Discriminant(ref place) => {
let af = match *rvalue {
Rvalue::Len(..) => ArtificialField::ArrayLength,
Rvalue::Discriminant(..) => ArtificialField::Discriminant,
_ => unreachable!(),
};
self.access_place(
context,
place,
(Shallow(Some(af)), Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
);
}
Rvalue::BinaryOp(_bin_op, ref operand1, ref operand2)
| Rvalue::CheckedBinaryOp(_bin_op, ref operand1, ref operand2) => {
self.consume_operand(context, operand1);
self.consume_operand(context, operand2);
}
Rvalue::NullaryOp(_op, _ty) => {
}
Rvalue::Aggregate(_, ref operands) => {
for operand in operands {
self.consume_operand(context, operand);
}
}
}
}
/// Simulates an access to a place
fn access_place(
&mut self,
context: Context,
place: &Place<'tcx>,
kind: (ShallowOrDeep, ReadOrWrite),
_is_local_mutation_allowed: LocalMutationIsAllowed,
) {
let (sd, rw) = kind;
// note: not doing check_access_permissions checks because they don't generate invalidates
self.check_access_for_conflict(context, place, sd, rw);
}
fn check_access_for_conflict(
&mut self,
context: Context,
place: &Place<'tcx>,
sd: ShallowOrDeep,
rw: ReadOrWrite,
) {
debug!(
"invalidation::check_access_for_conflict(context={:?}, place={:?}, sd={:?}, \
rw={:?})",
context,
place,
sd,
rw,
);
let tcx = self.infcx.tcx;
let mir = self.mir;
let borrow_set = self.borrow_set.clone();
let indices = self.borrow_set.borrows.indices();
each_borrow_involving_path(
self,
tcx,
mir,
context,
(sd, place),
&borrow_set.clone(),
indices,
|this, borrow_index, borrow| {
match (rw, borrow.kind) {
// Obviously an activation is compatible with its own
// reservation (or even prior activating uses of same
// borrow); so don't check if they interfere.
//
// NOTE: *reservations* do conflict with themselves;
                    // thus aren't injecting unsoundness w/ this check.
(Activation(_, activating), _) if activating == borrow_index => {
// Activating a borrow doesn't generate any invalidations, since we
// have already taken the reservation
}
(Read(_), BorrowKind::Shared) | (Reservation(..), BorrowKind::Shared) => {
// Reads/reservations don't invalidate shared borrows
}
(Read(_), BorrowKind::Unique) | (Read(_), BorrowKind::Mut { .. }) => {
// Reading from mere reservations of mutable-borrows is OK.
if !is_active(&this.dominators, borrow, context.loc) {
// If the borrow isn't active yet, reads don't invalidate it
assert!(allow_two_phase_borrow(&this.infcx.tcx, borrow.kind));
return Control::Continue;
}
// Unique and mutable borrows are invalidated by reads from any
// involved path
this.generate_invalidates(borrow_index, context.loc);
}
(Reservation(_), BorrowKind::Unique)
| (Reservation(_), BorrowKind::Mut { .. })
| (Activation(_, _), _)
| (Write(_), _) => {
// unique or mutable borrows are invalidated by writes.
// Reservations count as writes since we need to check
// that activating the borrow will be OK
                    // TODO(bob_twinkles) is this actually the right thing to do?
this.generate_invalidates(borrow_index, context.loc);
}
}
Control::Continue
},
);
}
/// Generate a new invalidates(L, B) fact
fn generate_invalidates(&mut self, b: BorrowIndex, l: Location) {
let lidx = self.location_table.mid_index(l);
self.all_facts.invalidates.push((lidx, b));
}
}
| 38.019892 | 98 | 0.492081 |
4bf93a2ca7cbc9cd22a559867c21336910bf6588 | 29,410 | // Copyright (c) 2017-2019, The rav1e contributors. All rights reserved
//
// This source code is subject to the terms of the BSD 2 Clause License and
// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
// was not distributed with this source code in the LICENSE file, you can
// obtain it at www.aomedia.org/license/software. If the Alliance for Open
// Media Patent License 1.0 was not distributed with this source code in the
// PATENTS file, you can obtain it at www.aomedia.org/license/patent.
use crate::color::ChromaSampling::Cs400;
use crate::context::*;
use crate::encoder::{FrameInvariants, FrameState};
use crate::frame::*;
use crate::hawktracer::*;
use crate::tiling::*;
use crate::util::{clamp, msb, CastFromPrimitive, Pixel};
use crate::cpu_features::CpuFeatureLevel;
use crate::rayon::iter::*;
use std::cmp;
cfg_if::cfg_if! {
if #[cfg(nasm_x86_64)] {
pub(crate) use crate::asm::x86::cdef::*;
} else {
pub(crate) use self::rust::*;
}
}
pub const CDEF_VERY_LARGE: u16 = 0x8000;
pub(crate) const CDEF_SEC_STRENGTHS: u8 = 4;
pub struct CdefDirections {
dir: [[u8; 8]; 8],
var: [[i32; 8]; 8],
}
pub(crate) mod rust {
use super::*;
use simd_helpers::cold_for_target_arch;
// Instead of dividing by n between 2 and 8, we multiply by 3*5*7*8/n.
// The output is then 840 times larger, but we don't care for finding
// the max.
const CDEF_DIV_TABLE: [i32; 9] = [0, 840, 420, 280, 210, 168, 140, 120, 105];
/// Returns the position and value of the first instance of the max element in
/// a slice as a tuple.
///
/// # Arguments
///
/// * `elems` - A non-empty slice of integers
///
/// # Panics
///
/// Panics if `elems` is empty
#[inline]
fn first_max_element(elems: &[i32]) -> (usize, i32) {
// In case of a tie, the first element must be selected.
let (max_idx, max_value) = elems
.iter()
.enumerate()
.max_by_key(|&(i, v)| (v, -(i as isize)))
.unwrap();
(max_idx, *max_value)
}
// Detect direction. 0 means 45-degree up-right, 2 is horizontal, and so on.
// The search minimizes the weighted variance along all the lines in a
// particular direction, i.e. the squared error between the input and a
// "predicted" block where each pixel is replaced by the average along a line
  // in a particular direction. Since each direction has the same sum(x^2) term,
// that term is never computed. See Section 2, step 2, of:
// http://jmvalin.ca/notes/intra_paint.pdf
pub fn cdef_find_dir<T: Pixel>(
img: &PlaneSlice<'_, u16>, var: &mut u32, coeff_shift: usize,
_cpu: CpuFeatureLevel,
) -> i32 {
let mut cost: [i32; 8] = [0; 8];
let mut partial: [[i32; 15]; 8] = [[0; 15]; 8];
for i in 0..8 {
for j in 0..8 {
let p: i32 = img[i][j] as i32;
// We subtract 128 here to reduce the maximum range of the squared
// partial sums.
debug_assert!(p >> coeff_shift <= 255);
let x = (p >> coeff_shift) - 128;
partial[0][i + j] += x;
partial[1][i + j / 2] += x;
partial[2][i] += x;
partial[3][3 + i - j / 2] += x;
partial[4][7 + i - j] += x;
partial[5][3 - i / 2 + j] += x;
partial[6][j] += x;
partial[7][i / 2 + j] += x;
}
}
for i in 0..8 {
cost[2] += partial[2][i] * partial[2][i];
cost[6] += partial[6][i] * partial[6][i];
}
cost[2] *= CDEF_DIV_TABLE[8];
cost[6] *= CDEF_DIV_TABLE[8];
for i in 0..7 {
cost[0] += (partial[0][i] * partial[0][i]
+ partial[0][14 - i] * partial[0][14 - i])
* CDEF_DIV_TABLE[i + 1];
cost[4] += (partial[4][i] * partial[4][i]
+ partial[4][14 - i] * partial[4][14 - i])
* CDEF_DIV_TABLE[i + 1];
}
cost[0] += partial[0][7] * partial[0][7] * CDEF_DIV_TABLE[8];
cost[4] += partial[4][7] * partial[4][7] * CDEF_DIV_TABLE[8];
for i in (1..8).step_by(2) {
for j in 0..5 {
cost[i] += partial[i][3 + j] * partial[i][3 + j];
}
cost[i] *= CDEF_DIV_TABLE[8];
for j in 0..3 {
cost[i] += (partial[i][j] * partial[i][j]
+ partial[i][10 - j] * partial[i][10 - j])
* CDEF_DIV_TABLE[2 * j + 2];
}
}
let (best_dir, best_cost) = first_max_element(&cost);
// Difference between the optimal variance and the variance along the
// orthogonal direction. Again, the sum(x^2) terms cancel out.
// We'd normally divide by 840, but dividing by 1024 is close enough
    // for what we're going to do with this.
*var = ((best_cost - cost[(best_dir + 4) & 7]) >> 10) as u32;
best_dir as i32
}
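  // Note on the weighting above: the horizontal and vertical accumulators
  // (partial[2], partial[6]) always sum 8 pixels per line, while the diagonal
  // and semi-diagonal accumulators sum fewer pixels on their edge lines, so
  // each squared sum is scaled by CDEF_DIV_TABLE (840/n) for its line length.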
#[inline(always)]
fn constrain(diff: i32, threshold: i32, damping: i32) -> i32 {
if threshold != 0 {
let shift = cmp::max(0, damping - msb(threshold));
let magnitude =
cmp::min(diff.abs(), cmp::max(0, threshold - (diff.abs() >> shift)));
if diff < 0 {
-magnitude
} else {
magnitude
}
} else {
0
}
}
#[cold_for_target_arch("x86_64")]
#[allow(clippy::erasing_op, clippy::identity_op, clippy::neg_multiply)]
pub(crate) unsafe fn cdef_filter_block<T: Pixel>(
dst: &mut PlaneRegionMut<'_, T>, input: *const u16, istride: isize,
pri_strength: i32, sec_strength: i32, dir: usize, damping: i32,
bit_depth: usize, xdec: usize, ydec: usize, _cpu: CpuFeatureLevel,
) {
let xsize = (8 >> xdec) as isize;
let ysize = (8 >> ydec) as isize;
let coeff_shift = bit_depth as usize - 8;
let cdef_pri_taps = [[4, 2], [3, 3]];
let cdef_sec_taps = [[2, 1], [2, 1]];
let pri_taps = cdef_pri_taps[((pri_strength >> coeff_shift) & 1) as usize];
let sec_taps = cdef_sec_taps[((pri_strength >> coeff_shift) & 1) as usize];
let cdef_directions = [
[-1 * istride + 1, -2 * istride + 2],
[0 * istride + 1, -1 * istride + 2],
[0 * istride + 1, 0 * istride + 2],
[0 * istride + 1, 1 * istride + 2],
[1 * istride + 1, 2 * istride + 2],
[1 * istride + 0, 2 * istride + 1],
[1 * istride + 0, 2 * istride + 0],
[1 * istride + 0, 2 * istride - 1],
];
for i in 0..ysize {
for j in 0..xsize {
let ptr_in = input.offset(i * istride + j);
let x = *ptr_in;
        let mut sum = 0i32;
let mut max = x;
let mut min = x;
for k in 0..2usize {
let cdef_dirs = [
cdef_directions[dir][k],
cdef_directions[(dir + 2) & 7][k],
cdef_directions[(dir + 6) & 7][k],
];
let pri_tap = pri_taps[k];
let p =
[*ptr_in.offset(cdef_dirs[0]), *ptr_in.offset(-cdef_dirs[0])];
for p_elem in p.iter() {
sum += pri_tap
* constrain(
i32::cast_from(*p_elem) - i32::cast_from(x),
pri_strength,
damping,
);
if *p_elem != CDEF_VERY_LARGE {
max = cmp::max(*p_elem, max);
}
min = cmp::min(*p_elem, min);
}
let s = [
*ptr_in.offset(cdef_dirs[1]),
*ptr_in.offset(-cdef_dirs[1]),
*ptr_in.offset(cdef_dirs[2]),
*ptr_in.offset(-cdef_dirs[2]),
];
let sec_tap = sec_taps[k];
for s_elem in s.iter() {
if *s_elem != CDEF_VERY_LARGE {
max = cmp::max(*s_elem, max);
}
min = cmp::min(*s_elem, min);
sum += sec_tap
* constrain(
i32::cast_from(*s_elem) - i32::cast_from(x),
sec_strength,
damping,
);
}
}
let v = i32::cast_from(x) + ((8 + sum - (sum < 0) as i32) >> 4);
dst[i as usize][j as usize] =
T::cast_from(clamp(v, min as i32, max as i32));
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn check_max_element() {
assert_eq!(first_max_element(&[-1, -1, 1, 2, 3, 4, 6, 6]), (6, 6));
assert_eq!(first_max_element(&[-1, -1, 1, 2, 3, 4, 7, 6]), (6, 7));
assert_eq!(first_max_element(&[0, 0]), (0, 0));
}
}
}
// We use the variance of an 8x8 block to adjust the effective filter strength.
#[inline]
fn adjust_strength(strength: i32, var: i32) -> i32 {
let i = if (var >> 6) != 0 { cmp::min(msb(var >> 6), 12) } else { 0 };
if var != 0 {
(strength * (4 + i) + 8) >> 4
} else {
0
}
}
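// For example (illustrative): with var = 256, var >> 6 = 4 and msb(4) = 2, so
// i = 2 and a strength of 16 is reduced to (16 * 6 + 8) >> 4 = 6, while a
// block with var = 0 gets strength 0 (no filtering). High-variance blocks
// therefore keep close to full strength and flat blocks are filtered gently.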
// For convenience of use alongside cdef_filter_superblock, we assume
// in_frame is padded. Blocks are not scanned outside the block
// boundaries (padding is untouched here).
pub fn cdef_analyze_superblock_range<T: Pixel>(
fi: &FrameInvariants<T>, in_frame: &Frame<u16>, blocks: &TileBlocks<'_>,
sb_w: usize, sb_h: usize,
) -> Vec<CdefDirections> {
let mut ret = Vec::<CdefDirections>::with_capacity(sb_h * sb_w);
for sby in 0..sb_h {
for sbx in 0..sb_w {
let sbo = TileSuperBlockOffset(SuperBlockOffset { x: sbx, y: sby });
ret.push(cdef_analyze_superblock(fi, in_frame, blocks, sbo));
}
}
ret
}
// For convenience of use alongside cdef_filter_superblock, we assume
// in_frame is padded. Blocks are not scanned outside the block
// boundaries (padding is untouched here).
pub fn cdef_analyze_superblock<T: Pixel>(
fi: &FrameInvariants<T>, in_frame: &Frame<u16>, blocks: &TileBlocks<'_>,
sbo: TileSuperBlockOffset,
) -> CdefDirections {
let coeff_shift = fi.sequence.bit_depth as usize - 8;
let mut dir: CdefDirections =
CdefDirections { dir: [[0; 8]; 8], var: [[0; 8]; 8] };
// Each direction block is 8x8 in y, and direction computation only looks at y
for by in 0..8 {
for bx in 0..8 {
let block_offset = sbo.block_offset(bx << 1, by << 1);
if block_offset.0.x < blocks.cols() && block_offset.0.y < blocks.rows() {
let skip = blocks[block_offset].skip
& blocks[sbo.block_offset(2 * bx + 1, 2 * by)].skip
& blocks[sbo.block_offset(2 * bx, 2 * by + 1)].skip
& blocks[sbo.block_offset(2 * bx + 1, 2 * by + 1)].skip;
if !skip {
let mut var: u32 = 0;
let in_plane = &in_frame.planes[0];
let in_po = sbo.plane_offset(&in_plane.cfg);
let in_slice = in_plane.slice(in_po);
dir.dir[bx][by] = cdef_find_dir::<T>(
&in_slice.reslice(8 * bx as isize, 8 * by as isize),
&mut var,
coeff_shift,
fi.cpu_feature_level,
) as u8;
dir.var[bx][by] = var as i32;
}
}
}
}
dir
}
// Allocates and returns a new Frame with its own memory that is
// patterned on the decimation of the Frame backing the passed-in
// Tile. The width and height are in units of 8-pixel (undecimated)
// blocks, the minimum working unit of the CDEF filters.
pub fn cdef_block8_frame<T: Pixel>(
w_8: usize, h_8: usize, pattern_tile: &Tile<'_, T>,
) -> Frame<u16> {
Frame {
planes: [
{
let &PlaneConfig { xdec, ydec, .. } = pattern_tile.planes[0].plane_cfg;
Plane::new(w_8 << 3 >> xdec, h_8 << 3 >> ydec, xdec, ydec, 0, 0)
},
{
let &PlaneConfig { xdec, ydec, .. } = pattern_tile.planes[1].plane_cfg;
Plane::new(w_8 << 3 >> xdec, h_8 << 3 >> ydec, xdec, ydec, 0, 0)
},
{
let &PlaneConfig { xdec, ydec, .. } = pattern_tile.planes[2].plane_cfg;
Plane::new(w_8 << 3 >> xdec, h_8 << 3 >> ydec, xdec, ydec, 0, 0)
},
],
}
}
// Allocates and returns a new Frame with its own memory that is
// patterned on the decimation of the Frame backing the passed-in
// Tile. The width and height are in units of 8-pixel (undecimated)
// blocks, the minimum working unit of the CDEF filters, and the
// padding is in units of individual pixels. The full padding is
// applied even to decimated planes. The contents of the tile,
// beginning at the passed in superblock offset, are copied into the
// new Frame. The padding is also filled from the passed in Tile,
// where pixels are available. Those portions of the new Frame that
// do not overlap visible pixels in the passed-in tile are filled with
// the CDEF_VERY_LARGE flag.
pub fn cdef_padded_tile_copy<T: Pixel>(
tile: &Tile<'_, T>, sbo: TileSuperBlockOffset, w_8: usize, h_8: usize,
pad: usize, planes: usize,
) -> Frame<u16> {
let ipad = pad as isize;
let mut out = {
Frame {
planes: {
let new_plane = |pli: usize| {
let &PlaneConfig { xdec, ydec, .. } = tile.planes[pli].plane_cfg;
Plane::new(w_8 << 3 >> xdec, h_8 << 3 >> ydec, xdec, ydec, pad, pad)
};
[new_plane(0), new_plane(1), new_plane(2)]
},
}
};
// Copy data into padded frame
for pli in 0..planes {
let PlaneOffset { x, y } = sbo.plane_offset(tile.planes[pli].plane_cfg);
let in_width = tile.planes[pli].rect().width as isize;
let in_height = tile.planes[pli].rect().height as isize;
let out_width = out.planes[pli].cfg.width;
let out_height = out.planes[pli].cfg.height;
// we copy pixels from the input tile for padding, but don't
// exceed the bounds of the tile (do not contend with other
// threads!)
let mut out_region =
out.planes[pli].region_mut(Area::StartingAt { x: -ipad, y: -ipad });
for yi in 0..(out_height + pad * 2) as isize {
let out_row = &mut out_region[yi as usize];
if y + yi - ipad < 0 || y + yi - ipad >= in_height as isize {
// above or below the visible frame, fill with flag.
// This flag needs to go away (since it forces us to use a 16-bit range)
// but that requires some deep changes to the filtering code
// and buffer offsetting in loop filter RDO
for xi in 0..out_width + pad * 2 {
out_row[xi] = CDEF_VERY_LARGE;
}
} else {
let in_row = &tile.planes[pli][(y + yi - ipad) as usize];
for xi in 0..out_width as isize + ipad * 2 {
if x + xi - ipad >= 0 && x + xi - ipad < in_width as isize {
out_row[xi as usize] =
u16::cast_from(in_row[(x + xi - ipad) as usize]);
} else {
out_row[xi as usize] = CDEF_VERY_LARGE;
}
}
}
}
}
out
}
// Allocates and returns a new Frame with its own memory that is
// padded with the input frame
pub fn cdef_padded_frame_copy<T: Pixel>(in_frame: &Frame<T>) -> Frame<u16> {
let mut out: Frame<u16> = Frame {
planes: {
let new_plane = |pli: usize| {
Plane::new(
in_frame.planes[pli].cfg.width,
in_frame.planes[pli].cfg.height,
in_frame.planes[pli].cfg.xdec,
in_frame.planes[pli].cfg.ydec,
2,
2,
)
};
[new_plane(0), new_plane(1), new_plane(2)]
},
};
for p in 0..MAX_PLANES {
let rec_w = in_frame.planes[p].cfg.width;
let rec_h = in_frame.planes[p].cfg.height;
    /* It's a monochrome frame but we have no way to signal that */
if rec_w == 0 || rec_h == 0 {
break;
};
let mut out_region = out.planes[p].region_mut(Area::Rect {
x: -2,
y: -2,
width: rec_w + 4,
height: rec_h + 4,
});
for row in 0..out_region.rect().height {
// pad first two elements of current row
{
let out_row = &mut out_region[row][..2];
out_row[0] = CDEF_VERY_LARGE;
out_row[1] = CDEF_VERY_LARGE;
}
// pad out end of current row
{
let out_row = &mut out_region[row][rec_w + 2..];
for x in out_row {
*x = CDEF_VERY_LARGE;
}
}
// copy current row from input frame if we're in data, or pad if we're in first two rows/last N rows
{
let out_row = &mut out_region[row][2..rec_w + 2];
if row < 2 || row >= rec_h + 2 {
for x in out_row {
*x = CDEF_VERY_LARGE;
}
} else {
let in_stride = in_frame.planes[p].cfg.stride;
for (x, y) in out_row.iter_mut().zip(
in_frame.planes[p].data_origin()
[(row - 2) * in_stride..(row - 1) * in_stride]
.iter(),
) {
*x = u16::cast_from(*y);
}
}
}
}
}
out
}
// We assume in is padded, and the area we'll write out is at least as
// large as the unpadded area of in
// cdef_index is taken from the block context
pub fn cdef_filter_superblock<T: Pixel, U: Pixel>(
fi: &FrameInvariants<T>, in_frame: &Frame<u16>, out: &mut TileMut<'_, U>,
blocks: &TileBlocks<'_>, sbo: TileSuperBlockOffset, cdef_index: u8,
cdef_dirs: &CdefDirections,
) {
let bit_depth = fi.sequence.bit_depth;
let coeff_shift = fi.sequence.bit_depth as i32 - 8;
let cdef_damping = fi.cdef_damping as i32;
let cdef_y_strength = fi.cdef_y_strengths[cdef_index as usize];
let cdef_uv_strength = fi.cdef_uv_strengths[cdef_index as usize];
let cdef_pri_y_strength = (cdef_y_strength / CDEF_SEC_STRENGTHS) as i32;
let mut cdef_sec_y_strength = (cdef_y_strength % CDEF_SEC_STRENGTHS) as i32;
let cdef_pri_uv_strength = (cdef_uv_strength / CDEF_SEC_STRENGTHS) as i32;
let planes = if fi.sequence.chroma_sampling == Cs400 { 1 } else { 3 };
let mut cdef_sec_uv_strength =
(cdef_uv_strength % CDEF_SEC_STRENGTHS) as i32;
if cdef_sec_y_strength == 3 {
cdef_sec_y_strength += 1;
}
if cdef_sec_uv_strength == 3 {
cdef_sec_uv_strength += 1;
}
// Each direction block is 8x8 in y, potentially smaller if subsampled in chroma
for by in 0..8 {
for bx in 0..8 {
let block_offset = sbo.block_offset(bx << 1, by << 1);
if block_offset.0.x < blocks.cols() && block_offset.0.y < blocks.rows() {
let skip = blocks[block_offset].skip
& blocks[sbo.block_offset(2 * bx + 1, 2 * by)].skip
& blocks[sbo.block_offset(2 * bx, 2 * by + 1)].skip
& blocks[sbo.block_offset(2 * bx + 1, 2 * by + 1)].skip;
let dir = cdef_dirs.dir[bx][by];
let var = cdef_dirs.var[bx][by];
for p in 0..planes {
let out_plane = &mut out.planes[p];
let in_plane = &in_frame.planes[p];
let in_po = sbo.plane_offset(&in_plane.cfg);
let xdec = in_plane.cfg.xdec;
let ydec = in_plane.cfg.ydec;
let in_stride = in_plane.cfg.stride;
let in_slice = &in_plane.slice(in_po);
let out_region =
&mut out_plane.subregion_mut(Area::BlockStartingAt {
bo: sbo.block_offset(0, 0).0,
});
let xsize = 8 >> xdec;
let ysize = 8 >> ydec;
if !skip {
let local_pri_strength;
let local_sec_strength;
let mut local_damping: i32 = cdef_damping + coeff_shift;
// See `Cdef_Uv_Dir` constant lookup table in Section 7.15.1
// <https://aomediacodec.github.io/av1-spec/#cdef-block-process>
let local_dir = if p == 0 {
local_pri_strength =
adjust_strength(cdef_pri_y_strength << coeff_shift, var);
local_sec_strength = cdef_sec_y_strength << coeff_shift;
if cdef_pri_y_strength != 0 {
dir as usize
} else {
0
}
} else {
local_pri_strength = cdef_pri_uv_strength << coeff_shift;
local_sec_strength = cdef_sec_uv_strength << coeff_shift;
local_damping -= 1;
if cdef_pri_uv_strength != 0 {
if xdec != ydec {
[7, 0, 2, 4, 5, 6, 6, 6][dir as usize]
} else {
dir as usize
}
} else {
0
}
};
unsafe {
let PlaneConfig { ypad, xpad, .. } = in_slice.plane.cfg;
assert!(
in_slice.rows_iter().len() + ypad
>= ((8 * by) >> ydec) + ysize + 2
);
assert!(in_slice.x - 2 >= -(xpad as isize));
assert!(in_slice.y - 2 >= -(ypad as isize));
let mut dst = out_region.subregion_mut(Area::BlockRect {
bo: BlockOffset { x: 2 * bx, y: 2 * by },
width: xsize,
height: ysize,
});
let input =
in_slice[(8 * by) >> ydec][(8 * bx) >> xdec..].as_ptr();
cdef_filter_block(
&mut dst,
input,
in_stride as isize,
local_pri_strength,
local_sec_strength,
local_dir,
local_damping,
bit_depth,
xdec,
ydec,
fi.cpu_feature_level,
);
}
} else {
// we need to copy input to output
let in_block =
in_slice.subslice((8 * bx) >> xdec, (8 * by) >> ydec);
let mut out_block = out_region.subregion_mut(Area::BlockRect {
bo: BlockOffset { x: 2 * bx, y: 2 * by },
width: xsize,
height: ysize,
});
for i in 0..ysize {
for j in 0..xsize {
out_block[i][j] = U::cast_from(in_block[i][j]);
}
}
}
}
}
}
}
}
#[hawktracer(cdef_filter_tile_group)]
pub fn cdef_filter_tile_group<T: Pixel>(
fi: &FrameInvariants<T>, fs: &mut FrameState<T>, blocks: &mut FrameBlocks,
) {
let ti = &fi.tiling;
let in_padded_frame = cdef_padded_frame_copy(&fs.rec);
ti.tile_iter_mut(fs, blocks).collect::<Vec<_>>().into_par_iter().for_each(
|mut ctx| {
cdef_filter_tile(
fi,
&mut ctx.ts.rec,
&ctx.tb.as_const(),
&in_padded_frame,
);
},
);
}
// Input to this process is the array CurrFrame of reconstructed samples and padded input Frame.
// Output from this process is the array CdefFrame containing deringed samples.
// The purpose of CDEF is to perform deringing based on the detected direction of blocks.
// CDEF parameters are stored for each 64 by 64 block of pixels.
// The CDEF filter is applied on each 8 by 8 block of pixels.
// Reference: http://av1-spec.argondesign.com/av1-spec/av1-spec.html#cdef-process
#[hawktracer(cdef_filter_tile)]
pub fn cdef_filter_tile<T: Pixel>(
fi: &FrameInvariants<T>, rec: &mut TileMut<'_, T>, tb: &TileBlocks,
in_padded_frame: &Frame<u16>,
) {
// Each filter block is 64x64, except right and/or bottom for non-multiple-of-64 sizes.
// FIXME: 128x128 SB support will break this, we need FilterBlockOffset etc.
let planes = if fi.sequence.chroma_sampling == Cs400 { 1 } else { 3 };
let fb_width = (rec.planes[0].rect().width + 63) / 64;
let fb_height = (rec.planes[0].rect().height + 63) / 64;
// Construct a padded copy of part of the input tile
let mut cdef_frame: Frame<u16> = Frame {
planes: {
let new_plane = |pli: usize| {
Plane::new(
(fb_width * 64) >> rec.planes[pli].plane_cfg.xdec,
(fb_height * 64) >> rec.planes[pli].plane_cfg.ydec,
rec.planes[pli].plane_cfg.xdec,
rec.planes[pli].plane_cfg.ydec,
2,
2,
)
};
[new_plane(0), new_plane(1), new_plane(2)]
},
};
for p in 0..planes {
let rec_w = rec.planes[p].rect().width;
let rec_h = rec.planes[p].rect().height;
let mut cdef_region = cdef_frame.planes[p].region_mut(Area::Rect {
x: -2,
y: -2,
width: rec_w + 4,
height: rec_h + 4,
});
let in_padded_region = in_padded_frame.planes[p].region(Area::Rect {
x: rec.planes[p].rect().x - 2,
y: rec.planes[p].rect().y - 2,
width: rec_w + 4,
height: rec_h + 4,
});
for row in 0..cdef_region.rect().height {
let cdef_row = &mut cdef_region[row][..];
let in_padded_row = &in_padded_region[row];
cdef_row.copy_from_slice(in_padded_row);
}
}
// Perform actual CDEF, using the padded copy as source, and the input rec vector as destination.
for fby in 0..fb_height {
for fbx in 0..fb_width {
let sbo = TileSuperBlockOffset(SuperBlockOffset { x: fbx, y: fby });
let cdef_index = tb.get_cdef(sbo);
let cdef_dirs = cdef_analyze_superblock(fi, &cdef_frame, tb, sbo);
cdef_filter_superblock(
fi,
&cdef_frame,
rec,
tb,
sbo,
cdef_index,
&cdef_dirs,
);
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::api::*;
use crate::encoder::*;
fn create_frame() -> (Frame<u16>, FrameInvariants<u16>) {
let mut frame = Frame::<u16>::new(512, 512, ChromaSampling::Cs420);
// in this test, each pixel contains the sum of its row and column indices:
//
// 0 1 2 3 4 . .
// 1 2 3 4 5 . .
// 2 3 4 5 6 . .
// 3 4 5 6 7 . .
// 4 5 6 7 8 . .
// . . . . . . .
// . . . . . . .
for plane in &mut frame.planes {
let PlaneConfig { width, height, .. } = plane.cfg;
let mut slice = plane.mut_slice(PlaneOffset::default());
for col in 0..width {
for row in 0..height {
slice[row][col] = (row + col) as u16;
}
}
}
let config = EncoderConfig {
width: 512,
height: 512,
quantizer: 100,
speed_settings: SpeedSettings::from_preset(10),
..Default::default()
};
let sequence = Sequence::new(&config);
let fi = FrameInvariants::new(config, sequence);
(frame, fi)
}
#[test]
fn test_padded_tile_copy() {
let (frame, _fi) = create_frame();
let tile = frame.as_tile();
// a super-block in the middle (not near frame borders)
let pad = 2;
let padded_frame = cdef_padded_tile_copy(
&tile,
TileSuperBlockOffset(SuperBlockOffset { x: 1, y: 2 }),
64 >> 3,
64 >> 3,
pad,
3,
);
// index (0, 0) of padded_frame should match index (64, 128) of the source
// frame, have 2 cols and rows padding from the source frame on all sides,
// and have a size of (64, 64)
assert_eq!(padded_frame.planes[0].cfg.width, 64);
assert_eq!(padded_frame.planes[0].cfg.height, 64);
let po = PlaneOffset { x: 62, y: 126 };
let in_luma_slice = frame.planes[0].slice(po);
let out_luma_region =
padded_frame.planes[0].region(Area::StartingAt { x: -2, y: -2 });
// this region does not overlap the frame padding, so it contains only
// values from the input frame
for row in 0..68 {
for col in 0..68 {
let in_pixel = in_luma_slice[row][col];
let out_pixel = out_luma_region[row][col];
assert_eq!(in_pixel, out_pixel);
}
}
}
#[test]
fn test_padded_tile_copy_outside_input() {
let (frame, _fi) = create_frame();
let tile = frame.as_tile();
// the top-right super-block (near top and right frame borders)
let pad = 2;
let padded_frame = cdef_padded_tile_copy(
&tile,
TileSuperBlockOffset(SuperBlockOffset { x: 7, y: 0 }),
64 >> 3,
64 >> 3,
pad,
3,
);
// index (0, 0) of padded_frame should match index (448, 0) of the source
// frame, have 2 cols/rows from the source frame left and below, 2
// cols/rows of padding value right and above, and have a size of (64, 64)
assert_eq!(padded_frame.planes[0].cfg.width, 64);
assert_eq!(padded_frame.planes[0].cfg.height, 64);
let po = PlaneOffset { x: 446, y: 0 };
let in_luma_slice = frame.planes[0].slice(po);
let out_luma_slice =
padded_frame.planes[0].region(Area::StartingAt { x: -2, y: 0 });
// this region does not overlap the frame padding, so it contains only
// values from the input frame
for row in 0..66 {
for col in 0..66 {
let in_pixel = in_luma_slice[row][col];
let out_pixel = out_luma_slice[row][col];
assert_eq!(out_pixel, in_pixel);
}
// right frame padding
for col in 66..68 {
let out_pixel = out_luma_slice[row][col];
assert_eq!(out_pixel, CDEF_VERY_LARGE);
}
}
// top frame padding
let out_luma_slice =
padded_frame.planes[0].region(Area::StartingAt { x: -2, y: -2 });
for row in 0..2 {
for col in 0..68 {
let out_pixel = out_luma_slice[row][col];
assert_eq!(out_pixel, CDEF_VERY_LARGE);
}
}
}
#[test]
fn test_padded_frame_copy() {
let (frame, _fi) = create_frame();
let padded_frame = cdef_padded_frame_copy(&frame);
let rec_w = padded_frame.planes[0].cfg.width;
let rec_h = padded_frame.planes[0].cfg.height;
assert_eq!(rec_w, 512);
assert_eq!(rec_h, 512);
let po = PlaneOffset { x: 0, y: 0 };
let in_luma_slice = frame.planes[0].slice(po);
let out_luma_region =
padded_frame.planes[0].region(Area::StartingAt { x: -2, y: -2 });
    for row in 0..rec_h + 4 {
      for col in 0..rec_w + 4 {
let out_pixel = out_luma_region[row][col];
if row < 2 || col < 2 || row >= rec_h + 2 || col >= rec_w + 2 {
// padding region
assert_eq!(out_pixel, CDEF_VERY_LARGE);
} else {
// values from the input frame
let in_pixel = in_luma_slice[row - 2][col - 2];
assert_eq!(in_pixel, out_pixel);
}
}
}
}
}
| 33.960739 | 106 | 0.56882 |
14294cb020df223d7753fc7d38c31ea323429c86 | 136 | extern crate lolbench;

#[test]
fn end_to_end() {
    lolbench::end_to_end_test("quickcheck_0_6_1", "shrink_unit_8_tuple");
} | 45.333333 | 78 | 0.683824 |
f5deabb01dfcb436ff31aea520cb809696e26fe2 | 4,472 | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::writer::{
Error, Inner, InnerValueType, InspectType, InspectTypeInternal, NumericProperty, Property,
State,
};
use tracing::error;
#[cfg(test)]
use {inspect_format::Block, mapped_vmo::Mapping, std::sync::Arc};
/// Inspect uint property data type.
///
/// NOTE: do not rely on PartialEq implementation for true comparison.
/// Instead leverage the reader.
///
/// NOTE: Operations on a Default value are no-ops.
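///
/// # Example
///
/// A minimal usage sketch, modeled on the unit test at the bottom of this
/// file (assumes a `Node` obtained from an inspector root):
///
/// ```ignore
/// let property = node.create_uint("property", 1);
/// property.add(4); // value is now 5
/// property.subtract(3); // value is now 2
/// assert_eq!(property.get().unwrap(), 2);
/// ```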
#[derive(Debug, PartialEq, Eq, Default)]
pub struct UintProperty {
inner: Inner<InnerValueType>,
}
impl InspectType for UintProperty {}
impl InspectTypeInternal for UintProperty {
fn new(state: State, block_index: u32) -> Self {
Self { inner: Inner::new(state, block_index) }
}
fn is_valid(&self) -> bool {
self.inner.is_valid()
}
fn new_no_op() -> Self {
Self { inner: Inner::None }
}
}
impl<'t> Property<'t> for UintProperty {
type Type = u64;
fn set(&self, value: u64) {
if let Some(ref inner_ref) = self.inner.inner_ref() {
inner_ref
.state
.try_lock()
.and_then(|state| state.set_uint_metric(inner_ref.block_index, value))
.unwrap_or_else(|err| {
error!(?err, "Failed to set property");
});
}
}
}
impl NumericProperty for UintProperty {
type Type = u64;
fn add(&self, value: u64) {
if let Some(ref inner_ref) = self.inner.inner_ref() {
inner_ref
.state
.try_lock()
.and_then(|state| state.add_uint_metric(inner_ref.block_index, value))
.unwrap_or_else(|err| {
error!(?err, "Failed to set property");
});
}
}
fn subtract(&self, value: u64) {
if let Some(ref inner_ref) = self.inner.inner_ref() {
inner_ref
.state
.try_lock()
.and_then(|state| state.subtract_uint_metric(inner_ref.block_index, value))
.unwrap_or_else(|err| {
error!(?err, "Failed to set property");
});
}
}
fn get(&self) -> Result<u64, Error> {
if let Some(ref inner_ref) = self.inner.inner_ref() {
inner_ref
.state
.try_lock()
.and_then(|state| state.get_uint_metric(inner_ref.block_index))
} else {
Err(Error::NoOp("Property"))
}
}
}
#[cfg(test)]
impl UintProperty {
/// Returns the [`Block`][Block] associated with this value.
pub fn get_block(&self) -> Option<Block<Arc<Mapping>>> {
self.inner.inner_ref().and_then(|inner_ref| {
inner_ref
.state
.try_lock()
.and_then(|state| state.heap().get_block(inner_ref.block_index))
.ok()
})
}
/// Returns the index of the value's block in the VMO.
pub fn block_index(&self) -> u32 {
self.inner.inner_ref().unwrap().block_index
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::writer::{testing_utils::get_state, Node};
use inspect_format::BlockType;
#[test]
fn uint_property() {
// Create and use a default value.
let default = UintProperty::default();
default.add(1);
let state = get_state(4096);
let root = Node::new_root(state);
let node = root.create_child("node");
let node_block = node.get_block().unwrap();
{
let property = node.create_uint("property", 1);
let property_block = property.get_block().unwrap();
assert_eq!(property_block.block_type(), BlockType::UintValue);
assert_eq!(property_block.uint_value().unwrap(), 1);
assert_eq!(node_block.child_count().unwrap(), 1);
property.set(5);
assert_eq!(property_block.uint_value().unwrap(), 5);
assert_eq!(property.get().unwrap(), 5);
property.subtract(3);
assert_eq!(property_block.uint_value().unwrap(), 2);
property.add(8);
assert_eq!(property_block.uint_value().unwrap(), 10);
}
assert_eq!(node_block.child_count().unwrap(), 0);
}
}
| 29.615894 | 94 | 0.564401 |
e58baece143e26c6fbdaa561971e42e024bb1f09 | 6,082 | <?xml version="1.0" encoding="UTF-8"?>
<WebElementEntity>
<description></description>
<name>input_Password_password</name>
<tag></tag>
<elementGuidId>b61e60c7-52ef-48e6-8a31-cd6adfd93cd5</elementGuidId>
<selectorCollection>
<entry>
<key>XPATH</key>
<value>//input[@name='password']</value>
</entry>
</selectorCollection>
<selectorMethod>XPATH</selectorMethod>
<useRalativeImagePath>false</useRalativeImagePath>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>tag</name>
<type>Main</type>
<value>input</value>
</webElementProperties>
<webElementProperties>
<isSelected>true</isSelected>
<matchCondition>equals</matchCondition>
<name>name</name>
<type>Main</type>
<value>password</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>class</name>
<type>Main</type>
<value>gigya-input-password</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>tabindex</name>
<type>Main</type>
<value>0</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>formnovalidate</name>
<type>Main</type>
<value>formnovalidate</value>
</webElementProperties>
<webElementProperties>
<isSelected>true</isSelected>
<matchCondition>equals</matchCondition>
<name>type</name>
<type>Main</type>
<value>password</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>data-kwimpalastatus</name>
<type>Main</type>
<value>alive</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>data-kwimpalaid</name>
<type>Main</type>
<value>1562611961603-8</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>gigya-expression:data-gigya-placeholder</name>
<type>Main</type>
<value>screenset.translations['PASSWORD_132128826476804690_PLACEHOLDER']</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>gigya-expression:aria-label</name>
<type>Main</type>
<value>screenset.translations['PASSWORD_132128826476804690_PLACEHOLDER']</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>data-screenset-element-id</name>
<type>Main</type>
<value>__gig_template_element_8_1580306183997</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>data-screenset-element-id-publish</name>
<type>Main</type>
<value>false</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>data-screenset-roles</name>
<type>Main</type>
<value>instance</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>data-gigya-name</name>
<type>Main</type>
<value>password</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>aria-required</name>
<type>Main</type>
<value>true</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>data-gigya-placeholder</name>
<type>Main</type>
<value>Password</value>
</webElementProperties>
<webElementProperties>
<isSelected>true</isSelected>
<matchCondition>equals</matchCondition>
<name>placeholder</name>
<type>Main</type>
<value>Password *</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>aria-label</name>
<type>Main</type>
<value>Password</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>xpath</name>
<type>Main</type>
<value>id("gigya-login-form")/div[@class="gigya-layout-row with-divider"]/div[@class="gigya-layout-cell responsive with-site-login"]/div[@class="gigya-composite-control gigya-composite-control-password"]/input[@class="gigya-input-password"]</value>
</webElementProperties>
<webElementXpaths>
<isSelected>true</isSelected>
<matchCondition>equals</matchCondition>
<name>xpath:attributes</name>
<type>Main</type>
<value>//input[@name='password']</value>
</webElementXpaths>
<webElementXpaths>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>xpath:idRelative</name>
<type>Main</type>
<value>//form[@id='gigya-login-form']/div[2]/div[3]/div[2]/input</value>
</webElementXpaths>
<webElementXpaths>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>xpath:position</name>
<type>Main</type>
<value>//div[2]/input</value>
</webElementXpaths>
</WebElementEntity>
| 35.776471 | 305 | 0.660967 |
64b4c7bbaff0e3a0e9ccb77ef462fb73169c8390 | 31,596 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use super::*;
use crate::peer_manager::PeerManagerRequest;
use core::str::FromStr;
use crypto::{ed25519::compat, test_utils::TEST_SEED, x25519};
use futures::{FutureExt, SinkExt, TryFutureExt};
use memsocket::MemorySocket;
use rand::{rngs::StdRng, SeedableRng};
use std::io;
use tokio::runtime::Runtime;
use tokio_retry::strategy::FixedInterval;
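// Spawns a ConnectivityManager with a single eligible seed peer and returns
// the channel endpoints the tests use to drive it: the peer-manager request
// receiver, the peer-manager notification sender, the connectivity request
// sender, and the tick sender that triggers connectivity checks.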
fn setup_conn_mgr(
rt: &mut Runtime,
seed_peer_id: PeerId,
) -> (
channel::Receiver<PeerManagerRequest<MemorySocket>>,
channel::Sender<PeerManagerNotification<MemorySocket>>,
channel::Sender<ConnectivityRequest>,
channel::Sender<()>,
) {
let (peer_mgr_reqs_tx, peer_mgr_reqs_rx): (
channel::Sender<PeerManagerRequest<MemorySocket>>,
_,
) = channel::new_test(0);
let (peer_mgr_notifs_tx, peer_mgr_notifs_rx) = channel::new_test(0);
let (conn_mgr_reqs_tx, conn_mgr_reqs_rx) = channel::new_test(0);
let (ticker_tx, ticker_rx) = channel::new_test(0);
let mut rng = StdRng::from_seed(TEST_SEED);
let (_, signing_public_key) = compat::generate_keypair(&mut rng);
let (_, identity_public_key) = x25519::compat::generate_keypair(&mut rng);
let conn_mgr = {
ConnectivityManager::new(
Arc::new(RwLock::new(
vec![(
seed_peer_id,
NetworkPublicKeys {
identity_public_key,
signing_public_key,
},
)]
.into_iter()
.collect(),
)),
ticker_rx,
PeerManagerRequestSender::new(peer_mgr_reqs_tx),
peer_mgr_notifs_rx,
conn_mgr_reqs_rx,
FixedInterval::from_millis(100),
300, /* ms */
)
};
rt.spawn(conn_mgr.start().boxed().unit_error().compat());
(
peer_mgr_reqs_rx,
peer_mgr_notifs_tx,
conn_mgr_reqs_tx,
ticker_tx,
)
}
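// Generates a fresh random peer id paired with network public keys. Note the
// keys are derived from the fixed TEST_SEED, so only the peer id varies
// between calls.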
fn gen_peer() -> (PeerId, NetworkPublicKeys) {
let peer_id = PeerId::random();
let mut rng = StdRng::from_seed(TEST_SEED);
let (_, signing_public_key) = compat::generate_keypair(&mut rng);
let (_, identity_public_key) = x25519::compat::generate_keypair(&mut rng);
(
peer_id,
NetworkPublicKeys {
identity_public_key,
signing_public_key,
},
)
}
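// Queries the connectivity manager for the current number of peers in its
// dial queue, using a oneshot channel for the response.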
async fn get_dial_queue_size(conn_mgr_reqs_tx: &mut channel::Sender<ConnectivityRequest>) -> usize {
let (queue_size_tx, queue_size_rx) = oneshot::channel();
conn_mgr_reqs_tx
.send(ConnectivityRequest::GetDialQueueSize(queue_size_tx))
.await
.unwrap();
queue_size_rx.await.unwrap()
}
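// Expects a DisconnectPeer request for `peer_id`, replies with `result`, and,
// on success, follows up with the LostPeer notification the real peer manager
// would emit.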
async fn expect_disconnect_request<'a, TSubstream>(
peer_mgr_reqs_rx: &'a mut channel::Receiver<PeerManagerRequest<TSubstream>>,
peer_mgr_notifs_tx: &'a mut channel::Sender<PeerManagerNotification<TSubstream>>,
peer_id: PeerId,
address: Multiaddr,
result: Result<(), PeerManagerError>,
) where
TSubstream: Debug,
{
let success = result.is_ok();
match peer_mgr_reqs_rx.next().await.unwrap() {
PeerManagerRequest::DisconnectPeer(p, error_tx) => {
assert_eq!(peer_id, p);
error_tx.send(result).unwrap();
}
_ => {
panic!("unexpected request to peer manager");
}
}
if success {
peer_mgr_notifs_tx
.send(PeerManagerNotification::LostPeer(peer_id, address))
.await
.unwrap();
}
}
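// Expects a DialPeer request for `peer_id` at `address`, replies with
// `result`, sends a NewPeer notification on success, and then waits for the
// dial queue to drain so that later ticks observe a consistent state.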
async fn expect_dial_request<'a, TSubstream>(
peer_mgr_reqs_rx: &'a mut channel::Receiver<PeerManagerRequest<TSubstream>>,
peer_mgr_notifs_tx: &'a mut channel::Sender<PeerManagerNotification<TSubstream>>,
conn_mgr_reqs_tx: &'a mut channel::Sender<ConnectivityRequest>,
peer_id: PeerId,
address: Multiaddr,
result: Result<(), PeerManagerError>,
) where
TSubstream: Debug,
{
let success = result.is_ok();
match peer_mgr_reqs_rx.next().await.unwrap() {
PeerManagerRequest::DialPeer(p, addr, error_tx) => {
assert_eq!(peer_id, p);
assert_eq!(address, addr);
error_tx.send(result).unwrap();
}
_ => {
panic!("unexpected request to peer manager");
}
}
if success {
info!(
"Sending NewPeer notification for peer: {}",
peer_id.short_str()
);
peer_mgr_notifs_tx
.send(PeerManagerNotification::NewPeer(peer_id, address))
.await
.unwrap();
}
// Wait for dial queue to be empty. Without this, it's impossible to guarantee that a completed
// dial is removed from a dial queue. We need this guarantee to see the effects of future
// triggers for connectivity check.
info!("Waiting for dial queue to be empty");
loop {
let queue_size = get_dial_queue_size(conn_mgr_reqs_tx).await;
if queue_size == 0 {
break;
}
}
}
#[test]
fn addr_change() {
::logger::try_init_for_testing();
let mut rt = Runtime::new().unwrap();
let seed_peer_id = PeerId::random();
info!("Seed peer_id is {}", seed_peer_id.short_str());
let (mut peer_mgr_reqs_rx, mut peer_mgr_notifs_tx, mut conn_mgr_reqs_tx, mut ticker_tx) =
setup_conn_mgr(&mut rt, seed_peer_id);
// Fake peer manager and discovery.
let f_peer_mgr = async move {
let seed_address = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9090").unwrap();
// Send address of seed peer.
info!("Sending address of seed peer");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
seed_peer_id,
vec![seed_address.clone()],
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager receives a request to connect to the seed peer.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_address.clone(),
Ok(()),
)
.await;
// Send request to connect to seed peer at old address. ConnectivityManager should not
// dial, since we are already connected at the new address. The absence of another dial
// attempt is hard to test explicitly. It will get implicitly tested if the dial
// attempt arrives in place of some other expected message in the future.
info!("Sending same address of seed peer");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
seed_peer_id,
vec![seed_address.clone()],
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
let seed_address_new = Multiaddr::from_str("/ip4/127.0.1.1/tcp/8080").unwrap();
// Send new address of seed peer.
info!("Sending new address of seed peer");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
seed_peer_id,
vec![seed_address_new.clone()],
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// We expect the peer which changed its address to also disconnect.
info!("Sending lost peer notification for seed peer at old address");
peer_mgr_notifs_tx
.send(PeerManagerNotification::LostPeer(
seed_peer_id,
seed_address,
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager then receives a request to connect to the seed peer at new address.
info!("Waiting to receive dial request to seed peer at new address");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_address_new,
Ok(()),
)
.await;
};
rt.block_on(f_peer_mgr.boxed().unit_error().compat())
.unwrap();
}
#[test]
fn lost_connection() {
::logger::try_init_for_testing();
let mut rt = Runtime::new().unwrap();
let seed_peer_id = PeerId::random();
info!("Seed peer_id is {}", seed_peer_id.short_str());
let (mut peer_mgr_reqs_rx, mut peer_mgr_notifs_tx, mut conn_mgr_reqs_tx, mut ticker_tx) =
setup_conn_mgr(&mut rt, seed_peer_id);
// Fake peer manager and discovery.
let f_peer_mgr = async move {
let seed_address = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9090").unwrap();
// Send address of seed peer.
info!("Sending address of seed peer");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
seed_peer_id,
vec![seed_address.clone()],
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager receives a request to connect to the seed peer.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_address.clone(),
Ok(()),
)
.await;
// Notify connectivity actor of loss of connection to seed_peer.
info!("Sending LostPeer event to signal connection loss");
peer_mgr_notifs_tx
.send(PeerManagerNotification::LostPeer(
seed_peer_id,
seed_address.clone(),
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager receives a request to connect to the seed peer after loss of
// connection.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_address.clone(),
Ok(()),
)
.await;
};
rt.block_on(f_peer_mgr.boxed().unit_error().compat())
.unwrap();
}
#[test]
fn disconnect() {
::logger::try_init_for_testing();
let mut rt = Runtime::new().unwrap();
let seed_peer_id = PeerId::random();
info!("Seed peer_id is {}", seed_peer_id.short_str());
let (mut peer_mgr_reqs_rx, mut peer_mgr_notifs_tx, mut conn_mgr_reqs_tx, mut ticker_tx) =
setup_conn_mgr(&mut rt, seed_peer_id);
let events_f = async move {
let seed_address = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9090").unwrap();
// Send address of seed peer.
info!("Sending address of seed peer");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
seed_peer_id,
vec![seed_address.clone()],
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager receives a request to connect to the seed peer.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_address.clone(),
Ok(()),
)
.await;
// Send request to make seed peer ineligible.
info!("Sending request to make seed peer ineligible");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateEligibleNodes(HashMap::new()))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager receives a request to connect to the seed peer.
info!("Waiting to receive disconnect request");
expect_disconnect_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
seed_peer_id,
seed_address.clone(),
Ok(()),
)
.await;
};
rt.block_on(events_f.boxed().unit_error().compat()).unwrap();
}
// Tests that connectivity manager retries dials and disconnects on failure.
#[test]
fn retry_on_failure() {
::logger::try_init_for_testing();
let mut rt = Runtime::new().unwrap();
let seed_peer_id = PeerId::random();
info!("Seed peer_id is {}", seed_peer_id.short_str());
let (mut peer_mgr_reqs_rx, mut peer_mgr_notifs_tx, mut conn_mgr_reqs_tx, mut ticker_tx) =
setup_conn_mgr(&mut rt, seed_peer_id);
let events_f = async move {
let seed_address = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9090").unwrap();
// Send address of seed peer.
info!("Sending address of seed peer");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
seed_peer_id,
vec![seed_address.clone()],
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager receives a request to connect to the seed peer.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_address.clone(),
Err(PeerManagerError::IoError(io::Error::from(
io::ErrorKind::ConnectionRefused,
))),
)
.await;
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager again receives a request to connect to the seed peer.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_address.clone(),
Ok(()),
)
.await;
// Send request to make seed peer ineligible.
info!("Sending request to make seed peer ineligible");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateEligibleNodes(HashMap::new()))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager receives a request to disconnect from the seed peer, which fails.
info!("Waiting to receive disconnect request");
expect_disconnect_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
seed_peer_id,
seed_address.clone(),
Err(PeerManagerError::IoError(io::Error::from(
io::ErrorKind::Interrupted,
))),
)
.await;
// Trigger connectivity check again.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager receives another request to disconnect from the seed peer, which now
// succeeds.
info!("Waiting to receive disconnect request");
expect_disconnect_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
seed_peer_id,
seed_address.clone(),
Ok(()),
)
.await;
};
rt.block_on(events_f.boxed().unit_error().compat()).unwrap();
}
#[test]
// Tests that if we dial an already connected peer or disconnect from an already disconnected
// peer, connectivity manager does not send any additional dial or disconnect requests.
fn no_op_requests() {
::logger::try_init_for_testing();
let mut rt = Runtime::new().unwrap();
let seed_peer_id = PeerId::random();
info!("Seed peer_id is {}", seed_peer_id.short_str());
let (mut peer_mgr_reqs_rx, mut peer_mgr_notifs_tx, mut conn_mgr_reqs_tx, mut ticker_tx) =
setup_conn_mgr(&mut rt, seed_peer_id);
let events_f = async move {
let seed_address = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9090").unwrap();
// Send address of seed peer.
info!("Sending address of seed peer");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
seed_peer_id,
vec![seed_address.clone()],
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager receives a request to connect to the seed peer.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_address.clone(),
Err(PeerManagerError::AlreadyConnected(seed_address.clone())),
)
.await;
// Send a delayed NewPeer notification.
info!("Sending delayed NewPeer notification for seed peer");
peer_mgr_notifs_tx
.send(PeerManagerNotification::NewPeer(
seed_peer_id,
seed_address.clone(),
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Send request to make seed peer ineligible.
info!("Sending request to make seed peer ineligible");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateEligibleNodes(HashMap::new()))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager receives a request to disconnect from the seed peer, which fails.
info!("Waiting to receive disconnect request");
expect_disconnect_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
seed_peer_id,
seed_address.clone(),
Err(PeerManagerError::NotConnected(seed_peer_id)),
)
.await;
// Send delayed LostPeer notification for seed peer.
peer_mgr_notifs_tx
.send(PeerManagerNotification::LostPeer(
seed_peer_id,
seed_address.clone(),
))
.await
.unwrap();
// Trigger connectivity check again. We don't expect connectivity manager to do
// anything - if it does, the task should panic. That may not fail the test (right
        // now), but will be easily spotted by someone running the tests locally.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
};
rt.block_on(events_f.boxed().unit_error().compat()).unwrap();
}
#[test]
fn backoff_on_failure() {
::logger::try_init_for_testing();
let mut rt = Runtime::new().unwrap();
let seed_peer_id = PeerId::random();
info!("Seed peer_id is {}", seed_peer_id.short_str());
let (mut peer_mgr_reqs_rx, mut peer_mgr_notifs_tx, mut conn_mgr_reqs_tx, mut ticker_tx) =
setup_conn_mgr(&mut rt, seed_peer_id);
let events_f = async move {
let (peer_a, peer_a_keys) = gen_peer();
let peer_a_address = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9090").unwrap();
let (peer_b, peer_b_keys) = gen_peer();
let peer_b_address = Multiaddr::from_str("/ip4/127.0.0.1/tcp/8080").unwrap();
info!("Sending list of eligible peers");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateEligibleNodes(
[(peer_a, peer_a_keys), (peer_b, peer_b_keys)]
.iter()
.cloned()
.collect(),
))
.await
.unwrap();
// Send address of peer a.
info!("Sending address of peer a");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
peer_a,
vec![peer_a_address.clone()],
))
.await
.unwrap();
// Send address of peer b.
info!("Sending address of peer b");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
peer_b,
vec![peer_b_address.clone()],
))
.await
.unwrap();
// Send NewPeer notification for peer_b.
info!("Sending NewPeer notification for peer b");
peer_mgr_notifs_tx
.send(PeerManagerNotification::NewPeer(
peer_b,
peer_b_address.clone(),
))
.await
.unwrap();
// We fail 10 attempts and ensure that the elapsed duration between successive attempts is
// always greater than 100ms (the fixed backoff). In production, an exponential backoff
// strategy is used.
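        // A minimal sketch of such an exponential strategy (illustrative only;
        // `ExponentialBackoff` ships in the same `tokio_retry` crate as the
        // `FixedInterval` used by `setup_conn_mgr` above):
        //
        //     use tokio_retry::strategy::ExponentialBackoff;
        //     let strategy = ExponentialBackoff::from_millis(100);
        //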
for _ in 0..10 {
let start = Instant::now();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Peer manager receives a request to connect to the seed peer.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
peer_a,
peer_a_address.clone(),
Err(PeerManagerError::IoError(io::Error::from(
io::ErrorKind::ConnectionRefused,
))),
)
.await;
let elapsed = Instant::now().duration_since(start);
info!("Duration elapsed: {:?}", elapsed);
assert!(elapsed.as_millis() >= 100);
assert!(elapsed.as_millis() <= 150);
}
};
rt.block_on(events_f.boxed().unit_error().compat()).unwrap();
}
// Test that connectivity manager will still connect to a peer if it advertises
// multiple listen addresses and some of them don't work.
#[test]
fn multiple_addrs_basic() {
::logger::try_init_for_testing();
let mut rt = Runtime::new().unwrap();
let seed_peer_id = PeerId::random();
info!("Seed peer_id is {}", seed_peer_id.short_str());
let (mut peer_mgr_reqs_rx, mut peer_mgr_notifs_tx, mut conn_mgr_reqs_tx, mut ticker_tx) =
setup_conn_mgr(&mut rt, seed_peer_id);
// Fake peer manager and discovery.
let f_peer_mgr = async move {
// For this test, the peer advertises multiple listen addresses. Assume
// that the first addr fails to connect while the second addr succeeds.
let seed_addr_1 = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9091").unwrap();
let seed_addr_2 = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9092").unwrap();
// Send addresses of seed peer.
info!("Sending address of seed peer");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
seed_peer_id,
vec![seed_addr_1.clone(), seed_addr_2.clone()],
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Assume that the first listen addr fails to connect.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_addr_1.clone(),
Err(PeerManagerError::IoError(io::Error::from(
io::ErrorKind::ConnectionRefused,
))),
)
.await;
// Trigger another connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Since the last connection attempt failed for seed_addr_1, we should
// attempt the next available listener address. In this case, the call
// succeeds and we should connect to the peer.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_addr_2.clone(),
Ok(()),
)
.await;
};
rt.block_on(f_peer_mgr.boxed().unit_error().compat())
.unwrap();
}
// Test that connectivity manager will work with multiple addresses even if we
// retry more times than there are addresses.
#[test]
fn multiple_addrs_wrapping() {
::logger::try_init_for_testing();
let mut rt = Runtime::new().unwrap();
let seed_peer_id = PeerId::random();
info!("Seed peer_id is {}", seed_peer_id.short_str());
let (mut peer_mgr_reqs_rx, mut peer_mgr_notifs_tx, mut conn_mgr_reqs_tx, mut ticker_tx) =
setup_conn_mgr(&mut rt, seed_peer_id);
// Fake peer manager and discovery.
let f_peer_mgr = async move {
let seed_addr_1 = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9091").unwrap();
let seed_addr_2 = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9092").unwrap();
// Send addresses of seed peer.
info!("Sending address of seed peer");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
seed_peer_id,
vec![seed_addr_1.clone(), seed_addr_2.clone()],
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Assume that the first listen addr fails to connect.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_addr_1.clone(),
Err(PeerManagerError::IoError(io::Error::from(
io::ErrorKind::ConnectionRefused,
))),
)
.await;
// Trigger another connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// The second attempt also fails.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_addr_2.clone(),
Err(PeerManagerError::IoError(io::Error::from(
io::ErrorKind::ConnectionRefused,
))),
)
.await;
// Trigger another connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Our next attempt should wrap around to the first address.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_addr_1.clone(),
Ok(()),
)
.await;
};
rt.block_on(f_peer_mgr.boxed().unit_error().compat())
.unwrap();
}
// Test that connectivity manager will still work when dialing a peer with
// multiple listen addrs and then that peer advertises a smaller number of addrs.
#[test]
fn multiple_addrs_shrinking() {
::logger::try_init_for_testing();
let mut rt = Runtime::new().unwrap();
let seed_peer_id = PeerId::random();
info!("Seed peer_id is {}", seed_peer_id.short_str());
let (mut peer_mgr_reqs_rx, mut peer_mgr_notifs_tx, mut conn_mgr_reqs_tx, mut ticker_tx) =
setup_conn_mgr(&mut rt, seed_peer_id);
// Fake peer manager and discovery.
let f_peer_mgr = async move {
let seed_addr_1 = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9091").unwrap();
let seed_addr_2 = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9092").unwrap();
        let seed_addr_3 = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9093").unwrap();
// Send addresses of seed peer.
info!("Sending address of seed peer");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
seed_peer_id,
vec![
seed_addr_1.clone(),
seed_addr_2.clone(),
seed_addr_3.clone(),
],
))
.await
.unwrap();
// Trigger connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// Assume that the first listen addr fails to connect.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_addr_1,
Err(PeerManagerError::IoError(io::Error::from(
io::ErrorKind::ConnectionRefused,
))),
)
.await;
let seed_addr_4 = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9094").unwrap();
let seed_addr_5 = Multiaddr::from_str("/ip4/127.0.0.1/tcp/9095").unwrap();
// The peer issues a new, smaller set of listen addrs.
info!("Sending address of seed peer");
conn_mgr_reqs_tx
.send(ConnectivityRequest::UpdateAddresses(
seed_peer_id,
vec![seed_addr_4.clone(), seed_addr_5.clone()],
))
.await
.unwrap();
// Trigger another connectivity check.
info!("Sending tick to trigger connectivity check");
ticker_tx.send(()).await.unwrap();
// After updating the addresses, we should dial the first new address,
// seed_addr_4 in this case.
info!("Waiting to receive dial request");
expect_dial_request(
&mut peer_mgr_reqs_rx,
&mut peer_mgr_notifs_tx,
&mut conn_mgr_reqs_tx,
seed_peer_id,
seed_addr_4,
Ok(()),
)
.await;
};
rt.block_on(f_peer_mgr.boxed().unit_error().compat())
.unwrap();
}
| 34.990033 | 100 | 0.587954 |
9158cbcc04e1cb4586ffa3609d0d6422be15c910 | 6,949 | use clippy_utils::diagnostics::span_lint_and_then;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::{Impl, ItemKind, Node, Path, QPath, TraitRef, TyKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::ty::AssocKind;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::symbol::Symbol;
use rustc_span::Span;
use std::collections::{BTreeMap, BTreeSet};
declare_clippy_lint! {
/// ### What it does
/// It lints if a struct has two methods with the same name:
/// one from a trait, another not from trait.
///
/// ### Why is this bad?
    /// It is confusing to have two methods with the same name on one type:
    /// which one is called depends on method resolution and on whether the
    /// trait is in scope.
///
/// ### Example
/// ```rust
/// trait T {
/// fn foo(&self) {}
/// }
///
/// struct S;
///
/// impl T for S {
/// fn foo(&self) {}
/// }
///
/// impl S {
/// fn foo(&self) {}
/// }
/// ```
#[clippy::version = "1.57.0"]
pub SAME_NAME_METHOD,
restriction,
"two method with same name"
}
declare_lint_pass!(SameNameMethod => [SAME_NAME_METHOD]);
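// Per self type, tracks the spans of inherent (`impl_methods`) and trait
// (`trait_methods`) methods already seen, keyed by method name.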
struct ExistingName {
impl_methods: BTreeMap<Symbol, Span>,
trait_methods: BTreeMap<Symbol, Vec<Span>>,
}
impl<'tcx> LateLintPass<'tcx> for SameNameMethod {
fn check_crate_post(&mut self, cx: &LateContext<'tcx>) {
let mut map = FxHashMap::<Res, ExistingName>::default();
for id in cx.tcx.hir().items() {
if matches!(cx.tcx.def_kind(id.def_id), DefKind::Impl)
&& let item = cx.tcx.hir().item(id)
&& let ItemKind::Impl(Impl {
items,
of_trait,
self_ty,
..
}) = &item.kind
&& let TyKind::Path(QPath::Resolved(_, Path { res, .. })) = self_ty.kind
{
if !map.contains_key(res) {
map.insert(
*res,
ExistingName {
impl_methods: BTreeMap::new(),
trait_methods: BTreeMap::new(),
},
);
}
let existing_name = map.get_mut(res).unwrap();
match of_trait {
Some(trait_ref) => {
let mut methods_in_trait: BTreeSet<Symbol> = if_chain! {
if let Some(Node::TraitRef(TraitRef { path, .. })) =
cx.tcx.hir().find(trait_ref.hir_ref_id);
if let Res::Def(DefKind::Trait, did) = path.res;
then{
                            // FIXME: if `rustc_middle::ty::assoc::AssocItems::items`
                            // were public, we could iterate its keys instead of
                            // `in_definition_order`, which would be more efficient
cx.tcx
.associated_items(did)
.in_definition_order()
.filter(|assoc_item| {
matches!(assoc_item.kind, AssocKind::Fn)
})
.map(|assoc_item| assoc_item.name)
.collect()
}else{
BTreeSet::new()
}
};
let mut check_trait_method = |method_name: Symbol, trait_method_span: Span| {
if let Some(impl_span) = existing_name.impl_methods.get(&method_name) {
span_lint_and_then(
cx,
SAME_NAME_METHOD,
*impl_span,
"method's name is the same as an existing method in a trait",
|diag| {
diag.span_note(
trait_method_span,
&format!("existing `{}` defined here", method_name),
);
},
);
}
if let Some(v) = existing_name.trait_methods.get_mut(&method_name) {
v.push(trait_method_span);
} else {
existing_name.trait_methods.insert(method_name, vec![trait_method_span]);
}
};
for impl_item_ref in (*items).iter().filter(|impl_item_ref| {
matches!(impl_item_ref.kind, rustc_hir::AssocItemKind::Fn { .. })
}) {
let method_name = impl_item_ref.ident.name;
methods_in_trait.remove(&method_name);
check_trait_method(method_name, impl_item_ref.span);
}
for method_name in methods_in_trait {
check_trait_method(method_name, item.span);
}
},
None => {
for impl_item_ref in (*items).iter().filter(|impl_item_ref| {
matches!(impl_item_ref.kind, rustc_hir::AssocItemKind::Fn { .. })
}) {
let method_name = impl_item_ref.ident.name;
let impl_span = impl_item_ref.span;
if let Some(trait_spans) = existing_name.trait_methods.get(&method_name) {
span_lint_and_then(
cx,
SAME_NAME_METHOD,
impl_span,
"method's name is the same as an existing method in a trait",
|diag| {
// TODO should we `span_note` on every trait?
// iterate on trait_spans?
diag.span_note(
trait_spans[0],
&format!("existing `{}` defined here", method_name),
);
},
);
}
existing_name.impl_methods.insert(method_name, impl_span);
}
},
}
}
}
}
}
| 42.631902 | 105 | 0.397179 |
5b8699bdd0d8a2171eea78e2406aab5d2cb6edb3 | 37 | pub mod supervisor;
pub mod watcher;
| 12.333333 | 19 | 0.783784 |
561442e000a2f80c9d7d7f91f0e26af7fcd91494 | 112,342 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, models::*, API_VERSION};
pub mod operations {
use super::{models, models::*, API_VERSION};
pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/providers/Microsoft.Storage/operations", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: OperationListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
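// Illustrative call sketch (hypothetical caller code, not part of the
// generated surface; `config` is a `crate::OperationConfig` built elsewhere):
//
//     let result = operations::list(&config).await?;
//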
pub mod skus {
use super::{models, models::*, API_VERSION};
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<StorageSkuListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Storage/skus",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: StorageSkuListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod storage_accounts {
use super::{models, models::*, API_VERSION};
pub async fn check_name_availability(
operation_config: &crate::OperationConfig,
account_name: &StorageAccountCheckNameAvailabilityParameters,
subscription_id: &str,
) -> std::result::Result<CheckNameAvailabilityResult, check_name_availability::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Storage/checkNameAvailability",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(check_name_availability::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(check_name_availability::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(account_name).map_err(check_name_availability::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(check_name_availability::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(check_name_availability::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: CheckNameAvailabilityResult = serde_json::from_slice(rsp_body)
.map_err(|source| check_name_availability::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(check_name_availability::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod check_name_availability {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_properties(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
subscription_id: &str,
expand: Option<&str>,
) -> std::result::Result<StorageAccount, get_properties::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name
);
let mut url = url::Url::parse(url_str).map_err(get_properties::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_properties::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(expand) = expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_properties::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_properties::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: StorageAccount =
serde_json::from_slice(rsp_body).map_err(|source| get_properties::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_properties::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get_properties {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
parameters: &StorageAccountCreateParameters,
subscription_id: &str,
) -> std::result::Result<create::Response, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name
);
let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: StorageAccount =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(create::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(StorageAccount),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
parameters: &StorageAccountUpdateParameters,
subscription_id: &str,
) -> std::result::Result<StorageAccount, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: StorageAccount =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update {
use super::{models, models::*, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<StorageAccountListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Storage/storageAccounts",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: StorageAccountListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<StorageAccountListResult, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_resource_group::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_resource_group::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: StorageAccountListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_by_resource_group::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_by_resource_group {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
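    // Illustrative sketch (not generated code): fetching the account keys. Unlike the
    // GET listings above, this operation is a POST sent with an explicitly
    // zero-length empty body.
    //
    //     let keys: StorageAccountListKeysResult =
    //         list_keys(&config, "<resource-group>", "<account>", "<subscription-id>").await?;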
pub async fn list_keys(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
subscription_id: &str,
) -> std::result::Result<StorageAccountListKeysResult, list_keys::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/listKeys",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name
);
let mut url = url::Url::parse(url_str).map_err(list_keys::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_keys::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_keys::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_keys::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: StorageAccountListKeysResult =
serde_json::from_slice(rsp_body).map_err(|source| list_keys::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_keys::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_keys {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
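    // Illustrative sketch (not generated code): regenerating one of the account keys.
    // The `key_name` field shown here is an assumption about the
    // `StorageAccountRegenerateKeyParameters` model, not something visible in this file.
    //
    //     let params = StorageAccountRegenerateKeyParameters { key_name: "key1".into() };
    //     let keys = regenerate_key(&config, "<resource-group>", "<account>", &params, "<subscription-id>").await?;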
pub async fn regenerate_key(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
regenerate_key: &StorageAccountRegenerateKeyParameters,
subscription_id: &str,
) -> std::result::Result<StorageAccountListKeysResult, regenerate_key::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/regenerateKey",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name
);
let mut url = url::Url::parse(url_str).map_err(regenerate_key::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(regenerate_key::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(regenerate_key).map_err(regenerate_key::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(regenerate_key::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(regenerate_key::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: StorageAccountListKeysResult =
serde_json::from_slice(rsp_body).map_err(|source| regenerate_key::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(regenerate_key::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod regenerate_key {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
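    // Illustrative sketch (not generated code): requesting an account-level SAS. The
    // `AccountSasParameters` value (construction omitted here rather than guessed) is
    // serialized to JSON by `azure_core::to_json` and sent as the POST body.
    //
    //     let sas = list_account_sas(&config, "<resource-group>", "<account>", &sas_params, "<subscription-id>").await?;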
pub async fn list_account_sas(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
parameters: &AccountSasParameters,
subscription_id: &str,
) -> std::result::Result<ListAccountSasResponse, list_account_sas::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/ListAccountSas",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name
);
let mut url = url::Url::parse(url_str).map_err(list_account_sas::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_account_sas::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(list_account_sas::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_account_sas::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_account_sas::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ListAccountSasResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_account_sas::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_account_sas::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_account_sas {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_service_sas(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
parameters: &ServiceSasParameters,
subscription_id: &str,
) -> std::result::Result<ListServiceSasResponse, list_service_sas::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/ListServiceSas",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name
);
let mut url = url::Url::parse(url_str).map_err(list_service_sas::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_service_sas::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(list_service_sas::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_service_sas::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_service_sas::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ListServiceSasResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_service_sas::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_service_sas::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_service_sas {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
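    // Illustrative sketch (not generated code): a failover may complete synchronously
    // (200) or be accepted for asynchronous processing (202), so match both variants:
    //
    //     match failover(&config, "<resource-group>", "<account>", "<subscription-id>").await? {
    //         failover::Response::Ok200 => { /* failover finished */ }
    //         failover::Response::Accepted202 => { /* long-running; poll out of band */ }
    //     }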
pub async fn failover(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
subscription_id: &str,
) -> std::result::Result<failover::Response, failover::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/failover",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name
);
let mut url = url::Url::parse(url_str).map_err(failover::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(failover::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(failover::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(failover::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(failover::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(failover::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(failover::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod failover {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod usages {
use super::{models, models::*, API_VERSION};
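    // Illustrative sketch (not generated code): checking storage quota usage for one
    // region; `config` is an assumed `crate::OperationConfig`.
    //
    //     let usage: UsageListResult = list_by_location(&config, "<subscription-id>", "eastus").await?;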
pub async fn list_by_location(
operation_config: &crate::OperationConfig,
subscription_id: &str,
location: &str,
) -> std::result::Result<UsageListResult, list_by_location::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Storage/locations/{}/usages",
operation_config.base_path(),
subscription_id,
location
);
let mut url = url::Url::parse(url_str).map_err(list_by_location::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_location::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_location::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_location::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: UsageListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_location::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_by_location::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_by_location {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod blob_services {
use super::{models, models::*, API_VERSION};
pub async fn get_service_properties(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
subscription_id: &str,
blob_services_name: &str,
) -> std::result::Result<BlobServiceProperties, get_service_properties::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name,
blob_services_name
);
let mut url = url::Url::parse(url_str).map_err(get_service_properties::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_service_properties::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_service_properties::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_service_properties::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BlobServiceProperties = serde_json::from_slice(rsp_body)
.map_err(|source| get_service_properties::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_service_properties::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get_service_properties {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
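    // Illustrative sketch (not generated code): a read-modify-write round trip. Each
    // account has a single blob service addressed by the name "default" (a convention
    // of the ARM resource model, not enforced by these signatures).
    //
    //     let mut props = get_service_properties(&config, "<rg>", "<account>", "<sub-id>", "default").await?;
    //     /* mutate `props` here */
    //     let props = set_service_properties(&config, "<rg>", "<account>", "<sub-id>", "default", &props).await?;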
pub async fn set_service_properties(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
subscription_id: &str,
blob_services_name: &str,
parameters: &BlobServiceProperties,
) -> std::result::Result<BlobServiceProperties, set_service_properties::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name,
blob_services_name
);
let mut url = url::Url::parse(url_str).map_err(set_service_properties::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(set_service_properties::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(set_service_properties::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(set_service_properties::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(set_service_properties::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BlobServiceProperties = serde_json::from_slice(rsp_body)
.map_err(|source| set_service_properties::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(set_service_properties::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod set_service_properties {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod blob_containers {
use super::{models, models::*, API_VERSION};
pub async fn list(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
subscription_id: &str,
) -> std::result::Result<ListContainerItems, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ListContainerItems =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
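    // Illustrative sketch (not generated code): fetching a single container's ARM
    // resource under the fixed "blobServices/default" path segment.
    //
    //     let container: BlobContainer =
    //         get(&config, "<resource-group>", "<account>", "<container>", "<subscription-id>").await?;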
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
subscription_id: &str,
) -> std::result::Result<BlobContainer, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name,
container_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BlobContainer =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
blob_container: &BlobContainer,
subscription_id: &str,
) -> std::result::Result<BlobContainer, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name,
container_name
);
let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(blob_container).map_err(create::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: BlobContainer =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(create::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
blob_container: &BlobContainer,
subscription_id: &str,
) -> std::result::Result<BlobContainer, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name,
container_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(blob_container).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BlobContainer =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name,
container_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
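    // Illustrative sketch (not generated code): applying and clearing legal-hold tags.
    // `hold` is a `LegalHold` from the models module, constructed elsewhere; its exact
    // fields are not assumed here.
    //
    //     let applied = set_legal_hold(&config, "<rg>", "<account>", "<container>", "<sub-id>", &hold).await?;
    //     let cleared = clear_legal_hold(&config, "<rg>", "<account>", "<container>", "<sub-id>", &hold).await?;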
pub async fn set_legal_hold(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
subscription_id: &str,
legal_hold: &LegalHold,
) -> std::result::Result<LegalHold, set_legal_hold::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}/setLegalHold",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            container_name
        );
let mut url = url::Url::parse(url_str).map_err(set_legal_hold::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(set_legal_hold::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(legal_hold).map_err(set_legal_hold::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(set_legal_hold::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(set_legal_hold::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LegalHold =
serde_json::from_slice(rsp_body).map_err(|source| set_legal_hold::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(set_legal_hold::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod set_legal_hold {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn clear_legal_hold(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
subscription_id: &str,
legal_hold: &LegalHold,
) -> std::result::Result<LegalHold, clear_legal_hold::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}/clearLegalHold",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            container_name
        );
let mut url = url::Url::parse(url_str).map_err(clear_legal_hold::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(clear_legal_hold::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(legal_hold).map_err(clear_legal_hold::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(clear_legal_hold::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(clear_legal_hold::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LegalHold = serde_json::from_slice(rsp_body)
.map_err(|source| clear_legal_hold::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(clear_legal_hold::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod clear_legal_hold {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_immutability_policy(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
immutability_policy_name: &str,
subscription_id: &str,
if_match: Option<&str>,
) -> std::result::Result<ImmutabilityPolicy, get_immutability_policy::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}/immutabilityPolicies/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            container_name,
            immutability_policy_name
        );
let mut url = url::Url::parse(url_str).map_err(get_immutability_policy::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_immutability_policy::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(if_match) = if_match {
req_builder = req_builder.header("If-Match", if_match);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_immutability_policy::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_immutability_policy::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ImmutabilityPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| get_immutability_policy::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_immutability_policy::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get_immutability_policy {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
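    // Illustrative sketch (not generated code): creating the "default" policy. On the
    // first create there is no ETag yet, so `if_match` is `None`; `policy_body` is an
    // `ImmutabilityPolicy` built elsewhere from the models module.
    //
    //     let policy = create_or_update_immutability_policy(
    //         &config, "<rg>", "<account>", "<container>", "default", "<sub-id>",
    //         Some(&policy_body), None,
    //     ).await?;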
pub async fn create_or_update_immutability_policy(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
immutability_policy_name: &str,
subscription_id: &str,
parameters: Option<&ImmutabilityPolicy>,
if_match: Option<&str>,
) -> std::result::Result<ImmutabilityPolicy, create_or_update_immutability_policy::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}/immutabilityPolicies/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            container_name,
            immutability_policy_name
        );
let mut url = url::Url::parse(url_str).map_err(create_or_update_immutability_policy::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update_immutability_policy::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = if let Some(parameters) = parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(create_or_update_immutability_policy::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
if let Some(if_match) = if_match {
req_builder = req_builder.header("If-Match", if_match);
}
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(create_or_update_immutability_policy::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update_immutability_policy::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ImmutabilityPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_immutability_policy::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(create_or_update_immutability_policy::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update_immutability_policy {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete_immutability_policy(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
immutability_policy_name: &str,
subscription_id: &str,
if_match: &str,
) -> std::result::Result<ImmutabilityPolicy, delete_immutability_policy::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}/immutabilityPolicies/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            container_name,
            immutability_policy_name
        );
let mut url = url::Url::parse(url_str).map_err(delete_immutability_policy::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete_immutability_policy::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("If-Match", if_match);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(delete_immutability_policy::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(delete_immutability_policy::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ImmutabilityPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| delete_immutability_policy::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(delete_immutability_policy::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete_immutability_policy {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn lock_immutability_policy(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
subscription_id: &str,
if_match: &str,
) -> std::result::Result<ImmutabilityPolicy, lock_immutability_policy::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}/immutabilityPolicies/default/lock",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            container_name
        );
let mut url = url::Url::parse(url_str).map_err(lock_immutability_policy::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(lock_immutability_policy::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("If-Match", if_match);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(lock_immutability_policy::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(lock_immutability_policy::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ImmutabilityPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| lock_immutability_policy::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(lock_immutability_policy::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod lock_immutability_policy {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn extend_immutability_policy(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
subscription_id: &str,
parameters: Option<&ImmutabilityPolicy>,
if_match: &str,
) -> std::result::Result<ImmutabilityPolicy, extend_immutability_policy::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}/immutabilityPolicies/default/extend",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            container_name
        );
let mut url = url::Url::parse(url_str).map_err(extend_immutability_policy::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(extend_immutability_policy::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = if let Some(parameters) = parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(extend_immutability_policy::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.header("If-Match", if_match);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(extend_immutability_policy::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(extend_immutability_policy::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ImmutabilityPolicy = serde_json::from_slice(rsp_body)
.map_err(|source| extend_immutability_policy::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(extend_immutability_policy::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod extend_immutability_policy {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
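    // Illustrative usage sketch (not part of the generated client): shows the
    // optional-body pattern of `extend_immutability_policy` above. Passing
    // `parameters: None` sends an empty request body; `Some(policy)` would be
    // serialized as JSON instead. All argument values are placeholders, and the
    // `if_match` argument is assumed to carry the policy's current etag.
    #[allow(dead_code)]
    async fn extend_immutability_policy_usage_sketch(
        operation_config: &crate::OperationConfig,
        etag: &str,
    ) -> std::result::Result<ImmutabilityPolicy, extend_immutability_policy::Error> {
        extend_immutability_policy(
            operation_config,
            "example-rg",          // resource_group_name (placeholder)
            "examplestorage",      // account_name (placeholder)
            "example-container",   // container_name (placeholder)
            "00000000-0000-0000-0000-000000000000", // subscription_id (placeholder)
            None,                  // no request body
            etag,
        )
        .await
    }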
pub async fn lease(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
container_name: &str,
subscription_id: &str,
parameters: Option<&LeaseContainerRequest>,
) -> std::result::Result<LeaseContainerResponse, lease::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Storage/storageAccounts/{}/blobServices/default/containers/{}/lease",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name,
container_name
);
let mut url = url::Url::parse(url_str).map_err(lease::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(lease::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = if let Some(parameters) = parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(lease::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(lease::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(lease::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LeaseContainerResponse =
serde_json::from_slice(rsp_body).map_err(|source| lease::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(lease::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod lease {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
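    // Illustrative usage sketch (not part of the generated client). Argument
    // values are placeholders and the `OperationConfig` is assumed to be built
    // by the caller; `parameters: None` mirrors the empty-body branch above,
    // while a real call would normally pass a `LeaseContainerRequest`.
    #[allow(dead_code)]
    async fn lease_usage_sketch(
        operation_config: &crate::OperationConfig,
    ) -> std::result::Result<LeaseContainerResponse, lease::Error> {
        lease(
            operation_config,
            "example-rg",          // resource_group_name (placeholder)
            "examplestorage",      // account_name (placeholder)
            "example-container",   // container_name (placeholder)
            "00000000-0000-0000-0000-000000000000", // subscription_id (placeholder)
            None,                  // no request body
        )
        .await
    }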
}
pub mod management_policies {
use super::{models, models::*, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
subscription_id: &str,
management_policy_name: &str,
) -> std::result::Result<StorageAccountManagementPolicies, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/managementPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name,
management_policy_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: StorageAccountManagementPolicies =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
subscription_id: &str,
management_policy_name: &str,
properties: &ManagementPoliciesRulesSetParameter,
) -> std::result::Result<StorageAccountManagementPolicies, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/managementPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name,
management_policy_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(properties).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: StorageAccountManagementPolicies = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update {
use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
account_name: &str,
subscription_id: &str,
management_policy_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}/managementPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
account_name,
management_policy_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete {
use super::{models, models::*, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
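    // Illustrative usage sketch (not part of the generated client): `delete`
    // surfaces both success statuses through `delete::Response`, so callers can
    // treat 200 and 204 uniformly. Argument values are placeholders.
    #[allow(dead_code)]
    async fn delete_usage_sketch(
        operation_config: &crate::OperationConfig,
    ) -> std::result::Result<(), delete::Error> {
        match delete(
            operation_config,
            "example-rg",     // resource_group_name (placeholder)
            "examplestorage", // account_name (placeholder)
            "00000000-0000-0000-0000-000000000000", // subscription_id (placeholder)
            "default",        // management_policy_name (placeholder)
        )
        .await?
        {
            delete::Response::Ok200 => Ok(()),        // policy deleted
            delete::Response::NoContent204 => Ok(()), // nothing to delete
        }
    }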
}
| 49.251206 | 315 | 0.595761 |
6a97cb04a82226722311261e45d6f8100f01d771 | 168,014 | // Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use log::{error, info, warn};
mod bitfield_queue;
mod deadline_assignment;
mod deadline_state;
mod deadlines;
mod expiration_queue;
mod monies;
mod partition_state;
mod policy;
mod sector_map;
mod sectors;
mod state;
mod termination;
mod types;
mod vesting_state;
pub use bitfield_queue::*;
pub use deadline_assignment::*;
pub use deadline_state::*;
pub use deadlines::*;
pub use expiration_queue::*;
pub use monies::*;
pub use partition_state::*;
pub use policy::*;
pub use sector_map::*;
pub use sectors::*;
pub use state::*;
pub use termination::*;
pub use types::*;
pub use vesting_state::*;
use crate::{
account::Method as AccountMethod,
actor_error,
market::{self, ActivateDealsParams, ComputeDataCommitmentReturn, SectorDataSpec, SectorDeals},
power::MAX_MINER_PROVE_COMMITS_PER_EPOCH,
};
use crate::{
is_principal, smooth::FilterEstimate, ACCOUNT_ACTOR_CODE_ID, BURNT_FUNDS_ACTOR_ADDR,
CALLER_TYPES_SIGNABLE, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR,
STORAGE_POWER_ACTOR_ADDR,
};
use crate::{
market::{
ComputeDataCommitmentParamsRef, Method as MarketMethod, OnMinerSectorsTerminateParams,
OnMinerSectorsTerminateParamsRef, VerifyDealsForActivationParamsRef,
VerifyDealsForActivationReturn,
},
power::CurrentTotalPowerReturn,
};
use crate::{
power::{EnrollCronEventParams, Method as PowerMethod},
reward::ThisEpochRewardReturn,
ActorDowncast,
};
use address::{Address, Payload, Protocol};
use bitfield::{BitField, UnvalidatedBitField, Validate};
use byteorder::{BigEndian, ByteOrder, WriteBytesExt};
use cid::{Cid, Code::Blake2b256, Prefix};
use clock::ChainEpoch;
use crypto::DomainSeparationTag::{
self, InteractiveSealChallengeSeed, SealRandomness, WindowedPoStChallengeSeed,
};
use encoding::{from_slice, BytesDe, Cbor};
use fil_types::{
deadlines::DeadlineInfo, AggregateSealVerifyInfo, AggregateSealVerifyProofAndInfos,
InteractiveSealRandomness, PoStProof, PoStRandomness, RegisteredPoStProof, RegisteredSealProof,
SealRandomness as SealRandom, SealVerifyInfo, SealVerifyParams, SectorID, SectorInfo,
SectorNumber, SectorSize, WindowPoStVerifyInfo, MAX_SECTOR_NUMBER, RANDOMNESS_LENGTH,
};
use ipld_blockstore::BlockStore;
use num_bigint::BigInt;
use num_bigint::{bigint_ser::BigIntSer, Integer};
use num_derive::FromPrimitive;
use num_traits::{FromPrimitive, Signed, Zero};
use runtime::{ActorCode, Runtime};
use std::collections::{hash_map::Entry, HashMap};
use std::error::Error as StdError;
use std::{iter, ops::Neg};
use vm::{
ActorError, DealID, ExitCode, MethodNum, Serialized, TokenAmount, METHOD_CONSTRUCTOR,
METHOD_SEND,
};
// The first 1000 actor-specific codes are left open for user error, i.e. things that might
// actually happen without programming error in the actor code.
// The following errors are particular cases of illegal state.
// They're not expected to ever happen, but if they do, distinguished codes can help us
// diagnose the problem.
use ExitCode::ErrPlaceholder as ErrBalanceInvariantBroken;
// * Updated to specs-actors commit: 17d3c602059e5c48407fb3c34343da87e6ea6586 (v0.9.12)
/// Storage Miner actor methods available
#[derive(FromPrimitive)]
#[repr(u64)]
pub enum Method {
Constructor = METHOD_CONSTRUCTOR,
ControlAddresses = 2,
ChangeWorkerAddress = 3,
ChangePeerID = 4,
SubmitWindowedPoSt = 5,
PreCommitSector = 6,
ProveCommitSector = 7,
ExtendSectorExpiration = 8,
TerminateSectors = 9,
DeclareFaults = 10,
DeclareFaultsRecovered = 11,
OnDeferredCronEvent = 12,
CheckSectorProven = 13,
ApplyRewards = 14,
ReportConsensusFault = 15,
WithdrawBalance = 16,
ConfirmSectorProofsValid = 17,
ChangeMultiaddrs = 18,
CompactPartitions = 19,
CompactSectorNumbers = 20,
ConfirmUpdateWorkerKey = 21,
RepayDebt = 22,
ChangeOwnerAddress = 23,
DisputeWindowedPoSt = 24,
PreCommitSectorBatch = 25,
ProveCommitAggregate = 26,
}
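// Non-normative sketch (not part of the original actor code): because `Method`
// is `#[repr(u64)]` and derives `FromPrimitive`, an incoming `MethodNum` can be
// decoded with `FromPrimitive::from_u64`; unknown numbers yield `None`, which a
// dispatcher maps to an "unhandled message" error.
#[cfg(test)]
fn decode_method_sketch(method_num: MethodNum) -> Option<Method> {
    Method::from_u64(method_num)
}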
/// Miner Actor
pub struct Actor;
impl Actor {
pub fn constructor<BS, RT>(
rt: &mut RT,
params: MinerConstructorParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
rt.validate_immediate_caller_is(&[*INIT_ACTOR_ADDR])?;
check_control_addresses(¶ms.control_addresses)?;
check_peer_info(¶ms.peer_id, ¶ms.multi_addresses)?;
check_valid_post_proof_type(params.window_post_proof_type)?;
let owner = resolve_control_address(rt, params.owner)?;
let worker = resolve_worker_address(rt, params.worker)?;
let control_addresses: Vec<_> = params
.control_addresses
.into_iter()
.map(|address| resolve_control_address(rt, address))
.collect::<Result<_, _>>()?;
let current_epoch = rt.curr_epoch();
let blake2b = |b: &[u8]| rt.hash_blake2b(b);
let offset = assign_proving_period_offset(*rt.message().receiver(), current_epoch, blake2b)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrSerialization,
"failed to assign proving period offset",
)
})?;
let period_start = current_proving_period_start(current_epoch, offset);
if period_start > current_epoch {
return Err(actor_error!(
ErrIllegalState,
"computed proving period start {} after current epoch {}",
period_start,
current_epoch
));
}
let deadline_idx = current_deadline_index(current_epoch, period_start);
if deadline_idx >= WPOST_PERIOD_DEADLINES as usize {
return Err(actor_error!(
ErrIllegalState,
"computed proving deadline index {} invalid",
deadline_idx
));
}
let info = MinerInfo::new(
owner,
worker,
control_addresses,
params.peer_id,
params.multi_addresses,
params.window_post_proof_type,
)?;
let info_cid = rt.store().put(&info, Blake2b256).map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to construct illegal state",
)
})?;
let st = State::new(rt.store(), info_cid, period_start, deadline_idx).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to construct state")
})?;
rt.create(&st)?;
Ok(())
}
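    // Non-normative sketch (not part of the original actor): mirrors the two
    // proving-period checks in `constructor` above. The helper signatures are
    // inferred from their call sites; the epoch values are placeholders.
    #[cfg(test)]
    fn proving_period_validation_sketch() {
        let current_epoch: ChainEpoch = 1000;
        let offset: ChainEpoch = 7;
        // The proving period containing `current_epoch` must start at or before it...
        let period_start = current_proving_period_start(current_epoch, offset);
        assert!(period_start <= current_epoch);
        // ...and the deadline index must fall within the configured deadline count.
        let deadline_idx = current_deadline_index(current_epoch, period_start);
        assert!(deadline_idx < WPOST_PERIOD_DEADLINES as usize);
    }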
fn control_addresses<BS, RT>(rt: &mut RT) -> Result<GetControlAddressesReturn, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
rt.validate_immediate_caller_accept_any()?;
let state: State = rt.state()?;
let info = get_miner_info(rt.store(), &state)?;
Ok(GetControlAddressesReturn {
owner: info.owner,
worker: info.worker,
control_addresses: info.control_addresses,
})
}
/// Will ALWAYS overwrite the existing control addresses with the control addresses passed in the params.
/// If an empty addresses vector is passed, the control addresses will be cleared.
/// A worker change will be scheduled if the worker passed in the params is different from the existing worker.
fn change_worker_address<BS, RT>(
rt: &mut RT,
params: ChangeWorkerAddressParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
check_control_addresses(¶ms.new_control_addresses)?;
let new_worker = resolve_worker_address(rt, params.new_worker)?;
let control_addresses: Vec<Address> = params
.new_control_addresses
.into_iter()
.map(|address| resolve_control_address(rt, address))
.collect::<Result<_, _>>()?;
rt.transaction(|state: &mut State, rt| {
let mut info = get_miner_info(rt.store(), state)?;
// Only the Owner is allowed to change the new_worker and control addresses.
rt.validate_immediate_caller_is(std::iter::once(&info.owner))?;
// save the new control addresses
info.control_addresses = control_addresses;
// save new_worker addr key change request
if new_worker != info.worker && info.pending_worker_key.is_none() {
info.pending_worker_key = Some(WorkerKeyChange {
new_worker,
effective_at: rt.curr_epoch() + WORKER_KEY_CHANGE_DELAY,
})
}
state.save_info(rt.store(), &info).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "could not save miner info")
})?;
Ok(())
})?;
Ok(())
}
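    // Non-normative sketch (not part of the original actor): a worker change
    // requested by `change_worker_address` is recorded as the `WorkerKeyChange`
    // built above and only becomes effective `WORKER_KEY_CHANGE_DELAY` epochs
    // later. The address and epoch values are placeholders.
    #[cfg(test)]
    fn worker_key_change_sketch() {
        let request_epoch: ChainEpoch = 100;
        let change = WorkerKeyChange {
            new_worker: Address::new_id(1000),
            effective_at: request_epoch + WORKER_KEY_CHANGE_DELAY,
        };
        // The pending change must not be honored before its effective epoch.
        assert!(change.effective_at > request_epoch);
    }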
/// Triggers a worker address change if a change has been requested and its effective epoch has arrived.
fn confirm_update_worker_key<BS, RT>(rt: &mut RT) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
rt.transaction(|state: &mut State, rt| {
let mut info = get_miner_info(rt.store(), state)?;
rt.validate_immediate_caller_is(std::iter::once(&info.owner))?;
process_pending_worker(&mut info, rt, state)?;
Ok(())
})
}
/// Proposes or confirms a change of owner address.
/// If invoked by the current owner, proposes a new owner address for confirmation. If the proposed address is the
/// current owner address, revokes any existing proposal.
/// If invoked by the previously proposed address, with the same proposal, changes the current owner address to be
/// that proposed address.
fn change_owner_address<BS, RT>(rt: &mut RT, new_address: Address) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
        // * Cannot match the Go implementation's check for an undefined address; does the Go
        // * impl allow an undefined address to be deserialized over the wire? If so, a workaround will be needed
if !matches!(new_address.protocol(), Protocol::ID) {
return Err(actor_error!(
ErrIllegalArgument,
"owner address must be an ID address"
));
}
rt.transaction(|state: &mut State, rt| {
let mut info = get_miner_info(rt.store(), state)?;
if rt.message().caller() == &info.owner || info.pending_owner_address.is_none() {
rt.validate_immediate_caller_is(std::iter::once(&info.owner))?;
info.pending_owner_address = Some(new_address);
} else {
let pending_address = info.pending_owner_address.unwrap();
rt.validate_immediate_caller_is(std::iter::once(&pending_address))?;
if new_address != pending_address {
return Err(actor_error!(
ErrIllegalArgument,
"expected confirmation of {} got {}",
pending_address,
new_address
));
}
info.owner = pending_address;
}
// Clear any no-op change
if let Some(p_addr) = info.pending_owner_address {
if p_addr == info.owner {
info.pending_owner_address = None;
}
}
state.save_info(rt.store(), &info).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to save miner info")
})?;
Ok(())
})
}
fn change_peer_id<BS, RT>(rt: &mut RT, params: ChangePeerIDParams) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
check_peer_info(¶ms.new_id, &[])?;
rt.transaction(|state: &mut State, rt| {
let mut info = get_miner_info(rt.store(), state)?;
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
info.peer_id = params.new_id;
state.save_info(rt.store(), &info).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "could not save miner info")
})?;
Ok(())
})?;
Ok(())
}
fn change_multiaddresses<BS, RT>(
rt: &mut RT,
params: ChangeMultiaddrsParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
check_peer_info(&[], ¶ms.new_multi_addrs)?;
rt.transaction(|state: &mut State, rt| {
let mut info = get_miner_info(rt.store(), state)?;
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
info.multi_address = params.new_multi_addrs;
state.save_info(rt.store(), &info).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "could not save miner info")
})?;
Ok(())
})?;
Ok(())
}
/// Invoked by miner's worker address to submit their fallback post
fn submit_windowed_post<BS, RT>(
rt: &mut RT,
mut params: SubmitWindowedPoStParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let current_epoch = rt.curr_epoch();
if params.proofs.len() != 1 {
return Err(actor_error!(
ErrIllegalArgument,
"expected exactly one proof, got {}",
params.proofs.len()
));
}
if check_valid_post_proof_type(params.proofs[0].post_proof).is_err() {
return Err(actor_error!(
ErrIllegalArgument,
"proof type {:?} not allowed",
params.proofs[0].post_proof
));
}
if params.deadline >= WPOST_PERIOD_DEADLINES as usize {
return Err(actor_error!(
ErrIllegalArgument,
"invalid deadline {} of {}",
params.deadline,
WPOST_PERIOD_DEADLINES
));
}
if params.chain_commit_rand.0.len() > RANDOMNESS_LENGTH {
return Err(actor_error!(
ErrIllegalArgument,
"expected at most {} bytes of randomness, got {}",
RANDOMNESS_LENGTH,
params.chain_commit_rand.0.len()
));
}
let post_result = rt.transaction(|state: &mut State, rt| {
let info = get_miner_info(rt.store(), state)?;
let max_proof_size = info.window_post_proof_type.proof_size().map_err(|e| {
actor_error!(
ErrIllegalState,
"failed to determine max window post proof size: {}",
e
)
})?;
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
// Verify that the miner has passed exactly 1 proof.
if params.proofs.len() != 1 {
return Err(actor_error!(
ErrIllegalArgument,
"expected exactly one proof, got {}",
params.proofs.len()
));
}
// Make sure the miner is using the correct proof type.
if params.proofs[0].post_proof != info.window_post_proof_type {
return Err(actor_error!(
ErrIllegalArgument,
"expected proof of type {:?}, got {:?}",
params.proofs[0].post_proof,
info.window_post_proof_type
));
}
// Make sure the proof size doesn't exceed the max. We could probably check for an exact match, but this is safer.
let max_size = max_proof_size * params.partitions.len();
if params.proofs[0].proof_bytes.len() > max_size {
return Err(actor_error!(
ErrIllegalArgument,
"expect proof to be smaller than {} bytes",
max_size
));
}
// Validate that the miner didn't try to prove too many partitions at once.
let submission_partition_limit =
load_partitions_sectors_max(info.window_post_partition_sectors);
if params.partitions.len() as u64 > submission_partition_limit {
return Err(actor_error!(
ErrIllegalArgument,
"too many partitions {}, limit {}",
params.partitions.len(),
submission_partition_limit
));
}
let current_deadline = state.deadline_info(current_epoch);
// Check that the miner state indicates that the current proving deadline has started.
            // This should only fail if the cron actor wasn't invoked, and it only matters if cron hasn't
            // been invoked for a whole proving period, in which case the missed PoSt submissions from the
            // prior occurrence of this deadline haven't been processed yet.
if !current_deadline.is_open() {
return Err(actor_error!(
ErrIllegalState,
"proving period {} not yet open at {}",
current_deadline.period_start,
current_epoch
));
}
// The miner may only submit a proof for the current deadline.
if params.deadline != current_deadline.index as usize {
return Err(actor_error!(
ErrIllegalArgument,
"invalid deadline {} at epoch {}, expected {}",
params.deadline,
current_epoch,
current_deadline.index
));
}
// Verify that the PoSt was committed to the chain at most
// WPoStChallengeLookback+WPoStChallengeWindow in the past.
if params.chain_commit_epoch < current_deadline.challenge {
return Err(actor_error!(
ErrIllegalArgument,
"expected chain commit epoch {} to be after {}",
params.chain_commit_epoch,
current_deadline.challenge
));
}
if params.chain_commit_epoch >= current_epoch {
return Err(actor_error!(
ErrIllegalArgument,
"chain commit epoch {} must be less tha the current epoch {}",
params.chain_commit_epoch,
current_epoch
));
}
// Verify the chain commit randomness
let comm_rand = rt.get_randomness_from_tickets(
DomainSeparationTag::PoStChainCommit,
params.chain_commit_epoch,
&[],
)?;
if comm_rand != params.chain_commit_rand {
return Err(actor_error!(
ErrIllegalArgument,
"post commit randomness mismatched"
));
}
let sectors = Sectors::load(rt.store(), &state.sectors).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to load sectors")
})?;
let mut deadlines = state
.load_deadlines(rt.store())
.map_err(|e| e.wrap("failed to load deadlines"))?;
let mut deadline = deadlines
.load_deadline(rt.store(), params.deadline)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to load deadline {}", params.deadline),
)
})?;
// Record proven sectors/partitions, returning updates to power and the final set of sectors
// proven/skipped.
//
// NOTE: This function does not actually check the proofs but does assume that they're correct. Instead,
// it snapshots the deadline's state and the submitted proofs at the end of the challenge window and
// allows third-parties to dispute these proofs.
//
            // While we could perform _all_ operations at the end of challenge window, we do as much as we can here to avoid
// overloading cron.
let fault_expiration = current_deadline.last() + FAULT_MAX_AGE;
let post_result = deadline
.record_proven_sectors(
rt.store(),
§ors,
info.sector_size,
current_deadline.quant_spec(),
fault_expiration,
&mut params.partitions,
)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!(
"failed to process post submission for deadline {}",
params.deadline
),
)
})?;
// Make sure we actually proved something.
let proven_sectors = &post_result.sectors - &post_result.ignored_sectors;
if proven_sectors.is_empty() {
// Abort verification if all sectors are (now) faults. There's nothing to prove.
// It's not rational for a miner to submit a Window PoSt marking *all* non-faulty sectors as skipped,
// since that will just cause them to pay a penalty at deadline end that would otherwise be zero
// if they had *not* declared them.
return Err(actor_error!(
ErrIllegalArgument,
"cannot prove partitions with no active sectors"
));
}
// If we're not recovering power, record the proof for optimistic verification.
if post_result.recovered_power.is_zero() {
deadline
.record_post_proofs(rt.store(), &post_result.partitions, ¶ms.proofs)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to record proof for optimistic verification",
)
})?
} else {
// Load sector infos for proof, substituting a known-good sector for known-faulty sectors.
// Note: this is slightly sub-optimal, loading info for the recovering sectors again after they were already
// loaded above.
let sector_infos = sectors
.load_for_proof(&post_result.sectors, &post_result.ignored_sectors)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to load sectors for post verification",
)
})?;
verify_windowed_post(rt, current_deadline.challenge, §or_infos, params.proofs)
.map_err(|e| e.wrap("window post failed"))?;
}
let deadline_idx = params.deadline;
deadlines
.update_deadline(rt.store(), params.deadline, &deadline)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to update deadline {}", deadline_idx),
)
})?;
state.save_deadlines(rt.store(), deadlines).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to save deadlines")
})?;
Ok(post_result)
})?;
// Restore power for recovered sectors. Remove power for new faults.
// NOTE: It would be permissible to delay the power loss until the deadline closes, but that would require
// additional accounting state.
// https://github.com/filecoin-project/specs-actors/issues/414
request_update_power(rt, post_result.power_delta)?;
let state: State = rt.state()?;
state
.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariants broken: {}", e),
)
})?;
Ok(())
}
/// Checks state of the corresponding sector pre-commitments and verifies aggregate proof of replication
/// of these sectors. If valid, the sectors' deals are activated, sectors are assigned a deadline and charged pledge
/// and precommit state is removed.
fn prove_commit_aggregate<BS, RT>(
rt: &mut RT,
mut params: ProveCommitAggregateParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let sector_numbers = params.sector_numbers.validate().map_err(|e| {
actor_error!(
ErrIllegalState,
"Failed to validate bitfield for aggregated sectors: {}",
e
)
})?;
let agg_sectors_count = sector_numbers.len();
if agg_sectors_count > MAX_AGGREGATED_SECTORS {
return Err(actor_error!(
ErrIllegalArgument,
"too many sectors addressed, addressed {} want <= {}",
agg_sectors_count,
MAX_AGGREGATED_SECTORS
));
} else if agg_sectors_count < MIN_AGGREGATED_SECTORS {
return Err(actor_error!(
ErrIllegalArgument,
"too few sectors addressed, addressed {} want >= {}",
agg_sectors_count,
MIN_AGGREGATED_SECTORS
));
}
if params.aggregate_proof.len() > MAX_AGGREGATED_PROOF_SIZE {
return Err(actor_error!(
ErrIllegalArgument,
"sector prove-commit proof of size {} exceeds max size of {}",
params.aggregate_proof.len(),
MAX_AGGREGATED_PROOF_SIZE
));
}
let state: State = rt.state()?;
let info = get_miner_info(rt.store(), &state)?;
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
let store = rt.store();
let precommits = state
.get_all_precommitted_sectors(store, sector_numbers)
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to get precommits")
})?;
// compute data commitments and validate each precommit
let mut compute_data_commitments_inputs = Vec::with_capacity(precommits.len());
let mut precommits_to_confirm = Vec::new();
for (i, precommit) in precommits.iter().enumerate() {
let msd = max_prove_commit_duration(precommit.info.seal_proof).ok_or_else(|| {
actor_error!(
ErrIllegalState,
"no max seal duration for proof type: {}",
i64::from(precommit.info.seal_proof)
)
})?;
let prove_commit_due = precommit.pre_commit_epoch + msd;
if rt.curr_epoch() > prove_commit_due {
log::warn!(
"skipping commitment for sector {}, too late at {}, due {}",
precommit.info.sector_number,
rt.curr_epoch(),
prove_commit_due,
)
} else {
precommits_to_confirm.push(precommit.clone());
}
// All seal proof types should match
if i >= 1 {
let prev_seal_proof = precommits[i - 1].info.seal_proof;
if prev_seal_proof != precommit.info.seal_proof {
return Err(actor_error!(
ErrIllegalState,
"aggregate contains mismatched seal proofs {} and {}",
i64::from(prev_seal_proof),
i64::from(precommit.info.seal_proof)
));
}
}
compute_data_commitments_inputs.push(SectorDataSpec {
deal_ids: precommit.info.deal_ids.clone(),
sector_type: precommit.info.seal_proof,
});
}
let comm_ds = request_unsealed_sector_cids(rt, &compute_data_commitments_inputs)?;
let mut svis = Vec::new();
let miner_actor_id: u64 = if let Payload::ID(i) = rt.message().receiver().payload() {
*i
} else {
return Err(actor_error!(
ErrIllegalState,
"runtime provided non-ID receiver address {}",
rt.message().receiver()
));
};
let receiver_bytes = rt.message().receiver().marshal_cbor().map_err(|e| {
ActorError::from(e).wrap("failed to marshal address for seal verification challenge")
})?;
for (i, precommit) in precommits.iter().enumerate() {
let interactive_epoch = precommit.pre_commit_epoch + PRE_COMMIT_CHALLENGE_DELAY;
if rt.curr_epoch() <= interactive_epoch {
return Err(actor_error!(
ErrForbidden,
"too early to prove sector {}",
precommit.info.sector_number
));
}
let sv_info_randomness = rt.get_randomness_from_tickets(
SealRandomness,
precommit.info.seal_rand_epoch,
&receiver_bytes,
)?;
let sv_info_interactive_randomness = rt.get_randomness_from_beacon(
InteractiveSealChallengeSeed,
interactive_epoch,
&receiver_bytes,
)?;
let svi = AggregateSealVerifyInfo {
sector_number: precommit.info.sector_number,
randomness: sv_info_randomness,
interactive_randomness: sv_info_interactive_randomness,
sealed_cid: precommit.info.sealed_cid,
unsealed_cid: comm_ds[i],
};
svis.push(svi);
}
        if precommits.is_empty() {
            return Err(actor_error!(
                ErrIllegalState,
                "bitfield non-empty but zero precommits read from state"
            ));
        }
        let seal_proof = precommits[0].info.seal_proof;
rt.verify_aggregate_seals(&AggregateSealVerifyProofAndInfos {
miner: miner_actor_id,
seal_proof,
aggregate_proof: fil_types::RegisteredAggregateProof::SnarkPackV1,
proof: params.aggregate_proof,
infos: svis,
})
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalArgument, "aggregate seal verify failed")
})?;
let rew = request_current_epoch_block_reward(rt)?;
let pwr = request_current_total_power(rt)?;
confirm_sector_proofs_valid_internal(
rt,
precommits_to_confirm.clone(),
&rew.this_epoch_baseline_power,
&rew.this_epoch_reward_smoothed,
&pwr.quality_adj_power_smoothed,
)?;
// Compute and burn the aggregate network fee. We need to re-load the state as
// confirmSectorProofsValid can change it.
let state: State = rt.state()?;
let aggregate_fee =
aggregate_prove_commit_network_fee(precommits_to_confirm.len() as i64, rt.base_fee());
let unlocked_balance = state
.get_unlocked_balance(&rt.current_balance()?)
.map_err(|_e| actor_error!(ErrIllegalState, "failed to determine unlocked balance"))?;
if unlocked_balance < aggregate_fee {
return Err(actor_error!(
ErrInsufficientFunds,
"remaining unlocked funds after prove-commit {} are insufficient to pay aggregation fee of {}",
unlocked_balance,
aggregate_fee
));
}
burn_funds(rt, aggregate_fee)?;
state
.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariants broken: {}", e),
)
})?;
Ok(())
}
fn dispute_windowed_post<BS, RT>(
rt: &mut RT,
params: DisputeWindowedPoStParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
rt.validate_immediate_caller_type(CALLER_TYPES_SIGNABLE.iter())?;
let reporter = *rt.message().caller();
if params.deadline >= WPOST_PERIOD_DEADLINES as usize {
return Err(actor_error!(
ErrIllegalArgument,
"invalid deadline {} of {}",
params.deadline,
WPOST_PERIOD_DEADLINES
));
}
let current_epoch = rt.curr_epoch();
// Note: these are going to be slightly inaccurate as time
// will have moved on from when the post was actually
// submitted.
//
// However, these are estimates _anyways_.
let epoch_reward = request_current_epoch_block_reward(rt)?;
let power_total = request_current_total_power(rt)?;
let (pledge_delta, mut to_burn, power_delta, to_reward) =
rt.transaction(|st: &mut State, rt| {
let dl_info = st.deadline_info(current_epoch);
if !deadline_available_for_optimistic_post_dispute(
dl_info.period_start,
params.deadline,
current_epoch,
) {
return Err(actor_error!(
ErrForbidden,
"can only dispute window posts during the dispute window\
({} epochs after the challenge window closes)",
WPOST_DISPUTE_WINDOW
));
}
let info = get_miner_info(rt.store(), st)?;
// --- check proof ---
// Find the proving period start for the deadline in question.
let mut pp_start = dl_info.period_start;
if dl_info.index < params.deadline as u64 {
pp_start -= WPOST_PROVING_PERIOD
}
let target_deadline = new_deadline_info(pp_start, params.deadline, current_epoch);
// Load the target deadline
let mut deadlines_current = st
.load_deadlines(rt.store())
.map_err(|e| e.wrap("failed to load deadlines"))?;
let mut dl_current = deadlines_current
.load_deadline(rt.store(), params.deadline)
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to load deadline")
})?;
// Take the post from the snapshot for dispute.
// This operation REMOVES the PoSt from the snapshot so
// it can't be disputed again. If this method fails,
// this operation must be rolled back.
let (partitions, proofs) = dl_current
.take_post_proofs(rt.store(), params.post_index)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to load proof for dispute",
)
})?;
// Load the partition info we need for the dispute.
let mut dispute_info = dl_current
.load_partitions_for_dispute(rt.store(), partitions)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to load partition for dispute",
)
})?;
// This includes power that is no longer active (e.g., due to sector terminations).
// It must only be used for penalty calculations, not power adjustments.
let penalised_power = dispute_info.disputed_power.clone();
// Load sectors for the dispute.
let sectors = Sectors::load(rt.store(), &st.sectors).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to load sectors array")
})?;
let sector_infos = sectors
.load_for_proof(
&dispute_info.all_sector_nos,
&dispute_info.ignored_sector_nos,
)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to load sectors to dispute window post",
)
})?;
// Check proof, we fail if validation succeeds.
if verify_windowed_post(rt, target_deadline.challenge, §or_infos, proofs)? {
return Err(actor_error!(
ErrIllegalArgument,
"failed to dispute valid post"
));
} else {
                info!("Successfully disputed post: the window post was invalid");
}
// Ok, now we record faults. This always works because
// we don't allow compaction/moving sectors during the
// challenge window.
//
// However, some of these sectors may have been
// terminated. That's fine, we'll skip them.
let fault_expiration_epoch = target_deadline.last() + FAULT_MAX_AGE;
let power_delta = dl_current
.record_faults(
rt.store(),
§ors,
info.sector_size,
quant_spec_for_deadline(&target_deadline),
fault_expiration_epoch,
&mut dispute_info.disputed_sectors,
)
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to declare faults")
})?;
deadlines_current
.update_deadline(rt.store(), params.deadline, &dl_current)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to update deadline {}", params.deadline),
)
})?;
st.save_deadlines(rt.store(), deadlines_current)
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to save deadlines")
})?;
// --- penalties ---
// Calculate the base penalty.
let penalty_base = pledge_penalty_for_invalid_windowpost(
&epoch_reward.this_epoch_reward_smoothed,
&power_total.quality_adj_power_smoothed,
&penalised_power.qa,
);
// Calculate the target reward.
let reward_target =
reward_for_disputed_window_post(info.window_post_proof_type, penalised_power);
// Compute the target penalty by adding the
// base penalty to the target reward. We don't
// take reward out of the penalty as the miner
// could end up receiving a substantial
// portion of their fee back as a reward.
let penalty_target = &penalty_base + &reward_target;
st.apply_penalty(&penalty_target)
.map_err(|e| actor_error!(ErrIllegalState, "failed to apply penalty {}", e))?;
let (penalty_from_vesting, penalty_from_balance) = st
.repay_partial_debt_in_priority_order(
rt.store(),
current_epoch,
&rt.current_balance()?,
)
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to pay debt")
})?;
let to_burn = &penalty_from_vesting + &penalty_from_balance;
// Now, move as much of the target reward as
// we can from the burn to the reward.
let to_reward = std::cmp::min(&to_burn, &reward_target);
let to_burn = &to_burn - to_reward;
let pledge_delta = penalty_from_vesting.neg();
Ok((pledge_delta, to_burn, power_delta, to_reward.clone()))
})?;
request_update_power(rt, power_delta)?;
if !to_reward.is_zero() {
if let Err(e) = rt.send(
reporter,
METHOD_SEND,
Serialized::default(),
to_reward.clone(),
) {
error!("failed to send reward: {}", e);
to_burn += to_reward;
}
}
burn_funds(rt, to_burn)?;
notify_pledge_changed(rt, &pledge_delta)?;
let st: State = rt.state()?;
st.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariants broken: {}", e),
)
})?;
Ok(())
}
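    // Non-normative worked example (not part of the original actor) of the
    // burn/reward split at the end of `dispute_windowed_post`: suppose the
    // repaid debt is 5 units from vesting plus 4 from balance, and the target
    // reward is 4. The reward is carved out of the burn, the remainder is
    // burned, and pledge drops by the vested portion.
    #[cfg(test)]
    fn dispute_penalty_split_sketch() {
        let reward_target = TokenAmount::from(4u8);
        let penalty_from_vesting = TokenAmount::from(5u8);
        let penalty_from_balance = TokenAmount::from(4u8);
        let to_burn = &penalty_from_vesting + &penalty_from_balance; // 9
        let to_reward = std::cmp::min(&to_burn, &reward_target).clone(); // 4
        let to_burn = &to_burn - &to_reward; // 5 left to burn
        let pledge_delta = penalty_from_vesting.neg(); // -5
        assert_eq!(to_reward, TokenAmount::from(4u8));
        assert_eq!(to_burn, TokenAmount::from(5u8));
        assert_eq!(pledge_delta, -TokenAmount::from(5u8));
    }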
/// Pledges to seal and commit a single sector.
/// See PreCommitSectorBatch for details.
/// This method may be deprecated and removed in the future
fn pre_commit_sector<BS, RT>(
rt: &mut RT,
params: PreCommitSectorParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let batch_params = PreCommitSectorBatchParams {
sectors: vec![params],
};
Self::pre_commit_sector_batch(rt, batch_params)
}
/// Pledges the miner to seal and commit some new sectors.
/// The caller specifies sector numbers, sealed sector data CIDs, seal randomness epoch, expiration, and the IDs
/// of any storage deals contained in the sector data. The storage deal proposals must be already submitted
/// to the storage market actor.
/// A pre-commitment may specify an existing committed-capacity sector that the committed sector will replace
/// when proven.
/// This method calculates the sector's power, locks a pre-commit deposit for the sector, stores information about the
/// sector in state and waits for it to be proven or expire.
fn pre_commit_sector_batch<BS, RT>(
rt: &mut RT,
params: PreCommitSectorBatchParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let curr_epoch = rt.curr_epoch();
if params.sectors.is_empty() {
return Err(actor_error!(ErrIllegalArgument, "batch empty"));
} else if params.sectors.len() > PRE_COMMIT_SECTOR_BATCH_MAX_SIZE {
return Err(actor_error!(
ErrIllegalArgument,
"batch of {} too large, max {}",
params.sectors.len(),
PRE_COMMIT_SECTOR_BATCH_MAX_SIZE
));
}
// Check per-sector preconditions before opening state transaction or sending other messages.
let challenge_earliest = curr_epoch - MAX_PRE_COMMIT_RANDOMNESS_LOOKBACK;
let mut sectors_deals = Vec::with_capacity(params.sectors.len());
let mut sector_numbers = BitField::new();
for precommit in params.sectors.iter() {
let set = sector_numbers.get(precommit.sector_number as usize);
if set {
return Err(actor_error!(
ErrIllegalArgument,
"duplicate sector number {}",
precommit.sector_number
));
}
sector_numbers.set(precommit.sector_number as usize);
if !can_pre_commit_seal_proof(precommit.seal_proof) {
return Err(actor_error!(
ErrIllegalArgument,
"unsupported seal proof type {}",
i64::from(precommit.seal_proof)
));
}
if precommit.sector_number > MAX_SECTOR_NUMBER {
return Err(actor_error!(
ErrIllegalArgument,
"sector number {} out of range 0..(2^63-1)",
precommit.sector_number
));
}
            // No need to check whether the sealed CID is defined: a `Cid` cannot be undefined in Rust
if Prefix::from(precommit.sealed_cid) != SEALED_CID_PREFIX {
return Err(actor_error!(
ErrIllegalArgument,
"sealed CID had wrong prefix"
));
}
if precommit.seal_rand_epoch >= curr_epoch {
return Err(actor_error!(
ErrIllegalArgument,
"seal challenge epoch {} must be before now {}",
precommit.seal_rand_epoch,
curr_epoch
));
}
if precommit.seal_rand_epoch < challenge_earliest {
return Err(actor_error!(
ErrIllegalArgument,
"seal challenge epoch {} too old, must be after {}",
precommit.seal_rand_epoch,
challenge_earliest
));
}
// Require sector lifetime meets minimum by assuming activation happens at last epoch permitted for seal proof.
// This could make sector maximum lifetime validation more lenient if the maximum sector limit isn't hit first.
let max_activation =
curr_epoch + max_prove_commit_duration(precommit.seal_proof).unwrap_or_default();
validate_expiration(
rt,
max_activation,
precommit.expiration,
precommit.seal_proof,
)?;
if precommit.replace_capacity && precommit.deal_ids.is_empty() {
return Err(actor_error!(
ErrIllegalArgument,
"cannot replace sector without committing deals"
));
}
if precommit.replace_sector_deadline as u64 >= WPOST_PERIOD_DEADLINES {
return Err(actor_error!(
ErrIllegalArgument,
"invalid deadline {}",
precommit.replace_sector_deadline
));
}
if precommit.replace_sector_number > MAX_SECTOR_NUMBER {
return Err(actor_error!(
ErrIllegalArgument,
"invalid sector number {}",
precommit.replace_sector_number
));
}
sectors_deals.push(SectorDeals {
sector_expiry: precommit.expiration,
deal_ids: precommit.deal_ids.clone(),
})
}
// gather information from other actors
let reward_stats = request_current_epoch_block_reward(rt)?;
let power_total = request_current_total_power(rt)?;
let deal_weights = request_deal_weights(rt, §ors_deals)?;
if deal_weights.sectors.len() != params.sectors.len() {
return Err(actor_error!(
ErrIllegalState,
"deal weight request returned {} records, expected {}",
deal_weights.sectors.len(),
params.sectors.len()
));
}
let mut fee_to_burn = TokenAmount::from(0);
let mut needs_cron = false;
rt.transaction(|state: &mut State, rt|{
// Aggregate fee applies only when batching.
if params.sectors.len() > 1 {
let aggregate_fee = aggregate_pre_commit_network_fee(params.sectors.len() as i64, rt.base_fee());
// AggregateFee applied to fee debt to consolidate burn with outstanding debts
state.apply_penalty(&aggregate_fee)
.map_err(|e| {
actor_error!(
ErrIllegalState,
"failed to apply penalty: {}",
e
)
})?;
}
// available balance already accounts for fee debt so it is correct to call
// this before RepayDebts. We would have to
// subtract fee debt explicitly if we called this after.
let available_balance = state
.get_available_balance(&rt.current_balance()?)
.map_err(|e| {
actor_error!(
ErrIllegalState,
"failed to calculate available balance: {}",
e
)
})?;
fee_to_burn = repay_debts_or_abort(rt, state)?;
let info = get_miner_info(rt.store(), state)?;
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
let store = rt.store();
if consensus_fault_active(&info, curr_epoch) {
return Err(actor_error!(ErrForbidden, "pre-commit not allowed during active consensus fault"));
}
let mut chain_infos = Vec::with_capacity(params.sectors.len());
let mut total_deposit_required= BigInt::zero();
let mut clean_up_events: HashMap<ChainEpoch,Vec<u64>> = HashMap::new();
let deal_count_max = sector_deals_max(info.sector_size);
for (i, precommit) in params.sectors.iter().enumerate() {
// Sector must have the same Window PoSt proof type as the miner's recorded seal type.
let sector_wpost_proof = precommit.seal_proof
.registered_window_post_proof()
.map_err(|_e|
actor_error!(
ErrIllegalArgument,
"failed to lookup Window PoSt proof type for sector seal proof {}",
i64::from(precommit.seal_proof)
))?;
if sector_wpost_proof != info.window_post_proof_type {
                return Err(actor_error!(ErrIllegalArgument, "sector Window PoSt proof type {} must match miner Window PoSt proof type {} (seal proof type {})", i64::from(sector_wpost_proof), i64::from(info.window_post_proof_type), i64::from(precommit.seal_proof)));
}
if precommit.deal_ids.len() > deal_count_max as usize {
return Err(actor_error!(ErrIllegalArgument, "too many deals for sector {} > {}", precommit.deal_ids.len(), deal_count_max));
}
// Ensure total deal space does not exceed sector size.
let deal_weight = &deal_weights.sectors[i];
if deal_weight.deal_space > info.sector_size as u64 {
return Err(actor_error!(ErrIllegalArgument, "deals too large to fit in sector {} > {}", deal_weight.deal_space, info.sector_size));
}
if precommit.replace_capacity {
validate_replace_sector(state, store, precommit)?
}
// Estimate the sector weight using the current epoch as an estimate for activation,
// and compute the pre-commit deposit using that weight.
// The sector's power will be recalculated when it's proven.
let duration = precommit.expiration - curr_epoch;
let sector_weight = qa_power_for_weight(info.sector_size, duration, &deal_weight.deal_weight, &deal_weight.verified_deal_weight);
let deposit_req = pre_commit_deposit_for_power(&reward_stats.this_epoch_reward_smoothed,&power_total.quality_adj_power_smoothed , §or_weight);
// Build on-chain record.
chain_infos.push(SectorPreCommitOnChainInfo{
info: precommit.clone(),
pre_commit_deposit: deposit_req.clone(),
pre_commit_epoch: curr_epoch,
deal_weight: deal_weight.deal_weight.clone(),
verified_deal_weight: deal_weight.verified_deal_weight.clone(),
});
total_deposit_required += deposit_req;
// Calculate pre-commit cleanup
let msd = max_prove_commit_duration(precommit.seal_proof)
.ok_or_else(|| actor_error!(ErrIllegalArgument, "no max seal duration set for proof type: {}", i64::from(precommit.seal_proof)))?;
// PreCommitCleanUpDelay > 0 here is critical for the batch verification of proofs. Without it, if a proof arrived exactly on the
// due epoch, ProveCommitSector would accept it, then the expiry event would remove it, and then
// ConfirmSectorProofsValid would fail to find it.
let clean_up_bound = curr_epoch + msd + EXPIRED_PRE_COMMIT_CLEAN_UP_DELAY;
            clean_up_events
                .entry(clean_up_bound)
                .or_insert_with(Vec::new)
                .push(precommit.sector_number);
}
// Batch update actor state.
if available_balance < total_deposit_required {
return Err(actor_error!(ErrInsufficientFunds, "insufficient funds {} for pre-commit deposit: {}", available_balance, total_deposit_required));
}
state.add_pre_commit_deposit(&total_deposit_required)
.map_err(|e|
actor_error!(
ErrIllegalState,
"failed to add pre-commit deposit {}: {}",
total_deposit_required, e
))?;
state.allocate_sector_numbers(store, §or_numbers, CollisionPolicy::DenyCollisions)
.map_err(|e|
e.wrap("failed to allocate sector numbers")
)?;
state.put_precommitted_sectors(store, chain_infos)
.map_err(|e|
e.downcast_default(ExitCode::ErrIllegalState, "failed to write pre-committed sectors")
)?;
state.add_pre_commit_clean_ups(store, clean_up_events)
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to add pre-commit expiry to queue")
})?;
// Activate miner cron
needs_cron = !state.deadline_cron_active;
state.deadline_cron_active = true;
Ok(())
})?;
burn_funds(rt, fee_to_burn)?;
let state: State = rt.state()?;
state
.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariant broken: {}", e),
)
})?;
if needs_cron {
let new_dl_info = state.deadline_info(curr_epoch);
enroll_cron_event(
rt,
new_dl_info.last(),
CronEventPayload {
event_type: CRON_EVENT_PROVING_DEADLINE,
},
)?;
}
Ok(())
}
/// Checks state of the corresponding sector pre-commitment, then schedules the proof to be verified in bulk
/// by the power actor.
/// If valid, the power actor will call ConfirmSectorProofsValid at the end of the same epoch as this message.
fn prove_commit_sector<BS, RT>(
rt: &mut RT,
params: ProveCommitSectorParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
rt.validate_immediate_caller_accept_any()?;
if params.sector_number > MAX_SECTOR_NUMBER {
return Err(actor_error!(
ErrIllegalArgument,
"sector number greater than maximum"
));
}
let sector_number = params.sector_number;
let st: State = rt.state()?;
let precommit = st
.get_precommitted_sector(rt.store(), sector_number)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to load pre-committed sector {}", sector_number),
)
})?
            .ok_or_else(|| actor_error!(ErrNotFound, "no pre-committed sector {}", sector_number))?;
let max_proof_size = precommit.info.seal_proof.proof_size().map_err(|e| {
actor_error!(
ErrIllegalState,
"failed to determine max proof size for sector {}: {}",
sector_number,
e
)
})?;
if params.proof.len() > max_proof_size {
return Err(actor_error!(
ErrIllegalArgument,
"sector prove-commit proof of size {} exceeds max size of {}",
params.proof.len(),
max_proof_size
));
}
let msd = max_prove_commit_duration(precommit.info.seal_proof).ok_or_else(|| {
actor_error!(
ErrIllegalState,
"no max seal duration set for proof type: {:?}",
precommit.info.seal_proof
)
})?;
let prove_commit_due = precommit.pre_commit_epoch + msd;
if rt.curr_epoch() > prove_commit_due {
return Err(actor_error!(
ErrIllegalArgument,
"commitment proof for {} too late at {}, due {}",
sector_number,
rt.curr_epoch(),
prove_commit_due
));
}
let svi = get_verify_info(
rt,
SealVerifyParams {
sealed_cid: precommit.info.sealed_cid,
interactive_epoch: precommit.pre_commit_epoch + PRE_COMMIT_CHALLENGE_DELAY,
seal_rand_epoch: precommit.info.seal_rand_epoch,
proof: params.proof,
deal_ids: precommit.info.deal_ids.clone(),
sector_num: precommit.info.sector_number,
registered_seal_proof: precommit.info.seal_proof,
},
)?;
rt.send(
*STORAGE_POWER_ACTOR_ADDR,
PowerMethod::SubmitPoRepForBulkVerify as u64,
Serialized::serialize(&svi)?,
BigInt::zero(),
)?;
Ok(())
}
fn confirm_sector_proofs_valid<BS, RT>(
rt: &mut RT,
params: ConfirmSectorProofsParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
rt.validate_immediate_caller_is(iter::once(&*STORAGE_POWER_ACTOR_ADDR))?;
// This should be enforced by the power actor. We log here just in case
// something goes wrong.
if params.sectors.len() > MAX_MINER_PROVE_COMMITS_PER_EPOCH {
warn!(
"confirmed more prove commits in an epoch than permitted: {} > {}",
params.sectors.len(),
MAX_MINER_PROVE_COMMITS_PER_EPOCH
);
}
let st: State = rt.state()?;
let store = rt.store();
// This skips missing pre-commits.
        let precommitted_sectors = st
.find_precommitted_sectors(store, ¶ms.sectors)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to load pre-committed sectors",
)
})?;
confirm_sector_proofs_valid_internal(
rt,
            precommitted_sectors,
¶ms.reward_baseline_power,
¶ms.reward_smoothed,
¶ms.quality_adj_power_smoothed,
)
}
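    /// Checks whether a sector number is proven, i.e. present in the miner's sector set.
    /// Returns ErrNotFound if the sector has not been proven.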
fn check_sector_proven<BS, RT>(
rt: &mut RT,
params: CheckSectorProvenParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
rt.validate_immediate_caller_accept_any()?;
if params.sector_number > MAX_SECTOR_NUMBER {
return Err(actor_error!(
ErrIllegalArgument,
"sector number out of range"
));
}
let st: State = rt.state()?;
match st.get_sector(rt.store(), params.sector_number) {
Err(e) => Err(actor_error!(
ErrIllegalState,
"failed to load proven sector {}: {}",
params.sector_number,
e
)),
Ok(None) => Err(actor_error!(
ErrNotFound,
"sector {} not proven",
params.sector_number
)),
Ok(Some(_sector)) => Ok(()),
}
}
/// Changes the expiration epoch for a sector to a new, later one.
/// The sector must not be terminated or faulty.
/// The sector's power is recomputed for the new expiration.
fn extend_sector_expiration<BS, RT>(
rt: &mut RT,
mut params: ExtendSectorExpirationParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
if params.extensions.len() as u64 > DELCARATIONS_MAX {
return Err(actor_error!(
ErrIllegalArgument,
"too many declarations {}, max {}",
params.extensions.len(),
DELCARATIONS_MAX
));
}
// limit the number of sectors declared at once
// https://github.com/filecoin-project/specs-actors/issues/416
let mut sector_count: u64 = 0;
for decl in &mut params.extensions {
if decl.deadline >= WPOST_PERIOD_DEADLINES as usize {
return Err(actor_error!(
ErrIllegalArgument,
"deadline {} not in range 0..{}",
decl.deadline,
WPOST_PERIOD_DEADLINES
));
}
let sectors = match decl.sectors.validate() {
Ok(sectors) => sectors,
Err(e) => {
return Err(actor_error!(
ErrIllegalArgument,
"failed to validate sectors for deadline {}, partition {}: {}",
decl.deadline,
decl.partition,
e
))
}
};
match sector_count.checked_add(sectors.len() as u64) {
Some(sum) => sector_count = sum,
None => {
return Err(actor_error!(
ErrIllegalArgument,
"sector bitfield integer overflow"
));
}
}
}
if sector_count > ADDRESSED_SECTORS_MAX {
return Err(actor_error!(
ErrIllegalArgument,
"too many sectors for declaration {}, max {}",
sector_count,
ADDRESSED_SECTORS_MAX
));
}
let curr_epoch = rt.curr_epoch();
let (power_delta, pledge_delta) = rt.transaction(|state: &mut State, rt| {
let info = get_miner_info(rt.store(), state)?;
let nv = rt.network_version();
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
let store = rt.store();
let mut deadlines = state
.load_deadlines(rt.store())
.map_err(|e| e.wrap("failed to load deadlines"))?;
// Group declarations by deadline, and remember iteration order.
let mut decls_by_deadline = HashMap::<usize, Vec<ExpirationExtension>>::new();
let mut deadlines_to_load = Vec::<usize>::new();
for decl in params.extensions {
decls_by_deadline
.entry(decl.deadline)
.or_insert_with(|| {
deadlines_to_load.push(decl.deadline);
Vec::new()
})
.push(decl);
}
let mut sectors = Sectors::load(rt.store(), &state.sectors).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to load sectors array")
})?;
let mut power_delta = PowerPair::zero();
let mut pledge_delta = TokenAmount::zero();
for deadline_idx in deadlines_to_load {
let mut deadline = deadlines.load_deadline(store, deadline_idx).map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to load deadline {}", deadline_idx),
)
})?;
let mut partitions = deadline.partitions_amt(store).map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to load partitions for deadline {}", deadline_idx),
)
})?;
let quant = state.quant_spec_for_deadline(deadline_idx);
// Group modified partitions by epoch to which they are extended. Duplicates are ok.
let mut partitions_by_new_epoch = HashMap::<ChainEpoch, Vec<usize>>::new();
let mut epochs_to_reschedule = Vec::<ChainEpoch>::new();
for decl in decls_by_deadline.get_mut(&deadline_idx).unwrap() {
let key = PartitionKey {
deadline: deadline_idx,
partition: decl.partition,
};
let mut partition = partitions
.get(decl.partition)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to load partition {:?}", key),
)
})?
.cloned()
.ok_or_else(|| actor_error!(ErrNotFound, "no such partition {:?}", key))?;
let old_sectors = sectors
.load_sector(&mut decl.sectors)
.map_err(|e| e.wrap("failed to load sectors"))?;
let new_sectors: Vec<SectorOnChainInfo> = old_sectors
.iter()
.map(|sector| {
if !can_extend_seal_proof_type(sector.seal_proof, nv) {
return Err(actor_error!(
ErrForbidden,
"cannot extend expiration for sector {} with unsupported \
seal type {:?}",
sector.sector_number,
sector.seal_proof
));
}
// This can happen if the sector should have already expired, but hasn't
// because the end of its deadline hasn't passed yet.
if sector.expiration < rt.curr_epoch() {
return Err(actor_error!(
ErrForbidden,
"cannot extend expiration for expired sector {} at {}",
sector.sector_number,
sector.expiration
));
}
if decl.new_expiration < sector.expiration {
return Err(actor_error!(
ErrIllegalArgument,
"cannot reduce sector {} expiration to {} from {}",
sector.sector_number,
decl.new_expiration,
sector.expiration
));
}
validate_expiration(
rt,
sector.activation,
decl.new_expiration,
sector.seal_proof,
)?;
// Remove "spent" deal weights
let new_deal_weight = (§or.deal_weight
* (sector.expiration - curr_epoch))
.div_floor(&BigInt::from(sector.expiration - sector.activation));
let new_verified_deal_weight = (§or.verified_deal_weight
* (sector.expiration - curr_epoch))
.div_floor(&BigInt::from(sector.expiration - sector.activation));
let mut sector = sector.clone();
sector.expiration = decl.new_expiration;
sector.deal_weight = new_deal_weight;
sector.verified_deal_weight = new_verified_deal_weight;
Ok(sector)
})
.collect::<Result<_, _>>()?;
// Overwrite sector infos.
sectors.store(new_sectors.clone()).map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to update sectors {:?}", decl.sectors),
)
})?;
// Remove old sectors from partition and assign new sectors.
let (partition_power_delta, partition_pledge_delta) = partition
.replace_sectors(store, &old_sectors, &new_sectors, info.sector_size, quant)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to replace sector expirations at {:?}", key),
)
})?;
power_delta += &partition_power_delta;
pledge_delta += partition_pledge_delta; // expected to be zero, see note below.
partitions.set(decl.partition, partition).map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to save partition {:?}", key),
)
})?;
// Record the new partition expiration epoch for setting outside this loop
// over declarations.
let prev_epoch_partitions = partitions_by_new_epoch.entry(decl.new_expiration);
let not_exists = matches!(prev_epoch_partitions, Entry::Vacant(_));
// Add declaration partition
prev_epoch_partitions
.or_insert_with(Vec::new)
.push(decl.partition);
if not_exists {
// reschedule epoch if the partition for new epoch didn't already exist
epochs_to_reschedule.push(decl.new_expiration);
}
}
deadline.partitions = partitions.flush().map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to save partitions for deadline {}", deadline_idx),
)
})?;
// Record partitions in deadline expiration queue
for epoch in epochs_to_reschedule {
let p_idxs = partitions_by_new_epoch.get(&epoch).unwrap();
deadline
.add_expiration_partitions(store, epoch, p_idxs, quant)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!(
"failed to add expiration partitions to \
deadline {} epoch {}",
deadline_idx, epoch
),
)
})?;
}
deadlines
.update_deadline(store, deadline_idx, &deadline)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to save deadline {}", deadline_idx),
)
})?;
}
state.sectors = sectors.amt.flush().map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to save sectors")
})?;
state.save_deadlines(store, deadlines).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to save deadlines")
})?;
Ok((power_delta, pledge_delta))
})?;
request_update_power(rt, power_delta)?;
// Note: the pledge delta is expected to be zero, since pledge is not re-calculated for the extension.
// But in case that ever changes, we can do the right thing here.
notify_pledge_changed(rt, &pledge_delta)?;
Ok(())
}
/// Marks some sectors as terminated at the present epoch, earlier than their
/// scheduled termination, and adds these sectors to the early termination queue.
/// This method then processes up to AddressedSectorsMax sectors and
/// AddressedPartitionsMax partitions from the early termination queue,
/// terminating deals, paying fines, and returning pledge collateral. While
/// sectors remain in this queue:
///
/// 1. The miner will be unable to withdraw funds.
/// 2. The chain will process up to AddressedSectorsMax sectors and
/// AddressedPartitionsMax per epoch until the queue is empty.
///
/// The sectors are immediately ignored for Window PoSt proofs, and should be
/// masked in the same way as faulty sectors. A miner may not terminate sectors in the
/// current deadline or the next deadline to be proven.
///
/// This function may be invoked with no new sectors to explicitly process the
/// next batch of sectors.
fn terminate_sectors<BS, RT>(
rt: &mut RT,
params: TerminateSectorsParams,
) -> Result<TerminateSectorsReturn, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
        // Note: this cannot terminate pre-committed but un-proven sectors.
        // They must be allowed to expire (and their deposit burnt).
if params.terminations.len() as u64 > DELCARATIONS_MAX {
return Err(actor_error!(
ErrIllegalArgument,
"too many declarations when terminating sectors: {} > {}",
params.terminations.len(),
DELCARATIONS_MAX
));
}
let mut to_process = DeadlineSectorMap::new();
for term in params.terminations {
let deadline = term.deadline;
let partition = term.partition;
to_process
.add(deadline, partition, term.sectors)
.map_err(|e| {
actor_error!(
ErrIllegalArgument,
"failed to process deadline {}, partition {}: {}",
deadline,
partition,
e
)
})?;
}
to_process
.check(ADDRESSED_PARTITIONS_MAX, ADDRESSED_SECTORS_MAX)
.map_err(|e| {
actor_error!(
ErrIllegalArgument,
"cannot process requested parameters: {}",
e
)
})?;
let (had_early_terminations, power_delta) = rt.transaction(|state: &mut State, rt| {
let had_early_terminations = have_pending_early_terminations(state);
let info = get_miner_info(rt.store(), state)?;
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
let store = rt.store();
let curr_epoch = rt.curr_epoch();
let mut power_delta = PowerPair::zero();
let mut deadlines = state
.load_deadlines(store)
.map_err(|e| e.wrap("failed to load deadlines"))?;
// We're only reading the sectors, so there's no need to save this back.
// However, we still want to avoid re-loading this array per-partition.
let sectors = Sectors::load(store, &state.sectors).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to load sectors")
})?;
for (deadline_idx, partition_sectors) in to_process.iter() {
                // If the deadline is the current or next deadline to be proven, don't allow terminating sectors.
// We assume that deadlines are immutable when being proven.
if !deadline_is_mutable(
state.current_proving_period_start(curr_epoch),
deadline_idx,
curr_epoch,
) {
return Err(actor_error!(
ErrIllegalArgument,
"cannot terminate sectors in immutable deadline {}",
deadline_idx
));
}
let quant = state.quant_spec_for_deadline(deadline_idx);
let mut deadline = deadlines.load_deadline(store, deadline_idx).map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to load deadline {}", deadline_idx),
)
})?;
let removed_power = deadline
.terminate_sectors(
store,
§ors,
curr_epoch,
partition_sectors,
info.sector_size,
quant,
)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to terminate sectors in deadline {}", deadline_idx),
)
})?;
state.early_terminations.set(deadline_idx as usize);
power_delta -= &removed_power;
deadlines
.update_deadline(store, deadline_idx, &deadline)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to update deadline {}", deadline_idx),
)
})?;
}
state.save_deadlines(store, deadlines).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to save deadlines")
})?;
Ok((had_early_terminations, power_delta))
})?;
let epoch_reward = request_current_epoch_block_reward(rt)?;
let pwr_total = request_current_total_power(rt)?;
// Now, try to process these sectors.
let more = process_early_terminations(
rt,
&epoch_reward.this_epoch_reward_smoothed,
&pwr_total.quality_adj_power_smoothed,
)?;
if more && !had_early_terminations {
            // We have remaining terminations, and we didn't _previously_ have
            // early terminations to process, so schedule a cron job.
            // NOTE: This isn't quite correct. If we repeatedly fill, empty,
            // fill, and empty the queue, we'll keep scheduling new cron
            // jobs. However, in practice, that shouldn't be all that bad.
schedule_early_termination_work(rt)?;
}
let state: State = rt.state()?;
state
.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariant broken: {}", e),
)
})?;
request_update_power(rt, power_delta)?;
Ok(TerminateSectorsReturn { done: !more })
}
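    /// Declares sectors in the specified deadlines and partitions as faulty, removing
    /// their power immediately. Payment of the fault fee is deferred to the deadline cron.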
fn declare_faults<BS, RT>(rt: &mut RT, params: DeclareFaultsParams) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
if params.faults.len() as u64 > DELCARATIONS_MAX {
return Err(actor_error!(
ErrIllegalArgument,
"too many fault declarations for a single message: {} > {}",
params.faults.len(),
DELCARATIONS_MAX
));
}
let mut to_process = DeadlineSectorMap::new();
for term in params.faults {
let deadline = term.deadline;
let partition = term.partition;
to_process
.add(deadline, partition, term.sectors)
.map_err(|e| {
actor_error!(
ErrIllegalArgument,
"failed to process deadline {}, partition {}: {}",
deadline,
partition,
e
)
})?;
}
to_process
.check(ADDRESSED_PARTITIONS_MAX, ADDRESSED_SECTORS_MAX)
.map_err(|e| {
actor_error!(
ErrIllegalArgument,
"cannot process requested parameters: {}",
e
)
})?;
let power_delta = rt.transaction(|state: &mut State, rt| {
let info = get_miner_info(rt.store(), state)?;
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
let store = rt.store();
let mut deadlines = state
.load_deadlines(store)
.map_err(|e| e.wrap("failed to load deadlines"))?;
let sectors = Sectors::load(store, &state.sectors).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to load sectors array")
})?;
let mut new_fault_power_total = PowerPair::zero();
let curr_epoch = rt.curr_epoch();
for (deadline_idx, partition_map) in to_process.iter() {
let target_deadline = declaration_deadline_info(
state.current_proving_period_start(curr_epoch),
deadline_idx,
curr_epoch,
)
.map_err(|e| {
actor_error!(
ErrIllegalArgument,
"invalid fault declaration deadline {}: {}",
deadline_idx,
e
)
})?;
validate_fr_declaration_deadline(&target_deadline).map_err(|e| {
actor_error!(
ErrIllegalArgument,
"failed fault declaration at deadline {}: {}",
deadline_idx,
e
)
})?;
let mut deadline = deadlines.load_deadline(store, deadline_idx).map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to load deadline {}", deadline_idx),
)
})?;
let fault_expiration_epoch = target_deadline.last() + FAULT_MAX_AGE;
let deadline_power_delta = deadline
.record_faults(
store,
§ors,
info.sector_size,
target_deadline.quant_spec(),
fault_expiration_epoch,
partition_map,
)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to declare faults for deadline {}", deadline_idx),
)
})?;
deadlines
.update_deadline(store, deadline_idx, &deadline)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to store deadline {} partitions", deadline_idx),
)
})?;
new_fault_power_total += &deadline_power_delta;
}
state.save_deadlines(store, deadlines).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to save deadlines")
})?;
Ok(new_fault_power_total)
})?;
// Remove power for new faulty sectors.
// NOTE: It would be permissible to delay the power loss until the deadline closes, but that would require
// additional accounting state.
// https://github.com/filecoin-project/specs-actors/issues/414
request_update_power(rt, power_delta)?;
// Payment of penalty for declared faults is deferred to the deadline cron.
Ok(())
}
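    /// Declares faulty sectors as recovering. Any outstanding fee debt is repaid (and
    /// burnt) first, and recovery is forbidden while a consensus fault is active. Power
    /// is restored only once the recovered sectors are successfully proven in a Window PoSt.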
fn declare_faults_recovered<BS, RT>(
rt: &mut RT,
params: DeclareFaultsRecoveredParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
if params.recoveries.len() as u64 > DELCARATIONS_MAX {
return Err(actor_error!(
ErrIllegalArgument,
"too many recovery declarations for a single message: {} > {}",
params.recoveries.len(),
DELCARATIONS_MAX
));
}
let mut to_process = DeadlineSectorMap::new();
for term in params.recoveries {
let deadline = term.deadline;
let partition = term.partition;
to_process
.add(deadline, partition, term.sectors)
.map_err(|e| {
actor_error!(
ErrIllegalArgument,
"failed to process deadline {}, partition {}: {}",
deadline,
partition,
e
)
})?;
}
to_process
.check(ADDRESSED_PARTITIONS_MAX, ADDRESSED_SECTORS_MAX)
.map_err(|e| {
actor_error!(
ErrIllegalArgument,
"cannot process requested parameters: {}",
e
)
})?;
let fee_to_burn = rt.transaction(|state: &mut State, rt| {
// Verify unlocked funds cover both InitialPledgeRequirement and FeeDebt
// and repay fee debt now.
let fee_to_burn = repay_debts_or_abort(rt, state)?;
let info = get_miner_info(rt.store(), state)?;
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
if consensus_fault_active(&info, rt.curr_epoch()) {
return Err(actor_error!(
ErrForbidden,
"recovery not allowed during active consensus fault"
));
}
let store = rt.store();
let mut deadlines = state
.load_deadlines(store)
.map_err(|e| e.wrap("failed to load deadlines"))?;
let sectors = Sectors::load(store, &state.sectors).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to load sectors array")
})?;
let curr_epoch = rt.curr_epoch();
for (deadline_idx, partition_map) in to_process.iter() {
let target_deadline = declaration_deadline_info(
state.current_proving_period_start(curr_epoch),
deadline_idx,
curr_epoch,
)
.map_err(|e| {
actor_error!(
ErrIllegalArgument,
"invalid recovery declaration deadline {}: {}",
deadline_idx,
e
)
})?;
validate_fr_declaration_deadline(&target_deadline).map_err(|e| {
actor_error!(
ErrIllegalArgument,
"failed recovery declaration at deadline {}: {}",
deadline_idx,
e
)
})?;
let mut deadline = deadlines.load_deadline(store, deadline_idx).map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to load deadline {}", deadline_idx),
)
})?;
deadline
.declare_faults_recovered(store, §ors, info.sector_size, partition_map)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to declare recoveries for deadline {}", deadline_idx),
)
})?;
deadlines
.update_deadline(store, deadline_idx, &deadline)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to store deadline {}", deadline_idx),
)
})?;
}
state.save_deadlines(store, deadlines).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to save deadlines")
})?;
Ok(fee_to_burn)
})?;
burn_funds(rt, fee_to_burn)?;
let state: State = rt.state()?;
state
.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariants broken: {}", e),
)
})?;
        // Power is not restored yet; it is restored only when the recovered sectors are
        // successfully proven in a Window PoSt.
Ok(())
}
/// Compacts a number of partitions at one deadline by removing terminated sectors, re-ordering the remaining sectors,
/// and assigning them to new partitions so as to completely fill all but one partition with live sectors.
/// The addressed partitions are removed from the deadline, and new ones appended.
/// The final partition in the deadline is always included in the compaction, whether or not explicitly requested.
/// Removed sectors are removed from state entirely.
/// May not be invoked if the deadline has any un-processed early terminations.
fn compact_partitions<BS, RT>(
rt: &mut RT,
mut params: CompactPartitionsParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
if params.deadline >= WPOST_PERIOD_DEADLINES as usize {
return Err(actor_error!(
ErrIllegalArgument,
"invalid deadline {}",
params.deadline
));
}
let partitions = params.partitions.validate().map_err(|e| {
actor_error!(
ErrIllegalArgument,
"failed to parse partitions bitfield: {}",
e
)
})?;
let partition_count = partitions.len() as u64;
let params_deadline = params.deadline;
rt.transaction(|state: &mut State, rt| {
let info = get_miner_info(rt.store(), state)?;
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
let store = rt.store();
if !deadline_available_for_compaction(
state.current_proving_period_start(rt.curr_epoch()),
params_deadline,
rt.curr_epoch(),
) {
return Err(actor_error!(
ErrForbidden,
"cannot compact deadline {} during its challenge window, \
or the prior challenge window,
or before {} epochs have passed since its last challenge window ended",
params_deadline,
WPOST_DISPUTE_WINDOW
));
}
let submission_partition_limit =
load_partitions_sectors_max(info.window_post_partition_sectors);
if partition_count > submission_partition_limit {
return Err(actor_error!(
ErrIllegalArgument,
"too many partitions {}, limit {}",
partition_count,
submission_partition_limit
));
}
let quant = state.quant_spec_for_deadline(params_deadline);
let mut deadlines = state
.load_deadlines(store)
.map_err(|e| e.wrap("failed to load deadlines"))?;
let mut deadline = deadlines
.load_deadline(store, params_deadline)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to load deadline {}", params_deadline),
)
})?;
let (live, dead, removed_power) = deadline
.remove_partitions(store, partitions, quant)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!(
"failed to remove partitions from deadline {}",
params_deadline
),
)
})?;
state.delete_sectors(store, &dead).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to delete dead sectors")
})?;
let sectors = state.load_sector_infos(store, &live).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to load moved sectors")
})?;
let proven = true;
let added_power = deadline
.add_sectors(
store,
info.window_post_partition_sectors,
proven,
§ors,
info.sector_size,
quant,
)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to add back moved sectors",
)
})?;
if removed_power != added_power {
return Err(actor_error!(
ErrIllegalState,
"power changed when compacting partitions: was {:?}, is now {:?}",
removed_power,
added_power
));
}
deadlines
.update_deadline(store, params_deadline, &deadline)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to update deadline {}", params_deadline),
)
})?;
state.save_deadlines(store, deadlines).map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to save deadline {}", params_deadline),
)
})?;
Ok(())
})?;
Ok(())
}
/// Compacts sector number allocations to reduce the size of the allocated sector
/// number bitfield.
///
/// When allocating sector numbers sequentially, or in sequential groups, this
/// bitfield should remain fairly small. However, if the bitfield grows large
/// enough such that PreCommitSector fails (or becomes expensive), this method
/// can be called to mask out (throw away) entire ranges of unused sector IDs.
/// For example, if sectors 1-99 and 101-200 have been allocated, sector number
/// 99 can be masked out to collapse these two ranges into one.
fn compact_sector_numbers<BS, RT>(
rt: &mut RT,
mut params: CompactSectorNumbersParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let mask_sector_numbers = params
.mask_sector_numbers
.validate()
.map_err(|e| actor_error!(ErrIllegalArgument, "invalid mask bitfield: {}", e))?;
let last_sector_number = mask_sector_numbers
.iter()
.last()
            .ok_or_else(|| actor_error!(ErrIllegalArgument, "empty mask bitfield"))?
as SectorNumber;
#[allow(clippy::absurd_extreme_comparisons)]
if last_sector_number > MAX_SECTOR_NUMBER {
return Err(actor_error!(
ErrIllegalArgument,
"masked sector number {} exceeded max sector number",
last_sector_number
));
}
rt.transaction(|state: &mut State, rt| {
let info = get_miner_info(rt.store(), state)?;
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
state.allocate_sector_numbers(
rt.store(),
mask_sector_numbers,
CollisionPolicy::AllowCollisions,
)
})?;
Ok(())
}
    /// Locks up some amount of the miner's unlocked balance (including funds received alongside the invoking message).
fn apply_rewards<BS, RT>(rt: &mut RT, params: ApplyRewardParams) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
if params.reward.is_negative() {
return Err(actor_error!(
ErrIllegalArgument,
"cannot lock up a negative amount of funds"
));
}
if params.penalty.is_negative() {
return Err(actor_error!(
ErrIllegalArgument,
"cannot penalize a negative amount of funds"
));
}
let (pledge_delta_total, to_burn) = rt.transaction(|st: &mut State, rt| {
let mut pledge_delta_total = TokenAmount::zero();
rt.validate_immediate_caller_is(std::iter::once(&*REWARD_ACTOR_ADDR))?;
let (reward_to_lock, locked_reward_vesting_spec) =
locked_reward_from_reward(params.reward);
            // This ensures the miner has sufficient funds to lock up `reward_to_lock`.
            // This should always be true if the reward actor sends reward funds with the message.
let unlocked_balance =
st.get_unlocked_balance(&rt.current_balance()?)
.map_err(|e| {
actor_error!(
ErrIllegalState,
"failed to calculate unlocked balance: {}",
e
)
})?;
if unlocked_balance < reward_to_lock {
return Err(actor_error!(
ErrInsufficientFunds,
"insufficient funds to lock, available: {}, requested: {}",
unlocked_balance,
reward_to_lock
));
}
let newly_vested = st
.add_locked_funds(
rt.store(),
rt.curr_epoch(),
&reward_to_lock,
locked_reward_vesting_spec,
)
.map_err(|e| {
actor_error!(
ErrIllegalState,
"failed to lock funds in vesting table: {}",
e
)
})?;
pledge_delta_total -= &newly_vested;
pledge_delta_total += &reward_to_lock;
st.apply_penalty(¶ms.penalty)
.map_err(|e| actor_error!(ErrIllegalState, "failed to apply penalty: {}", e))?;
// Attempt to repay all fee debt in this call. In most cases the miner will have enough
// funds in the *reward alone* to cover the penalty. In the rare case a miner incurs more
// penalty than it can pay for with reward and existing funds, it will go into fee debt.
let (penalty_from_vesting, penalty_from_balance) = st
.repay_partial_debt_in_priority_order(
rt.store(),
rt.curr_epoch(),
&rt.current_balance()?,
)
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to repay penalty")
})?;
pledge_delta_total -= &penalty_from_vesting;
let to_burn = penalty_from_vesting + penalty_from_balance;
Ok((pledge_delta_total, to_burn))
})?;
notify_pledge_changed(rt, &pledge_delta_total)?;
burn_funds(rt, to_burn)?;
let st: State = rt.state()?;
st.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariants broken: {}", e),
)
})?;
Ok(())
}
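    /// Verifies a reported consensus fault, penalizes the miner, and rewards the reporter
    /// with a share of the funds burnt. Only the first report of a fault is processed;
    /// subsequent reports are rejected until the exclusion period has elapsed.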
fn report_consensus_fault<BS, RT>(
rt: &mut RT,
params: ReportConsensusFaultParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
// Note: only the first report of any fault is processed because it sets the
// ConsensusFaultElapsed state variable to an epoch after the fault, and reports prior to
// that epoch are no longer valid
rt.validate_immediate_caller_type(CALLER_TYPES_SIGNABLE.iter())?;
let reporter = *rt.message().caller();
let fault = rt
.verify_consensus_fault(¶ms.header1, ¶ms.header2, ¶ms.header_extra)
.map_err(|e| e.downcast_default(ExitCode::ErrIllegalArgument, "fault not verified"))?
.ok_or_else(|| actor_error!(ErrIllegalArgument, "No consensus fault found"))?;
if fault.target != *rt.message().receiver() {
return Err(actor_error!(
ErrIllegalArgument,
"fault by {} reported to miner {}",
fault.target,
rt.message().receiver()
));
}
// Elapsed since the fault (i.e. since the higher of the two blocks)
let fault_age = rt.curr_epoch() - fault.epoch;
if fault_age <= 0 {
return Err(actor_error!(
ErrIllegalArgument,
"invalid fault epoch {} ahead of current {}",
fault.epoch,
rt.curr_epoch()
));
}
// Reward reporter with a share of the miner's current balance.
let reward_stats = request_current_epoch_block_reward(rt)?;
        // The policy amounts we should burn and send to the reporter.
        // These may differ from the actual funds sent when the miner goes into fee debt.
let this_epoch_reward = reward_stats.this_epoch_reward_smoothed.estimate();
let fault_penalty = consensus_fault_penalty(this_epoch_reward.clone());
let slasher_reward = reward_for_consensus_slash_report(&this_epoch_reward);
let mut pledge_delta = TokenAmount::from(0);
let (burn_amount, reward_amount) = rt.transaction(|st: &mut State, rt| {
let mut info = get_miner_info(rt.store(), st)?;
// Verify miner hasn't already been faulted
if fault.epoch < info.consensus_fault_elapsed {
return Err(actor_error!(
ErrForbidden,
"fault epoch {} is too old, last exclusion period ended at {}",
fault.epoch,
info.consensus_fault_elapsed
));
}
st.apply_penalty(&fault_penalty).map_err(|e| {
actor_error!(ErrIllegalState, format!("failed to apply penalty: {}", e))
})?;
// Pay penalty
let (penalty_from_vesting, penalty_from_balance) = st
.repay_partial_debt_in_priority_order(
rt.store(),
rt.curr_epoch(),
&rt.current_balance()?,
)
.map_err(|e| e.downcast_default(ExitCode::ErrIllegalState, "failed to pay fees"))?;
let mut burn_amount = &penalty_from_vesting + &penalty_from_balance;
pledge_delta -= penalty_from_vesting;
// clamp reward at funds burnt
let reward_amount = std::cmp::min(&burn_amount, &slasher_reward).clone();
burn_amount -= &reward_amount;
info.consensus_fault_elapsed = rt.curr_epoch() + CONSENSUS_FAULT_INELIGIBILITY_DURATION;
st.save_info(rt.store(), &info).map_err(|e| {
e.downcast_default(ExitCode::ErrSerialization, "failed to save miner info")
})?;
Ok((burn_amount, reward_amount))
})?;
if let Err(e) = rt.send(reporter, METHOD_SEND, Serialized::default(), reward_amount) {
error!("failed to send reward: {}", e);
}
burn_funds(rt, burn_amount)?;
notify_pledge_changed(rt, &pledge_delta)?;
let state: State = rt.state()?;
state
.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariants broken: {}", e),
)
})?;
Ok(())
}
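    /// Withdraws up to the requested amount of the miner's available balance to the owner
    /// address. Forbidden while any early terminations remain unprocessed; outstanding fee
    /// debt is repaid (and burnt) before funds are sent.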
fn withdraw_balance<BS, RT>(
rt: &mut RT,
params: WithdrawBalanceParams,
) -> Result<WithdrawBalanceReturn, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
if params.amount_requested.is_negative() {
return Err(actor_error!(
ErrIllegalArgument,
"negative fund requested for withdrawal: {}",
params.amount_requested
));
}
let (info, newly_vested, fee_to_burn, available_balance, state) =
rt.transaction(|state: &mut State, rt| {
let info = get_miner_info(rt.store(), state)?;
// Only the owner is allowed to withdraw the balance as it belongs to/is controlled by the owner
// and not the worker.
rt.validate_immediate_caller_is(&[info.owner])?;
// Ensure we don't have any pending terminations.
if !state.early_terminations.is_empty() {
return Err(actor_error!(
ErrForbidden,
"cannot withdraw funds while {} deadlines have terminated sectors \
with outstanding fees",
state.early_terminations.len()
));
}
// Unlock vested funds so we can spend them.
let newly_vested = state
.unlock_vested_funds(rt.store(), rt.curr_epoch())
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "Failed to vest fund")
})?;
                // The available balance already accounts for fee debt, so it is correct to
                // call this before repaying debts. We would have to subtract fee debt
                // explicitly if we called this after.
let available_balance = state
.get_available_balance(&rt.current_balance()?)
.map_err(|e| {
actor_error!(
ErrIllegalState,
format!("failed to calculate available balance: {}", e)
)
})?;
// Verify unlocked funds cover both InitialPledgeRequirement and FeeDebt
// and repay fee debt now.
let fee_to_burn = repay_debts_or_abort(rt, state)?;
Ok((
info,
newly_vested,
fee_to_burn,
available_balance,
state.clone(),
))
})?;
let amount_withdrawn = std::cmp::min(&available_balance, ¶ms.amount_requested);
if amount_withdrawn.is_negative() {
return Err(actor_error!(
ErrIllegalState,
"negative amount to withdraw: {}",
amount_withdrawn
));
}
if amount_withdrawn > &available_balance {
return Err(actor_error!(
ErrIllegalState,
"amount to withdraw {} < available {}",
amount_withdrawn,
available_balance
));
}
if amount_withdrawn.is_positive() {
rt.send(
info.owner,
METHOD_SEND,
Serialized::default(),
amount_withdrawn.clone(),
)?;
}
burn_funds(rt, fee_to_burn)?;
notify_pledge_changed(rt, &newly_vested.neg())?;
state
.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariants broken: {}", e),
)
})?;
Ok(WithdrawBalanceReturn {
amount_withdrawn: amount_withdrawn.clone(),
})
}
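    /// Repays as much of the miner's fee debt as possible from vesting funds and available
    /// balance, burning the repaid amount.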
fn repay_debt<BS, RT>(rt: &mut RT) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let (from_vesting, from_balance, state) = rt.transaction(|state: &mut State, rt| {
let info = get_miner_info(rt.store(), state)?;
rt.validate_immediate_caller_is(
info.control_addresses
.iter()
.chain(&[info.worker, info.owner]),
)?;
// Repay as much fee debt as possible.
let (from_vesting, from_balance) = state
.repay_partial_debt_in_priority_order(
rt.store(),
rt.curr_epoch(),
&rt.current_balance()?,
)
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to unlock fee debt")
})?;
Ok((from_vesting, from_balance, state.clone()))
})?;
let burn_amount = from_balance + &from_vesting;
notify_pledge_changed(rt, &from_vesting.neg())?;
burn_funds(rt, burn_amount)?;
state
.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariants broken: {}", e),
)
})?;
Ok(())
}
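    /// Handles a deferred cron event delivered by the power actor, dispatching on the
    /// payload's event type (proving deadline or early-termination processing).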
fn on_deferred_cron_event<BS, RT>(
rt: &mut RT,
params: DeferredCronEventParams,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
rt.validate_immediate_caller_is(std::iter::once(&*STORAGE_POWER_ACTOR_ADDR))?;
let payload: CronEventPayload = from_slice(¶ms.event_payload).map_err(|e| {
actor_error!(
ErrIllegalState,
format!(
"failed to unmarshal miner cron payload into expected structure: {}",
e
)
)
})?;
match payload.event_type {
CRON_EVENT_PROVING_DEADLINE => handle_proving_deadline(
rt,
¶ms.reward_smoothed,
¶ms.quality_adj_power_smoothed,
)?,
CRON_EVENT_PROCESS_EARLY_TERMINATIONS => {
if process_early_terminations(
rt,
¶ms.reward_smoothed,
¶ms.quality_adj_power_smoothed,
)? {
schedule_early_termination_work(rt)?
}
}
_ => {
error!(
"onDeferredCronEvent invalid event type: {}",
payload.event_type
);
}
};
let state: State = rt.state()?;
state
.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariants broken: {}", e),
)
})?;
Ok(())
}
}
// TODO: We're using the current power+epoch reward. Technically, we
// should use the power/reward at the time of termination.
// https://github.com/filecoin-project/specs-actors/v6/pull/648
fn process_early_terminations<BS, RT>(
rt: &mut RT,
reward_smoothed: &FilterEstimate,
quality_adj_power_smoothed: &FilterEstimate,
) -> Result</* more */ bool, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let (result, more, deals_to_terminate, penalty, pledge_delta) =
rt.transaction(|state: &mut State, rt| {
let store = rt.store();
let (result, more) = state
.pop_early_terminations(store, ADDRESSED_PARTITIONS_MAX, ADDRESSED_SECTORS_MAX)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to pop early terminations",
)
})?;
// Nothing to do, don't waste any time.
// This can happen if we end up processing early terminations
// before the cron callback fires.
if result.is_empty() {
info!("no early terminations (maybe cron callback hasn't happened yet?)");
return Ok((
result,
more,
Vec::new(),
TokenAmount::zero(),
TokenAmount::zero(),
));
}
let info = get_miner_info(rt.store(), state)?;
let sectors = Sectors::load(store, &state.sectors).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to load sectors array")
})?;
let mut total_initial_pledge = TokenAmount::zero();
let mut deals_to_terminate =
Vec::<OnMinerSectorsTerminateParams>::with_capacity(result.sectors.len());
let mut penalty = TokenAmount::zero();
for (epoch, sector_numbers) in result.iter() {
let sectors = sectors
.load_sector(sector_numbers)
.map_err(|e| e.wrap("failed to load sector infos"))?;
penalty += termination_penalty(
info.sector_size,
epoch,
reward_smoothed,
quality_adj_power_smoothed,
§ors,
);
// estimate ~one deal per sector.
let mut deal_ids = Vec::<DealID>::with_capacity(sectors.len());
for sector in sectors {
deal_ids.extend(sector.deal_ids);
total_initial_pledge += sector.initial_pledge;
}
let params = OnMinerSectorsTerminateParams { epoch, deal_ids };
deals_to_terminate.push(params);
}
// Pay penalty
state
.apply_penalty(&penalty)
.map_err(|e| actor_error!(ErrIllegalState, "failed to apply penalty: {}", e))?;
// Remove pledge requirement.
let mut pledge_delta = -total_initial_pledge;
state.add_initial_pledge(&pledge_delta).map_err(|e| {
actor_error!(
ErrIllegalState,
"failed to add initial pledge {}: {}",
pledge_delta,
e
)
})?;
// Use unlocked pledge to pay down outstanding fee debt
let (penalty_from_vesting, penalty_from_balance) = state
.repay_partial_debt_in_priority_order(
rt.store(),
rt.curr_epoch(),
&rt.current_balance()?,
)
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to repay penalty")
})?;
penalty = &penalty_from_vesting + penalty_from_balance;
pledge_delta -= penalty_from_vesting;
Ok((result, more, deals_to_terminate, penalty, pledge_delta))
})?;
// We didn't do anything, abort.
if result.is_empty() {
info!("no early terminations");
return Ok(more);
}
// Burn penalty.
log::debug!(
"storage provider {} penalized {} for sector termination",
rt.message().receiver(),
penalty
);
burn_funds(rt, penalty)?;
// Return pledge.
notify_pledge_changed(rt, &pledge_delta)?;
// Terminate deals.
for params in deals_to_terminate {
request_terminate_deals(rt, params.epoch, params.deal_ids)?;
}
// reschedule cron worker, if necessary.
Ok(more)
}
/// Invoked at the end of the last epoch for each proving deadline.
fn handle_proving_deadline<BS, RT>(
rt: &mut RT,
reward_smoothed: &FilterEstimate,
quality_adj_power_smoothed: &FilterEstimate,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let curr_epoch = rt.curr_epoch();
let mut had_early_terminations = false;
let mut power_delta_total = PowerPair::zero();
let mut penalty_total = TokenAmount::zero();
let mut pledge_delta_total = TokenAmount::zero();
let mut continue_cron = false;
let state: State = rt.transaction(|state: &mut State, rt| {
// Vest locked funds.
        // This happens first so that any subsequent penalties are taken from
        // locked vesting funds before the funds that unlock this epoch.
let newly_vested = state
.unlock_vested_funds(rt.store(), rt.curr_epoch())
.map_err(|e| e.downcast_default(ExitCode::ErrIllegalState, "failed to vest funds"))?;
pledge_delta_total -= newly_vested;
// Process pending worker change if any
let mut info = get_miner_info(rt.store(), state)?;
process_pending_worker(&mut info, rt, state)?;
let deposit_to_burn = state
.cleanup_expired_pre_commits(rt.store(), rt.curr_epoch())
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to expire pre-committed sectors",
)
})?;
state
.apply_penalty(&deposit_to_burn)
.map_err(|e| actor_error!(ErrIllegalState, "failed to apply penalty: {}", e))?;
log::debug!(
"storage provider {} penalized {} for expired pre commits",
rt.message().receiver(),
deposit_to_burn
);
        // Record whether or not we _had_ early terminations in the queue before this method.
        // That way, we don't re-schedule a cron callback if one is already scheduled.
had_early_terminations = have_pending_early_terminations(state);
let result = state
.advance_deadline(rt.store(), rt.curr_epoch())
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to advance deadline")
})?;
// Faults detected by this missed PoSt pay no penalty, but sectors that were already faulty
// and remain faulty through this deadline pay the fault fee.
let penalty_target = pledge_penalty_for_continued_fault(
reward_smoothed,
quality_adj_power_smoothed,
&result.previously_faulty_power.qa,
);
power_delta_total += &result.power_delta;
pledge_delta_total += &result.pledge_delta;
state
.apply_penalty(&penalty_target)
.map_err(|e| actor_error!(ErrIllegalState, "failed to apply penalty: {}", e))?;
log::debug!(
"storage provider {} penalized {} for continued fault",
rt.message().receiver(),
penalty_target
);
let (penalty_from_vesting, penalty_from_balance) = state
.repay_partial_debt_in_priority_order(
rt.store(),
rt.curr_epoch(),
&rt.current_balance()?,
)
.map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to unlock penalty")
})?;
penalty_total = &penalty_from_vesting + penalty_from_balance;
pledge_delta_total -= penalty_from_vesting;
continue_cron = state.continue_deadline_cron();
if !continue_cron {
state.deadline_cron_active = false;
}
Ok(state.clone())
})?;
// Remove power for new faults, and burn penalties.
request_update_power(rt, power_delta_total)?;
burn_funds(rt, penalty_total)?;
notify_pledge_changed(rt, &pledge_delta_total)?;
// Schedule cron callback for next deadline's last epoch.
if continue_cron {
let new_deadline_info = state.deadline_info(curr_epoch + 1);
enroll_cron_event(
rt,
new_deadline_info.last(),
CronEventPayload {
event_type: CRON_EVENT_PROVING_DEADLINE,
},
)?;
} else {
info!(
"miner {} going inactive, deadline cron discontinued",
rt.message().receiver()
)
}
// Record whether or not we _have_ early terminations now.
let has_early_terminations = have_pending_early_terminations(&state);
// If we didn't have pending early terminations before, but we do now,
// handle them at the next epoch.
if !had_early_terminations && has_early_terminations {
// First, try to process some of these terminations.
if process_early_terminations(rt, reward_smoothed, quality_adj_power_smoothed)? {
// If that doesn't work, just defer till the next epoch.
schedule_early_termination_work(rt)?;
}
// Note: _don't_ process early terminations if we had a cron
// callback already scheduled. In that case, we'll already have
// processed AddressedSectorsMax terminations this epoch.
}
Ok(())
}
/// Validates a sector's expiration epoch: it must satisfy the minimum lifetime after
/// activation, must not extend more than the maximum extension beyond the current epoch,
/// and the total lifetime must not exceed the maximum for the sector's seal proof type.
fn validate_expiration<BS, RT>(
rt: &RT,
activation: ChainEpoch,
expiration: ChainEpoch,
seal_proof: RegisteredSealProof,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
// Expiration must be after activation. Check this explicitly to avoid an underflow below.
if expiration <= activation {
return Err(actor_error!(
ErrIllegalArgument,
"sector expiration {} must be after activation {}",
expiration,
activation
));
}
// expiration cannot be less than minimum after activation
if expiration - activation < MIN_SECTOR_EXPIRATION {
return Err(actor_error!(
ErrIllegalArgument,
"invalid expiration {}, total sector lifetime ({}) must exceed {} after activation {}",
expiration,
expiration - activation,
MIN_SECTOR_EXPIRATION,
activation
));
}
// expiration cannot exceed MaxSectorExpirationExtension from now
if expiration > rt.curr_epoch() + MAX_SECTOR_EXPIRATION_EXTENSION {
return Err(actor_error!(
ErrIllegalArgument,
"invalid expiration {}, cannot be more than {} past current epoch {}",
expiration,
MAX_SECTOR_EXPIRATION_EXTENSION,
rt.curr_epoch()
));
}
// total sector lifetime cannot exceed SectorMaximumLifetime for the sector's seal proof
let max_lifetime = seal_proof_sector_maximum_lifetime(seal_proof, rt.network_version())
.ok_or_else(|| {
actor_error!(
ErrIllegalArgument,
"unrecognized seal proof type {:?}",
seal_proof
)
})?;
if expiration - activation > max_lifetime {
return Err(actor_error!(
ErrIllegalArgument,
"invalid expiration {}, total sector lifetime ({}) cannot exceed {} after activation {}",
expiration,
expiration - activation,
max_lifetime,
activation
));
}
Ok(())
}
fn validate_replace_sector<BS>(
state: &State,
store: &BS,
params: &SectorPreCommitInfo,
) -> Result<(), ActorError>
where
BS: BlockStore,
{
let replace_sector = state
.get_sector(store, params.replace_sector_number)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to load sector {}", params.replace_sector_number),
)
})?
.ok_or_else(|| {
actor_error!(
ErrNotFound,
"no such sector {} to replace",
params.replace_sector_number
)
})?;
if !replace_sector.deal_ids.is_empty() {
return Err(actor_error!(
ErrIllegalArgument,
"cannot replace sector {} which has deals",
params.replace_sector_number
));
}
// From network version 7, the new sector's seal type must have the same Window PoSt proof type as the one
// being replaced, rather than be exactly the same seal type.
// This permits replacing sectors with V1 seal types with V1_1 seal types.
let replace_w_post_proof = replace_sector
.seal_proof
.registered_window_post_proof()
.map_err(|e| {
actor_error!(
ErrIllegalState,
"failed to lookup Window PoSt proof type for sector seal proof {:?}: {}",
replace_sector.seal_proof,
e
)
})?;
let new_w_post_proof = params
.seal_proof
.registered_window_post_proof()
.map_err(|e| {
actor_error!(
ErrIllegalArgument,
"failed to lookup Window PoSt proof type for new seal proof {:?}: {}",
replace_sector.seal_proof,
e
)
})?;
if replace_w_post_proof != new_w_post_proof {
return Err(actor_error!(
ErrIllegalArgument,
"new sector window PoSt proof type {:?} must match replaced proof type {:?} (seal proof type {:?})",
replace_w_post_proof,
new_w_post_proof,
params.seal_proof
));
}
if params.expiration < replace_sector.expiration {
return Err(actor_error!(
ErrIllegalArgument,
"cannot replace sector {} expiration {} with sooner expiration {}",
params.replace_sector_number,
replace_sector.expiration,
params.expiration
));
}
state
.check_sector_health(
store,
params.replace_sector_deadline,
params.replace_sector_partition,
params.replace_sector_number,
)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
format!("failed to replace sector {}", params.replace_sector_number),
)
})?;
Ok(())
}
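/// Enrolls a cron event with the power actor, to be delivered back to this miner at the
/// given epoch.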
fn enroll_cron_event<BS, RT>(
rt: &mut RT,
event_epoch: ChainEpoch,
cb: CronEventPayload,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let payload = Serialized::serialize(cb)
.map_err(|e| ActorError::from(e).wrap("failed to serialize payload: {}"))?;
let ser_params = Serialized::serialize(EnrollCronEventParams {
event_epoch,
payload,
})?;
rt.send(
*STORAGE_POWER_ACTOR_ADDR,
PowerMethod::EnrollCronEvent as u64,
ser_params,
TokenAmount::zero(),
)?;
Ok(())
}
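/// Notifies the power actor of a change in this miner's claimed power. No-op for a zero delta.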
fn request_update_power<BS, RT>(rt: &mut RT, delta: PowerPair) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
if delta.is_zero() {
return Ok(());
}
let delta_clone = delta.clone();
rt.send(
*STORAGE_POWER_ACTOR_ADDR,
crate::power::Method::UpdateClaimedPower as MethodNum,
Serialized::serialize(crate::power::UpdateClaimedPowerParams {
raw_byte_delta: delta.raw,
quality_adjusted_delta: delta.qa,
})?,
TokenAmount::zero(),
)
.map_err(|e| e.wrap(format!("failed to update power with {:?}", delta_clone)))?;
Ok(())
}
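/// Requests the market actor terminate the given deals, chunking the request into batches
/// of at most 8192 deal IDs per message.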
fn request_terminate_deals<BS, RT>(
rt: &mut RT,
epoch: ChainEpoch,
deal_ids: Vec<DealID>,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
const MAX_LENGTH: usize = 8192;
for chunk in deal_ids.chunks(MAX_LENGTH) {
rt.send(
*STORAGE_MARKET_ACTOR_ADDR,
MarketMethod::OnMinerSectorsTerminate as u64,
Serialized::serialize(OnMinerSectorsTerminateParamsRef {
epoch,
deal_ids: chunk,
})?,
TokenAmount::zero(),
)?;
}
Ok(())
}
fn schedule_early_termination_work<BS, RT>(rt: &mut RT) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
info!("scheduling early terminations with cron...");
enroll_cron_event(
rt,
rt.curr_epoch() + 1,
CronEventPayload {
event_type: CRON_EVENT_PROCESS_EARLY_TERMINATIONS,
},
)
}
fn have_pending_early_terminations(state: &State) -> bool {
    !state.early_terminations.is_empty()
}
/// Verifies a Window PoSt proof against the challenged sectors.
/// Returns Ok(true) if the proof verifies; a verification failure is returned as an error
/// rather than Ok(false).
fn verify_windowed_post<BS, RT>(
rt: &RT,
challenge_epoch: ChainEpoch,
sectors: &[SectorOnChainInfo],
proofs: Vec<PoStProof>,
) -> Result<bool, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let miner_actor_id: u64 = if let Payload::ID(i) = rt.message().receiver().payload() {
*i
} else {
return Err(actor_error!(
ErrIllegalState,
"runtime provided bad receiver address {}",
rt.message().receiver()
));
};
// Regenerate challenge randomness, which must match that generated for the proof.
let entropy = rt.message().receiver().marshal_cbor().map_err(|e| {
ActorError::from(e).wrap("failed to marshal address for window post challenge")
})?;
let randomness: PoStRandomness =
rt.get_randomness_from_beacon(WindowedPoStChallengeSeed, challenge_epoch, &entropy)?;
let challenged_sectors = sectors
.iter()
.map(|s| SectorInfo {
proof: s.seal_proof,
sector_number: s.sector_number,
sealed_cid: s.sealed_cid,
})
.collect();
// get public inputs
let pv_info = WindowPoStVerifyInfo {
randomness,
proofs,
challenged_sectors,
prover: miner_actor_id,
};
// verify the post proof
rt.verify_post(&pv_info).map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalArgument,
format!(
"invalid PoSt: proofs({:?}), randomness({:?})",
pv_info.proofs, pv_info.randomness
),
)
})?;
Ok(true)
}
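/// Assembles the seal verification info for a prove-commit: checks that the interactive
/// epoch has passed, requests the unsealed sector CID from the market actor, and
/// regenerates the seal and interactive randomness.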
fn get_verify_info<BS, RT>(
rt: &mut RT,
params: SealVerifyParams,
) -> Result<SealVerifyInfo, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
if rt.curr_epoch() <= params.interactive_epoch {
return Err(actor_error!(ErrForbidden, "too early to prove sector"));
}
let commds = request_unsealed_sector_cids(
rt,
&[SectorDataSpec {
deal_ids: params.deal_ids.clone(),
sector_type: params.registered_seal_proof,
}],
)?;
let miner_actor_id: u64 = if let Payload::ID(i) = rt.message().receiver().payload() {
*i
} else {
return Err(actor_error!(
ErrIllegalState,
"runtime provided non ID receiver address {}",
rt.message().receiver()
));
};
let entropy =
rt.message().receiver().marshal_cbor().map_err(|e| {
ActorError::from(e).wrap("failed to marshal address for get verify info")
})?;
let randomness: SealRandom =
rt.get_randomness_from_tickets(SealRandomness, params.seal_rand_epoch, &entropy)?;
let interactive_randomness: InteractiveSealRandomness = rt.get_randomness_from_beacon(
InteractiveSealChallengeSeed,
params.interactive_epoch,
&entropy,
)?;
Ok(SealVerifyInfo {
registered_proof: params.registered_seal_proof,
sector_id: SectorID {
miner: miner_actor_id,
number: params.sector_num,
},
deal_ids: params.deal_ids,
interactive_randomness,
proof: params.proof,
randomness,
sealed_cid: params.sealed_cid,
unsealed_cid: commds[0],
})
}
/// Requests the storage market actor compute the unsealed sector CID from a sector's deals.
fn request_unsealed_sector_cids<BS, RT>(
rt: &mut RT,
data_commitment_inputs: &[SectorDataSpec],
) -> Result<Vec<Cid>, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
if data_commitment_inputs.is_empty() {
return Ok(vec![]);
}
let ret: ComputeDataCommitmentReturn = rt
.send(
*STORAGE_MARKET_ACTOR_ADDR,
MarketMethod::ComputeDataCommitment as u64,
Serialized::serialize(ComputeDataCommitmentParamsRef {
inputs: data_commitment_inputs,
})?,
TokenAmount::zero(),
)?
.deserialize()?;
if data_commitment_inputs.len() != ret.commds.len() {
return Err(actor_error!(ErrIllegalState,
"number of data commitments computed {} does not match number of data commitment inputs {}",
ret.commds.len(), data_commitment_inputs.len()
));
}
Ok(ret.commds)
}
fn request_deal_weights<BS, RT>(
rt: &mut RT,
sectors: &[market::SectorDeals],
) -> Result<VerifyDealsForActivationReturn, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
// Short-circuit if there are no deals in any of the sectors.
let mut deal_count = 0;
for sector in sectors {
deal_count += sector.deal_ids.len();
}
if deal_count == 0 {
let mut empty_result = VerifyDealsForActivationReturn {
sectors: Vec::with_capacity(sectors.len()),
};
for _ in 0..sectors.len() {
empty_result.sectors.push(market::SectorWeights {
deal_space: 0,
deal_weight: 0.into(),
verified_deal_weight: 0.into(),
});
}
return Ok(empty_result);
}
let serialized = rt.send(
*STORAGE_MARKET_ACTOR_ADDR,
MarketMethod::VerifyDealsForActivation as u64,
Serialized::serialize(VerifyDealsForActivationParamsRef { sectors })?,
TokenAmount::zero(),
)?;
Ok(serialized.deserialize()?)
}
/// Requests the current epoch target block reward from the reward actor.
/// The return value includes the reward, a smoothed estimate of the reward, and baseline power.
fn request_current_epoch_block_reward<BS, RT>(
rt: &mut RT,
) -> Result<ThisEpochRewardReturn, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let ret = rt
.send(
*REWARD_ACTOR_ADDR,
crate::reward::Method::ThisEpochReward as MethodNum,
Default::default(),
TokenAmount::zero(),
)
.map_err(|e| e.wrap("failed to check epoch baseline power"))?;
let ret: ThisEpochRewardReturn = ret
.deserialize()
.map_err(|e| ActorError::from(e).wrap("failed to unmarshal target power value"))?;
Ok(ret)
}
/// Requests the current network total power and pledge from the power actor.
fn request_current_total_power<BS, RT>(rt: &mut RT) -> Result<CurrentTotalPowerReturn, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let ret = rt
.send(
*STORAGE_POWER_ACTOR_ADDR,
crate::power::Method::CurrentTotalPower as MethodNum,
Default::default(),
TokenAmount::zero(),
)
.map_err(|e| e.wrap("failed to check current power"))?;
let power: CurrentTotalPowerReturn = ret
.deserialize()
.map_err(|e| ActorError::from(e).wrap("failed to unmarshal power total value"))?;
Ok(power)
}
/// Resolves an address to an ID address and verifies that it is the address of an account or multisig actor.
fn resolve_control_address<BS, RT>(rt: &RT, raw: Address) -> Result<Address, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let resolved = rt
.resolve_address(&raw)?
.ok_or_else(|| actor_error!(ErrIllegalArgument, "unable to resolve address: {}", raw))?;
let owner_code = rt
.get_actor_code_cid(&resolved)?
.ok_or_else(|| actor_error!(ErrIllegalArgument, "no code for address: {}", resolved))?;
if !is_principal(&owner_code) {
return Err(actor_error!(
ErrIllegalArgument,
"owner actor type must be a principal, was {}",
owner_code
));
}
Ok(resolved)
}
/// Resolves an address to an ID address and verifies that it is the address of an account actor with an associated BLS key.
/// The worker must be BLS since the worker key will be used alongside a BLS-VRF.
fn resolve_worker_address<BS, RT>(rt: &mut RT, raw: Address) -> Result<Address, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let resolved = rt
.resolve_address(&raw)?
.ok_or_else(|| actor_error!(ErrIllegalArgument, "unable to resolve address: {}", raw))?;
let worker_code = rt
.get_actor_code_cid(&resolved)?
.ok_or_else(|| actor_error!(ErrIllegalArgument, "no code for address: {}", resolved))?;
if worker_code != *ACCOUNT_ACTOR_CODE_ID {
return Err(actor_error!(
ErrIllegalArgument,
"worker actor type must be an account, was {}",
worker_code
));
}
if raw.protocol() != Protocol::BLS {
let ret = rt.send(
resolved,
AccountMethod::PubkeyAddress as u64,
Serialized::default(),
TokenAmount::zero(),
)?;
let pub_key: Address = ret.deserialize().map_err(|e| {
ActorError::from(e).wrap(format!("failed to deserialize address result: {:?}", ret))
})?;
if pub_key.protocol() != Protocol::BLS {
return Err(actor_error!(
ErrIllegalArgument,
"worker account {} must have BLS pubkey, was {}",
resolved,
pub_key.protocol()
));
}
}
Ok(resolved)
}
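/// Sends the given amount to the burnt-funds actor. No-op unless the amount is positive.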
fn burn_funds<BS, RT>(rt: &mut RT, amount: TokenAmount) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
log::debug!(
"storage provder {} burning {}",
rt.message().receiver(),
amount
);
if amount.is_positive() {
rt.send(
*BURNT_FUNDS_ACTOR_ADDR,
METHOD_SEND,
Serialized::default(),
amount,
)?;
}
Ok(())
}
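/// Notifies the power actor of a change in this miner's total pledge. No-op for a zero delta.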
fn notify_pledge_changed<BS, RT>(rt: &mut RT, pledge_delta: &BigInt) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
if !pledge_delta.is_zero() {
rt.send(
*STORAGE_POWER_ACTOR_ADDR,
PowerMethod::UpdatePledgeTotal as u64,
Serialized::serialize(BigIntSer(pledge_delta))?,
TokenAmount::zero(),
)?;
}
Ok(())
}
/// Assigns proving period offset randomly in the range [0, WPoStProvingPeriod) by hashing
/// the actor's address and current epoch.
fn assign_proving_period_offset(
addr: Address,
current_epoch: ChainEpoch,
blake2b: impl FnOnce(&[u8]) -> Result<[u8; 32], Box<dyn StdError>>,
) -> Result<ChainEpoch, Box<dyn StdError>> {
let mut my_addr = addr.marshal_cbor()?;
my_addr.write_i64::<BigEndian>(current_epoch)?;
let digest = blake2b(&my_addr)?;
let mut offset: u64 = BigEndian::read_u64(&digest);
offset %= WPOST_PROVING_PERIOD as u64;
    // Conversion from u64 to i64 is safe because the offset was reduced modulo WPOST_PROVING_PERIOD, which is an i64
Ok(offset as ChainEpoch)
}
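// Sketch of the derivation above (all values illustrative): the CBOR-encoded
// address is concatenated with the big-endian epoch and hashed with blake2b;
// the first eight digest bytes are read big-endian as a u64 and reduced modulo
// WPOST_PROVING_PERIOD, giving each miner a stable, roughly uniform offset.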
/// Computes the epoch at which the current proving period started: the highest epoch not greater
/// than the current epoch that has the given offset from an exact multiple of WPoStProvingPeriod.
/// A miner is exempt from Window PoSt until the first full proving period starts.
fn current_proving_period_start(current_epoch: ChainEpoch, offset: ChainEpoch) -> ChainEpoch {
let curr_modulus = current_epoch % WPOST_PROVING_PERIOD;
let period_progress = if curr_modulus >= offset {
curr_modulus - offset
} else {
WPOST_PROVING_PERIOD - (offset - curr_modulus)
};
current_epoch - period_progress
}
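// Worked example with illustrative numbers (a period of 2880 epochs,
// current_epoch = 10000, offset = 700):
//   curr_modulus    = 10000 % 2880 = 1360
//   period_progress = 1360 - 700  = 660    (since 1360 >= 700)
//   period start    = 10000 - 660 = 9340,  and 9340 % 2880 == 700 as required.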
fn current_deadline_index(current_epoch: ChainEpoch, period_start: ChainEpoch) -> usize {
((current_epoch - period_start) / WPOST_CHALLENGE_WINDOW) as usize
}
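// E.g. with an illustrative challenge window of 60 epochs, an epoch 130 past
// period_start falls into deadline index 130 / 60 = 2.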
/// Computes deadline information for a fault or recovery declaration.
/// If the deadline has not yet elapsed, the declaration is taken as being for the current proving period.
/// If the deadline has elapsed, it's instead taken as being for the next proving period after the current epoch.
fn declaration_deadline_info(
period_start: ChainEpoch,
deadline_idx: usize,
current_epoch: ChainEpoch,
) -> Result<DeadlineInfo, String> {
if deadline_idx >= WPOST_PERIOD_DEADLINES as usize {
return Err(format!(
"invalid deadline {}, must be < {}",
deadline_idx, WPOST_PERIOD_DEADLINES
));
}
let deadline = new_deadline_info(period_start, deadline_idx, current_epoch).next_not_elapsed();
Ok(deadline)
}
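// In other words: if the declared deadline's challenge window has already
// elapsed this period, `next_not_elapsed` rolls the DeadlineInfo forward a
// full proving period so the declaration targets that deadline's next
// occurrence.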
/// Checks that a fault or recovery declaration at a specific deadline is outside the exclusion window for the deadline.
fn validate_fr_declaration_deadline(deadline: &DeadlineInfo) -> Result<(), String> {
if deadline.fault_cutoff_passed() {
Err("late fault or recovery declaration".to_string())
} else {
Ok(())
}
}
/// Validates that a partition contains the given sectors.
fn validate_partition_contains_sectors(
partition: &Partition,
sectors: &mut UnvalidatedBitField,
) -> Result<(), String> {
let sectors = sectors
.validate()
.map_err(|e| format!("failed to check sectors: {}", e))?;
// Check that the declared sectors are actually assigned to the partition.
if partition.sectors.contains_all(sectors) {
Ok(())
} else {
Err("not all sectors are assigned to the partition".to_string())
}
}
fn termination_penalty(
sector_size: SectorSize,
current_epoch: ChainEpoch,
reward_estimate: &FilterEstimate,
network_qa_power_estimate: &FilterEstimate,
sectors: &[SectorOnChainInfo],
) -> TokenAmount {
let mut total_fee = TokenAmount::zero();
for sector in sectors {
let sector_power = qa_power_for_sector(sector_size, sector);
        let fee = pledge_penalty_for_termination(
            &sector.expected_day_reward,
            current_epoch - sector.activation,
            &sector.expected_storage_pledge,
            network_qa_power_estimate,
            &sector_power,
            reward_estimate,
            &sector.replaced_day_reward,
            sector.replaced_sector_age,
        );
total_fee += fee;
}
total_fee
}
fn consensus_fault_active(info: &MinerInfo, curr_epoch: ChainEpoch) -> bool {
    // For the penalization period to last exactly `finality` epochs,
    // consensus faults remain active until `curr_epoch` exceeds `consensus_fault_elapsed`.
curr_epoch <= info.consensus_fault_elapsed
}
fn power_for_sector(sector_size: SectorSize, sector: &SectorOnChainInfo) -> PowerPair {
PowerPair {
raw: BigInt::from(sector_size as u64),
qa: qa_power_for_sector(sector_size, sector),
}
}
/// Returns the sum of the raw byte and quality-adjusted power for sectors.
fn power_for_sectors(sector_size: SectorSize, sectors: &[SectorOnChainInfo]) -> PowerPair {
let qa = sectors
.iter()
.map(|s| qa_power_for_sector(sector_size, s))
.sum();
PowerPair {
raw: BigInt::from(sector_size as u64) * BigInt::from(sectors.len()),
qa,
}
}
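// Sketch: for two sectors at a 32 GiB sector size, `raw` is 2 * (32 << 30)
// bytes of raw byte power, while `qa` sums each sector's quality-adjusted
// power, which may exceed the raw figure when verified deals are present.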
fn get_miner_info<BS>(store: &BS, state: &State) -> Result<MinerInfo, ActorError>
where
BS: BlockStore,
{
state
.get_info(store)
.map_err(|e| e.downcast_default(ExitCode::ErrIllegalState, "could not read miner info"))
}
fn process_pending_worker<BS, RT>(
info: &mut MinerInfo,
rt: &RT,
state: &mut State,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let pending_worker_key = if let Some(k) = &info.pending_worker_key {
k
} else {
return Ok(());
};
if rt.curr_epoch() < pending_worker_key.effective_at {
return Ok(());
}
info.worker = pending_worker_key.new_worker;
info.pending_worker_key = None;
state
.save_info(rt.store(), info)
.map_err(|e| e.downcast_default(ExitCode::ErrIllegalState, "failed to save miner info"))
}
/// Repays all fee debt and then verifies that the miner has the amount needed to cover
/// the pledge requirement after burning all fee debt; if not, it aborts.
/// Returns an amount that must be burnt by the actor.
/// Note that this call does not compute recent vesting so reported unlocked balance
/// may be slightly lower than the true amount. Computing vesting here would be
/// almost always redundant since vesting is quantized to ~daily units. Vesting
/// will be at most one proving period old if computed in the cron callback.
fn repay_debts_or_abort<BS, RT>(rt: &RT, state: &mut State) -> Result<TokenAmount, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
let res = state.repay_debts(&rt.current_balance()?).map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"unlocked balance can not repay fee debt",
)
})?;
Ok(res)
}
fn replaced_sector_parameters(
curr_epoch: ChainEpoch,
precommit: &SectorPreCommitOnChainInfo,
replaced_by_num: &HashMap<SectorNumber, SectorOnChainInfo>,
) -> Result<(TokenAmount, ChainEpoch, TokenAmount), ActorError> {
if !precommit.info.replace_capacity {
return Ok(Default::default());
}
let replaced = replaced_by_num
.get(&precommit.info.replace_sector_number)
.ok_or_else(|| {
actor_error!(
ErrNotFound,
"no such sector {} to replace",
precommit.info.replace_sector_number
)
})?;
let age = std::cmp::max(0, curr_epoch - replaced.activation);
// The sector will actually be active for the period between activation and its next
// proving deadline, but this covers the period for which we will be looking to the old sector
// for termination fees.
Ok((
replaced.initial_pledge.clone(),
age,
replaced.expected_day_reward.clone(),
))
}
fn check_control_addresses(control_addrs: &[Address]) -> Result<(), ActorError> {
if control_addrs.len() > MAX_CONTROL_ADDRESSES {
return Err(actor_error!(
ErrIllegalArgument,
"control addresses length {} exceeds max control addresses length {}",
control_addrs.len(),
MAX_CONTROL_ADDRESSES
));
}
Ok(())
}
fn check_valid_post_proof_type(proof_type: RegisteredPoStProof) -> Result<(), ActorError> {
match proof_type {
RegisteredPoStProof::StackedDRGWindow32GiBV1
| RegisteredPoStProof::StackedDRGWindow64GiBV1 => Ok(()),
_ => Err(actor_error!(
ErrIllegalArgument,
"proof type {:?} not allowed for new miner actors",
proof_type
)),
}
}
fn check_peer_info(peer_id: &[u8], multiaddrs: &[BytesDe]) -> Result<(), ActorError> {
if peer_id.len() > MAX_PEER_ID_LENGTH {
return Err(actor_error!(
ErrIllegalArgument,
"peer ID size of {} exceeds maximum size of {}",
peer_id.len(),
MAX_PEER_ID_LENGTH
));
}
let mut total_size = 0;
for ma in multiaddrs {
if ma.0.is_empty() {
return Err(actor_error!(ErrIllegalArgument, "invalid empty multiaddr"));
}
total_size += ma.0.len();
}
if total_size > MAX_MULTIADDR_DATA {
return Err(actor_error!(
ErrIllegalArgument,
"multiaddr size of {} exceeds maximum of {}",
total_size,
MAX_MULTIADDR_DATA
));
}
Ok(())
}
fn confirm_sector_proofs_valid_internal<BS, RT>(
rt: &mut RT,
pre_commits: Vec<SectorPreCommitOnChainInfo>,
this_epoch_baseline_power: &BigInt,
this_epoch_reward_smoothed: &FilterEstimate,
quality_adj_power_smoothed: &FilterEstimate,
) -> Result<(), ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
// get network stats from other actors
let circulating_supply = rt.total_fil_circ_supply()?;
// Ideally, we'd combine some of these operations, but at least we have
// a constant number of them.
// Committed-capacity sectors licensed for early removal by new sectors being proven.
let mut replace_sectors = DeadlineSectorMap::new();
let activation = rt.curr_epoch();
// Pre-commits for new sectors.
let mut valid_pre_commits = Vec::<SectorPreCommitOnChainInfo>::new();
for pre_commit in pre_commits {
if !pre_commit.info.deal_ids.is_empty() {
// Check (and activate) storage deals associated to sector. Abort if checks failed.
let res = rt.send(
*STORAGE_MARKET_ACTOR_ADDR,
crate::market::Method::ActivateDeals as MethodNum,
Serialized::serialize(ActivateDealsParams {
deal_ids: pre_commit.info.deal_ids.clone(),
sector_expiry: pre_commit.info.expiration,
})?,
TokenAmount::zero(),
);
if let Err(e) = res {
info!(
"failed to activate deals on sector {}, dropping from prove commit set: {}",
pre_commit.info.sector_number,
e.msg()
);
continue;
}
}
if pre_commit.info.replace_capacity {
replace_sectors
.add_values(
pre_commit.info.replace_sector_deadline,
pre_commit.info.replace_sector_partition,
&[pre_commit.info.replace_sector_number],
)
.map_err(|e| {
actor_error!(
ErrIllegalArgument,
"failed to record sectors for replacement: {}",
e
)
})?;
}
valid_pre_commits.push(pre_commit);
}
    // When all prove commits have failed, abort early
if valid_pre_commits.is_empty() {
return Err(actor_error!(
ErrIllegalArgument,
"all prove commits failed to validate"
));
}
let (total_pledge, newly_vested) = rt.transaction(|state: &mut State, rt| {
let store = rt.store();
let info = get_miner_info(store, state)?;
// Schedule expiration for replaced sectors to the end of their next deadline window.
// They can't be removed right now because we want to challenge them immediately before termination.
let replaced = state
.reschedule_sector_expirations(
store,
rt.curr_epoch(),
info.sector_size,
replace_sectors,
)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to replace sector expirations",
)
})?;
let replaced_by_sector_number: HashMap<u64, SectorOnChainInfo> =
replaced.into_iter().map(|s| (s.sector_number, s)).collect();
let mut new_sector_numbers = Vec::<SectorNumber>::with_capacity(valid_pre_commits.len());
let mut deposit_to_unlock = TokenAmount::zero();
let mut new_sectors = Vec::<SectorOnChainInfo>::new();
let mut total_pledge = TokenAmount::zero();
for pre_commit in valid_pre_commits {
// compute initial pledge
let duration = pre_commit.info.expiration - activation;
// This should have been caught in precommit, but don't let other sectors fail because of it.
if duration < MIN_SECTOR_EXPIRATION {
warn!(
"precommit {} has lifetime {} less than minimum {}. ignoring",
pre_commit.info.sector_number, duration, MIN_SECTOR_EXPIRATION,
);
continue;
}
let power = qa_power_for_weight(
info.sector_size,
duration,
&pre_commit.deal_weight,
&pre_commit.verified_deal_weight,
);
let day_reward = expected_reward_for_power(
this_epoch_reward_smoothed,
quality_adj_power_smoothed,
&power,
crate::EPOCHS_IN_DAY,
);
// The storage pledge is recorded for use in computing the penalty if this sector is terminated
// before its declared expiration.
// It's not capped to 1 FIL, so can exceed the actual initial pledge requirement.
let storage_pledge = expected_reward_for_power(
this_epoch_reward_smoothed,
quality_adj_power_smoothed,
&power,
INITIAL_PLEDGE_PROJECTION_PERIOD,
);
let mut initial_pledge = initial_pledge_for_power(
&power,
this_epoch_baseline_power,
this_epoch_reward_smoothed,
quality_adj_power_smoothed,
&circulating_supply,
);
// Lower-bound the pledge by that of the sector being replaced.
// Record the replaced age and reward rate for termination fee calculations.
let (replaced_pledge, replaced_sector_age, replaced_day_reward) =
replaced_sector_parameters(
rt.curr_epoch(),
&pre_commit,
&replaced_by_sector_number,
)?;
initial_pledge = std::cmp::max(initial_pledge, replaced_pledge);
deposit_to_unlock += &pre_commit.pre_commit_deposit;
total_pledge += &initial_pledge;
let new_sector_info = SectorOnChainInfo {
sector_number: pre_commit.info.sector_number,
seal_proof: pre_commit.info.seal_proof,
sealed_cid: pre_commit.info.sealed_cid,
deal_ids: pre_commit.info.deal_ids,
expiration: pre_commit.info.expiration,
activation,
deal_weight: pre_commit.deal_weight,
verified_deal_weight: pre_commit.verified_deal_weight,
initial_pledge,
expected_day_reward: day_reward,
expected_storage_pledge: storage_pledge,
replaced_sector_age,
replaced_day_reward,
};
new_sector_numbers.push(new_sector_info.sector_number);
new_sectors.push(new_sector_info);
}
state.put_sectors(store, new_sectors.clone()).map_err(|e| {
e.downcast_default(ExitCode::ErrIllegalState, "failed to put new sectors")
})?;
state
.delete_precommitted_sectors(store, &new_sector_numbers)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to delete precommited sectors",
)
})?;
state
.assign_sectors_to_deadlines(
store,
rt.curr_epoch(),
new_sectors,
info.window_post_partition_sectors,
info.sector_size,
)
.map_err(|e| {
e.downcast_default(
ExitCode::ErrIllegalState,
"failed to assign new sectors to deadlines",
)
})?;
let newly_vested = TokenAmount::zero();
// Unlock deposit for successful proofs, make it available for lock-up as initial pledge.
state
.add_pre_commit_deposit(&(-deposit_to_unlock))
.map_err(|e| actor_error!(ErrIllegalState, "failed to add precommit deposit: {}", e))?;
let unlocked_balance = state
.get_unlocked_balance(&rt.current_balance()?)
.map_err(|e| {
actor_error!(
ErrIllegalState,
"failed to calculate unlocked balance: {}",
e
)
})?;
if unlocked_balance < total_pledge {
return Err(actor_error!(
ErrInsufficientFunds,
"insufficient funds for aggregate initial pledge requirement {}, available: {}",
total_pledge,
unlocked_balance
));
}
state
.add_initial_pledge(&total_pledge)
.map_err(|e| actor_error!(ErrIllegalState, "failed to add initial pledge: {}", e))?;
state
.check_balance_invariants(&rt.current_balance()?)
.map_err(|e| {
ActorError::new(
ErrBalanceInvariantBroken,
format!("balance invariant broken: {}", e),
)
})?;
Ok((total_pledge, newly_vested))
})?;
    // Request pledge update for activated sectors.
notify_pledge_changed(rt, &(total_pledge - newly_vested))?;
Ok(())
}
impl ActorCode for Actor {
fn invoke_method<BS, RT>(
rt: &mut RT,
method: MethodNum,
params: &Serialized,
) -> Result<Serialized, ActorError>
where
BS: BlockStore,
RT: Runtime<BS>,
{
match FromPrimitive::from_u64(method) {
Some(Method::Constructor) => {
Self::constructor(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::ControlAddresses) => {
let res = Self::control_addresses(rt)?;
Ok(Serialized::serialize(&res)?)
}
Some(Method::ChangeWorkerAddress) => {
Self::change_worker_address(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::ChangePeerID) => {
Self::change_peer_id(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::SubmitWindowedPoSt) => {
Self::submit_windowed_post(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::PreCommitSector) => {
Self::pre_commit_sector(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::ProveCommitSector) => {
Self::prove_commit_sector(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::ExtendSectorExpiration) => {
Self::extend_sector_expiration(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::TerminateSectors) => {
let ret = Self::terminate_sectors(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::serialize(ret)?)
}
Some(Method::DeclareFaults) => {
Self::declare_faults(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::DeclareFaultsRecovered) => {
Self::declare_faults_recovered(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::OnDeferredCronEvent) => {
Self::on_deferred_cron_event(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::CheckSectorProven) => {
Self::check_sector_proven(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::ApplyRewards) => {
Self::apply_rewards(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::ReportConsensusFault) => {
Self::report_consensus_fault(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::WithdrawBalance) => {
let res = Self::withdraw_balance(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::serialize(&res)?)
}
Some(Method::ConfirmSectorProofsValid) => {
Self::confirm_sector_proofs_valid(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::ChangeMultiaddrs) => {
Self::change_multiaddresses(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::CompactPartitions) => {
Self::compact_partitions(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::CompactSectorNumbers) => {
Self::compact_sector_numbers(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::ConfirmUpdateWorkerKey) => {
Self::confirm_update_worker_key(rt)?;
Ok(Serialized::default())
}
Some(Method::RepayDebt) => {
Self::repay_debt(rt)?;
Ok(Serialized::default())
}
Some(Method::ChangeOwnerAddress) => {
Self::change_owner_address(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::DisputeWindowedPoSt) => {
Self::dispute_windowed_post(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::PreCommitSectorBatch) => {
Self::pre_commit_sector_batch(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
Some(Method::ProveCommitAggregate) => {
Self::prove_commit_aggregate(rt, rt.deserialize_params(params)?)?;
Ok(Serialized::default())
}
None => Err(actor_error!(SysErrInvalidMethod, "Invalid method")),
}
}
}
| 37.595435 | 236 | 0.541395 |
56b6025260569608e284599635c4ba8d9da5b25f | 5,708 | // SPDX-License-Identifier: (Apache-2.0 OR MIT)
use pyo3::ffi::*;
use std::os::raw::c_char;
use std::ptr::NonNull;
use std::sync::Once;
pub static mut HASH_SEED: u64 = 0;
pub static mut NONE: *mut PyObject = 0 as *mut PyObject;
pub static mut TRUE: *mut PyObject = 0 as *mut PyObject;
pub static mut FALSE: *mut PyObject = 0 as *mut PyObject;
pub static mut STR_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut BYTES_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut BYTEARRAY_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut DICT_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut LIST_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut TUPLE_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut NONE_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut BOOL_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut INT_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut FLOAT_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut DATETIME_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut DATE_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut TIME_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut UUID_TYPE: *mut PyTypeObject = 0 as *mut PyTypeObject;
pub static mut ARRAY_TYPE: Option<NonNull<PyTypeObject>> = None;
pub static mut INT_ATTR_STR: *mut PyObject = 0 as *mut PyObject;
pub static mut UTCOFFSET_METHOD_STR: *mut PyObject = 0 as *mut PyObject;
pub static mut NORMALIZE_METHOD_STR: *mut PyObject = 0 as *mut PyObject;
pub static mut CONVERT_METHOD_STR: *mut PyObject = 0 as *mut PyObject;
pub static mut DST_STR: *mut PyObject = 0 as *mut PyObject;
pub static mut DICT_STR: *mut PyObject = 0 as *mut PyObject;
pub static mut DATACLASS_FIELDS_STR: *mut PyObject = 0 as *mut PyObject;
pub static mut ARRAY_STRUCT_STR: *mut PyObject = 0 as *mut PyObject;
pub static mut STR_HASH_FUNCTION: Option<hashfunc> = None;
static INIT: Once = Once::new();
pub fn init_typerefs() {
INIT.call_once(|| unsafe {
PyDateTime_IMPORT();
HASH_SEED = rand::random::<u64>();
NONE = Py_None();
TRUE = Py_True();
FALSE = Py_False();
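        // Allocate a throwaway empty str (max_char 255 selects the compact
        // latin-1 representation) purely to read off its ob_type and tp_hash;
        // the canonical type objects are cached so later code can compare
        // type pointers instead of calling isinstance.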
let unicode = PyUnicode_New(0, 255);
STR_TYPE = (*unicode).ob_type;
STR_HASH_FUNCTION = (*((*unicode).ob_type)).tp_hash;
BYTES_TYPE = (*PyBytes_FromStringAndSize("".as_ptr() as *const c_char, 0)).ob_type;
BYTEARRAY_TYPE = (*PyByteArray_FromStringAndSize("".as_ptr() as *const c_char, 0)).ob_type;
DICT_TYPE = (*PyDict_New()).ob_type;
LIST_TYPE = (*PyList_New(0 as Py_ssize_t)).ob_type;
TUPLE_TYPE = (*PyTuple_New(0 as Py_ssize_t)).ob_type;
NONE_TYPE = (*NONE).ob_type;
BOOL_TYPE = (*TRUE).ob_type;
INT_TYPE = (*PyLong_FromLongLong(0)).ob_type;
FLOAT_TYPE = (*PyFloat_FromDouble(0.0)).ob_type;
DATETIME_TYPE = look_up_datetime_type();
DATE_TYPE = look_up_date_type();
TIME_TYPE = look_up_time_type();
UUID_TYPE = look_up_uuid_type();
ARRAY_TYPE = look_up_array_type();
INT_ATTR_STR = PyUnicode_FromStringAndSize("int".as_ptr() as *const c_char, 3);
UTCOFFSET_METHOD_STR =
PyUnicode_FromStringAndSize("utcoffset".as_ptr() as *const c_char, 9);
NORMALIZE_METHOD_STR =
PyUnicode_FromStringAndSize("normalize".as_ptr() as *const c_char, 9);
CONVERT_METHOD_STR = PyUnicode_FromStringAndSize("convert".as_ptr() as *const c_char, 7);
DST_STR = PyUnicode_FromStringAndSize("dst".as_ptr() as *const c_char, 3);
DICT_STR = PyUnicode_FromStringAndSize("__dict__".as_ptr() as *const c_char, 8);
DATACLASS_FIELDS_STR =
PyUnicode_FromStringAndSize("__dataclass_fields__".as_ptr() as *const c_char, 20);
ARRAY_STRUCT_STR = pyo3::ffi::PyUnicode_FromStringAndSize(
"__array_struct__".as_ptr() as *const c_char,
16,
);
});
}
unsafe fn look_up_array_type() -> Option<NonNull<PyTypeObject>> {
let numpy = PyImport_ImportModule("numpy\0".as_ptr() as *const c_char);
if numpy.is_null() {
PyErr_Clear();
return None;
} else {
let mod_dict = PyModule_GetDict(numpy);
let ptr = PyMapping_GetItemString(mod_dict, "ndarray\0".as_ptr() as *const c_char);
Py_XDECREF(ptr);
// Py_XDECREF(mod_dict) causes segfault when pytest exits
Py_XDECREF(numpy);
Some(NonNull::new_unchecked(ptr as *mut PyTypeObject))
}
}
unsafe fn look_up_uuid_type() -> *mut PyTypeObject {
let uuid_mod = PyImport_ImportModule("uuid\0".as_ptr() as *const c_char);
let uuid_mod_dict = PyModule_GetDict(uuid_mod);
let uuid = PyMapping_GetItemString(uuid_mod_dict, "NAMESPACE_DNS\0".as_ptr() as *const c_char);
let ptr = (*uuid).ob_type;
Py_DECREF(uuid);
Py_DECREF(uuid_mod_dict);
Py_DECREF(uuid_mod);
ptr
}
unsafe fn look_up_datetime_type() -> *mut PyTypeObject {
let datetime = (PyDateTimeAPI.DateTime_FromDateAndTime)(
1970,
1,
1,
0,
0,
0,
0,
NONE,
PyDateTimeAPI.DateTimeType,
);
let ptr = (*datetime).ob_type;
Py_DECREF(datetime);
ptr
}
unsafe fn look_up_date_type() -> *mut PyTypeObject {
let date = (PyDateTimeAPI.Date_FromDate)(1970, 1, 1, PyDateTimeAPI.DateType);
let ptr = (*date).ob_type;
Py_DECREF(date);
ptr
}
unsafe fn look_up_time_type() -> *mut PyTypeObject {
let time = (PyDateTimeAPI.Time_FromTime)(0, 0, 0, 0, NONE, PyDateTimeAPI.TimeType);
let ptr = (*time).ob_type;
Py_DECREF(time);
ptr
}
| 41.970588 | 99 | 0.6822 |
8aba0014e97bb1e5b31fa1e80312e6314448294d | 6,327 | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::mem;
use std::slice;
use std::vec;
use fold::MoveMap;
/// A vector type optimized for cases where the size is almost always 0 or 1
pub struct SmallVector<T> {
repr: SmallVectorRepr<T>,
}
enum SmallVectorRepr<T> {
Zero,
One(T),
Many(Vec<T>),
}
impl<T> FromIterator<T> for SmallVector<T> {
fn from_iter<I: Iterator<T>>(iter: I) -> SmallVector<T> {
let mut v = SmallVector::zero();
v.extend(iter);
v
}
}
impl<T> Extend<T> for SmallVector<T> {
fn extend<I: Iterator<T>>(&mut self, mut iter: I) {
for val in iter {
self.push(val);
}
}
}
impl<T> SmallVector<T> {
pub fn zero() -> SmallVector<T> {
SmallVector { repr: Zero }
}
pub fn one(v: T) -> SmallVector<T> {
SmallVector { repr: One(v) }
}
pub fn many(vs: Vec<T>) -> SmallVector<T> {
SmallVector { repr: Many(vs) }
}
pub fn as_slice<'a>(&'a self) -> &'a [T] {
match self.repr {
Zero => {
let result: &[T] = &[];
result
}
One(ref v) => slice::ref_slice(v),
Many(ref vs) => vs.as_slice()
}
}
pub fn push(&mut self, v: T) {
match self.repr {
Zero => self.repr = One(v),
One(..) => {
let one = mem::replace(&mut self.repr, Zero);
match one {
One(v1) => mem::replace(&mut self.repr, Many(vec!(v1, v))),
_ => unreachable!()
};
}
Many(ref mut vs) => vs.push(v)
}
}
pub fn push_all(&mut self, other: SmallVector<T>) {
for v in other.into_iter() {
self.push(v);
}
}
pub fn get<'a>(&'a self, idx: uint) -> &'a T {
match self.repr {
One(ref v) if idx == 0 => v,
Many(ref vs) => &vs[idx],
_ => panic!("out of bounds access")
}
}
pub fn expect_one(self, err: &'static str) -> T {
match self.repr {
One(v) => v,
Many(v) => {
if v.len() == 1 {
v.into_iter().next().unwrap()
} else {
panic!(err)
}
}
_ => panic!(err)
}
}
/// Deprecated: use `into_iter`.
#[deprecated = "use into_iter"]
pub fn move_iter(self) -> MoveItems<T> {
self.into_iter()
}
pub fn into_iter(self) -> MoveItems<T> {
let repr = match self.repr {
Zero => ZeroIterator,
One(v) => OneIterator(v),
Many(vs) => ManyIterator(vs.into_iter())
};
MoveItems { repr: repr }
}
pub fn len(&self) -> uint {
match self.repr {
Zero => 0,
One(..) => 1,
Many(ref vals) => vals.len()
}
}
pub fn is_empty(&self) -> bool { self.len() == 0 }
}
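// Note on the representation: `push` upgrades Zero -> One(v) on the first
// element and One(v1) -> Many(vec!(v1, v)) on the second, so the common zero-
// and one-element cases never allocate; only the Many variant touches the heap.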
pub struct MoveItems<T> {
repr: MoveItemsRepr<T>,
}
enum MoveItemsRepr<T> {
ZeroIterator,
OneIterator(T),
ManyIterator(vec::MoveItems<T>),
}
impl<T> Iterator<T> for MoveItems<T> {
fn next(&mut self) -> Option<T> {
match self.repr {
ZeroIterator => None,
OneIterator(..) => {
let mut replacement = ZeroIterator;
mem::swap(&mut self.repr, &mut replacement);
match replacement {
OneIterator(v) => Some(v),
_ => unreachable!()
}
}
ManyIterator(ref mut inner) => inner.next()
}
}
fn size_hint(&self) -> (uint, Option<uint>) {
match self.repr {
ZeroIterator => (0, Some(0)),
OneIterator(..) => (1, Some(1)),
ManyIterator(ref inner) => inner.size_hint()
}
}
}
impl<T> MoveMap<T> for SmallVector<T> {
fn move_map(self, f: |T| -> T) -> SmallVector<T> {
let repr = match self.repr {
Zero => Zero,
One(v) => One(f(v)),
Many(vs) => Many(vs.move_map(f))
};
SmallVector { repr: repr }
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_len() {
let v: SmallVector<int> = SmallVector::zero();
assert_eq!(0, v.len());
assert_eq!(1, SmallVector::one(1i).len());
assert_eq!(5, SmallVector::many(vec!(1i, 2, 3, 4, 5)).len());
}
#[test]
fn test_push_get() {
let mut v = SmallVector::zero();
v.push(1i);
assert_eq!(1, v.len());
assert_eq!(&1, v.get(0));
v.push(2);
assert_eq!(2, v.len());
assert_eq!(&2, v.get(1));
v.push(3);
assert_eq!(3, v.len());
assert_eq!(&3, v.get(2));
}
#[test]
fn test_from_iter() {
let v: SmallVector<int> = (vec!(1i, 2, 3)).into_iter().collect();
assert_eq!(3, v.len());
assert_eq!(&1, v.get(0));
assert_eq!(&2, v.get(1));
assert_eq!(&3, v.get(2));
}
#[test]
fn test_move_iter() {
let v = SmallVector::zero();
let v: Vec<int> = v.into_iter().collect();
assert_eq!(Vec::new(), v);
let v = SmallVector::one(1i);
assert_eq!(vec!(1i), v.into_iter().collect());
let v = SmallVector::many(vec!(1i, 2i, 3i));
assert_eq!(vec!(1i, 2i, 3i), v.into_iter().collect());
}
#[test]
#[should_fail]
fn test_expect_one_zero() {
let _: int = SmallVector::zero().expect_one("");
}
#[test]
#[should_fail]
fn test_expect_one_many() {
SmallVector::many(vec!(1i, 2)).expect_one("");
}
#[test]
fn test_expect_one_one() {
assert_eq!(1i, SmallVector::one(1i).expect_one(""));
assert_eq!(1i, SmallVector::many(vec!(1i)).expect_one(""));
}
}
| 25.409639 | 79 | 0.488857 |
e435e97346d0b80d5259fb607dc6840bb5ec2bf6 | 212 | //! `NodeAttribute` object (camera).
use crate::v7400::object::nodeattribute::NodeAttributeHandle;
define_object_subtype! {
/// `NodeAttribute` node handle (camera).
CameraHandle: NodeAttributeHandle
}
| 23.555556 | 61 | 0.740566 |
763c3f0868c328b7b7e90304ae55b8451bac90b2 | 2,021 | use super::Texture;
use crate::{vec3::Color, vec3::Point3};
// assume 24-bit color depth (3 bytes per pixel)
const BYTES_PER_PIXEL: usize = 3;
pub struct ImageTexture {
data: Vec<u8>,
width: usize,
height: usize,
bytes_per_scanline: usize,
}
impl ImageTexture {
pub fn from_bmp_data(bmp_data: &Vec<u8>) -> Self {
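        // BMP header layout (all fields little-endian): bytes 0x0A..0x0E hold
        // the offset of the pixel array; with a BITMAPINFOHEADER, bytes
        // 0x12..0x16 and 0x16..0x1A hold the signed 32-bit width and height.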
let data_position = u32::from_le_bytes([
bmp_data[0x0A],
bmp_data[0x0B],
bmp_data[0x0C],
bmp_data[0x0D],
]);
        // assuming a Windows BITMAPINFOHEADER, width and height are signed 32-bit
let width = i32::from_le_bytes([
bmp_data[0x12],
bmp_data[0x13],
bmp_data[0x14],
bmp_data[0x15],
]) as usize;
let height = i32::from_le_bytes([
bmp_data[0x16],
bmp_data[0x17],
bmp_data[0x18],
bmp_data[0x19],
]) as usize;
Self {
data: bmp_data[(data_position as usize)..bmp_data.len()].to_vec(),
height,
width,
bytes_per_scanline: BYTES_PER_PIXEL * width,
}
}
}
impl Texture for ImageTexture {
fn value(&self, u: f64, v: f64, _: &Point3) -> Color {
let u = u.clamp(0.0, 1.0);
        // This is a deviation from the book, where v gets flipped. Flipping is
        // unnecessary here, most likely because BMP stores its scanlines
        // bottom-to-top, so the loaded data is already vertically flipped.
//let v = 1.0 - v.clamp(0.0, 1.0);
let v = v.clamp(0.0, 1.0);
let mut i = (u * self.width as f64) as usize;
let mut j = (v * self.height as f64) as usize;
if i >= self.width { i = self.width - 1 };
if j >= self.height { j = self.height - 1 };
let color_scale = 1.0 / 255.0;
let pixel = j * self.bytes_per_scanline + i * BYTES_PER_PIXEL;
Color {
x: color_scale * *self.data.get(pixel + 2).unwrap() as f64,
y: color_scale * *self.data.get(pixel + 1).unwrap() as f64,
z: color_scale * *self.data.get(pixel).unwrap() as f64,
}
}
}
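// Minimal usage sketch (hypothetical: assumes `bmp_data` was read from disk
// elsewhere and that `Point3` exposes public x/y/z fields like `Color` above):
//   let tex = ImageTexture::from_bmp_data(&bmp_data);
//   let color = tex.value(0.5, 0.5, &Point3 { x: 0.0, y: 0.0, z: 0.0 });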
| 31.578125 | 78 | 0.539337 |
bb3e7a725a2b85acb36cecf4d80650e1f6e2ecf5 | 2,849 | extern crate tempdir;
extern crate libc;
use std::io::prelude::*;
use std::ffi::{OsStr};
use tempdir::{TempDir};
fn run<S: AsRef<OsStr>>(cmd: S, args: &[S]) -> u32 {
let mut cmd = std::process::Command::new(cmd);
for arg in args {
cmd.arg(arg);
}
let mut child = cmd.spawn().unwrap();
let pid = child.id();
child.wait().unwrap();
std::thread::sleep(std::time::Duration::from_millis(100));
pid
}
#[test]
fn test_umask_chdir() {
let tmpdir = TempDir::new("chdir").unwrap();
// third argument is the umask: 255 == 0o377
let args = vec![tmpdir.path().to_str().unwrap(), "test", "255"];
run("target/debug/examples/test_chdir", &args);
let filename = tmpdir.path().join("test");
let mut data = Vec::new();
std::fs::File::open(&filename).unwrap().read_to_end(&mut data).unwrap();
assert!(data == b"test");
// due to the umask, the file should have been created with -w
assert!(filename.metadata().unwrap().permissions().readonly());
}
#[test]
fn test_pid() {
let tmpdir = TempDir::new("chdir").unwrap();
let pid_file = tmpdir.path().join("pid");
let args = vec![pid_file.to_str().unwrap()];
let child_pid = run("target/debug/examples/test_pid", &args);
let mut data = String::new();
std::fs::File::open(&pid_file).unwrap().read_to_string(&mut data).unwrap();
let pid: u32 = data.parse().unwrap();
assert!(pid != child_pid)
}
#[test]
fn double_run() {
let tmpdir = TempDir::new("double_run").unwrap();
let pid_file = tmpdir.path().join("pid");
let first_result = tmpdir.path().join("first");
let second_result = tmpdir.path().join("second");
for file in vec![&first_result, &second_result] {
let args = vec![pid_file.to_str().unwrap(), file.to_str().unwrap()];
run("target/debug/examples/test_double_run", &args);
}
std::thread::sleep(std::time::Duration::from_millis(100));
{
let mut data = String::new();
std::fs::File::open(&first_result).unwrap().read_to_string(&mut data).unwrap();
assert!(data == "ok")
}
{
let mut data = String::new();
std::fs::File::open(&second_result).unwrap().read_to_string(&mut data).unwrap();
assert!(data == "error")
}
}
#[test]
#[cfg(target_os = "macos")]
fn test_uid_gid() {
let tmpdir = TempDir::new("uid_gid").unwrap();
let result_file = tmpdir.path().join("result");
let args = vec!["nobody", "daemon", &result_file.to_str().unwrap()];
run("target/debug/examples/test_uid_gid", &args);
let own_uid_gid_string = unsafe { format!("{} {}", libc::getuid(), libc::getgid()) };
let mut data = String::new();
std::fs::File::open(&result_file).unwrap().read_to_string(&mut data).unwrap();
assert!(!data.is_empty());
assert!(data != own_uid_gid_string)
}
| 30.308511 | 89 | 0.611092 |
0ea487dd2b2d33604a3c47ac2043611e5bd044d4 | 3,020 | //! @ A glue specification has a halfword reference count in its first word,
//! @^reference counts@>
//! representing |null| plus the number of glue nodes that point to it (less one).
//! Note that the reference count appears in the same position as
//! the |link| field in list nodes; this is the field that is initialized
//! to |null| when a node is allocated, and it is also the field that is flagged
//! by |empty_flag| in empty nodes.
//!
//! Glue specifications also contain three |scaled| fields, for the |width|,
//! |stretch|, and |shrink| dimensions. Finally, there are two one-byte
//! fields called |stretch_order| and |shrink_order|; these contain the
//! orders of infinity (|normal|, |fil|, |fill|, or |filll|)
//! corresponding to the stretch and shrink values.
//
// @d glue_spec_size=4 {number of words to allocate for a glue specification}
/// number of words to allocate for a glue specification
pub(crate) const glue_spec_size: quarterword = 4;
// @d glue_ref_count(#) == link(#) {reference count of a glue specification}
/// reference count of a glue specification
pub(crate) macro glue_ref_count($globals:expr, $ptr:expr) {
crate::section_0118::link!($globals, $ptr)
}
// @d stretch(#) == mem[#+2].sc {the stretchability of this glob of glue}
/// the stretchability of this glob of glue
pub(crate) macro stretch($globals:expr, $ptr:expr) {
$globals.mem[$ptr + 2][crate::section_0101::MEMORY_WORD_SC]
}
// @d shrink(#) == mem[#+3].sc {the shrinkability of this glob of glue}
/// the shrinkability of this glob of glue
pub(crate) macro shrink($globals:expr, $ptr:expr) {
$globals.mem[$ptr + 3][crate::section_0101::MEMORY_WORD_SC]
}
// @d stretch_order == type {order of infinity for stretching}
/// order of infinity for stretching
pub(crate) macro stretch_order($globals:expr, $ptr:expr) {
crate::section_0133::r#type!($globals, $ptr)
}
// @d shrink_order == subtype {order of infinity for shrinking}
/// order of infinity for shrinking
pub(crate) macro shrink_order($globals:expr, $ptr:expr) {
crate::section_0133::subtype!($globals, $ptr)
}
// @d fil=1 {first-order infinity}
// @d fill=2 {second-order infinity}
// @d filll=3 {third-order infinity}
//
// @<Types...@>=
// @!glue_ord=normal..filll; {infinity to the 0, 1, 2, or 3 power}
/// infinity to the 0, 1, 2, or 3 power
#[derive(Clone, Copy, PartialEq)]
pub(crate) enum glue_ord {
normal = 0,
/// first-order infinity
fil = 1,
/// second-order infinity
fill = 2,
/// third-order infinity
filll = 3,
}
pub(crate) type normal_TYPENUM = U0;
pub(crate) type filll_TYPENUM = U3;
impl glue_ord {
pub(crate) fn get(self) -> u8 {
self as u8
}
}
impl From<u8> for glue_ord {
fn from(val: u8) -> glue_ord {
match val {
0 => glue_ord::normal,
1 => glue_ord::fil,
2 => glue_ord::fill,
3 => glue_ord::filll,
_ => unreachable!(),
}
}
}
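// Round-trip sketch: `glue_ord::from(2u8)` is `glue_ord::fill` and `.get()`
// returns 2 again; values outside 0..=3 hit `unreachable!()`, mirroring the
// Pascal subrange normal..filll.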
use crate::section_0113::quarterword;
use typenum::{U0, U3};
| 35.529412 | 82 | 0.66457 |
9b3eb741829faf1bc53a9f311bbf42efedbf6860 | 125,204 | use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::BTreeMap;
use easy_ll;
use weld_common::WeldRuntimeErrno;
use super::ast::*;
use super::ast::Type::*;
use super::ast::LiteralKind::*;
use super::ast::ScalarKind::*;
use super::ast::BuilderKind::*;
use super::code_builder::CodeBuilder;
use super::error::*;
use super::macro_processor;
use super::pretty_print::*;
use super::program::Program;
use super::sir;
use super::sir::*;
use super::sir::Statement::*;
use super::sir::Terminator::*;
use super::transforms;
use super::type_inference;
use super::util::IdGenerator;
use super::util::MERGER_BC;
#[cfg(test)]
use super::parser::*;
static PRELUDE_CODE: &'static str = include_str!("resources/prelude.ll");
static VECTOR_CODE: &'static str = include_str!("resources/vector.ll");
static MERGER_CODE: &'static str = include_str!("resources/merger.ll");
static DICTIONARY_CODE: &'static str = include_str!("resources/dictionary.ll");
static DICTMERGER_CODE: &'static str = include_str!("resources/dictmerger.ll");
/// Generates LLVM code for one or more modules.
pub struct LlvmGenerator {
/// Track a unique name of the form %s0, %s1, etc for each struct generated.
struct_names: HashMap<Vec<Type>, String>,
struct_ids: IdGenerator,
/// Track a unique name of the form %v0, %v1, etc for each vec generated.
vec_names: HashMap<Type, String>,
vec_ids: IdGenerator,
merger_names: HashMap<Type, String>,
merger_ids: IdGenerator,
/// Tracks a unique name of the form %d0, %d1, etc for each dict generated.
dict_names: HashMap<Type, String>,
dict_ids: IdGenerator,
/// TODO This is unnecessary but satisfies the compiler for now.
bld_names: HashMap<BuilderKind, String>,
/// A CodeBuilder for prelude functions such as type and struct definitions.
prelude_code: CodeBuilder,
prelude_var_ids: IdGenerator,
/// A CodeBuilder for body functions in the module.
body_code: CodeBuilder,
visited: HashSet<sir::FunctionId>,
}
/// A wrapper for a struct passed as input to the Weld runtime.
#[derive(Clone, Debug)]
#[repr(C)]
pub struct WeldInputArgs {
pub input: i64,
pub nworkers: i32,
pub mem_limit: i64,
}
/// A wrapper for outputs passed out of the Weld runtime.
#[derive(Clone, Debug)]
#[repr(C)]
pub struct WeldOutputArgs {
pub output: i64,
pub run_id: i64,
pub errno: WeldRuntimeErrno,
}
fn get_combined_params(sir: &SirProgram, par_for: &ParallelForData) -> HashMap<Symbol, Type> {
let mut body_params = sir.funcs[par_for.body].params.clone();
for (arg, ty) in sir.funcs[par_for.cont].params.iter() {
body_params.insert(arg.clone(), ty.clone());
}
body_params
}
fn get_sym_ty<'a>(func: &'a SirFunction, sym: &Symbol) -> WeldResult<&'a Type> {
if func.locals.get(sym).is_some() {
Ok(func.locals.get(sym).unwrap())
} else if func.params.get(sym).is_some() {
Ok(func.params.get(sym).unwrap())
} else {
weld_err!("Can't find symbol {}#{}", sym.name, sym.id)
}
}
impl LlvmGenerator {
pub fn new() -> LlvmGenerator {
let mut generator = LlvmGenerator {
struct_names: HashMap::new(),
struct_ids: IdGenerator::new("%s"),
vec_names: HashMap::new(),
vec_ids: IdGenerator::new("%v"),
merger_names: HashMap::new(),
merger_ids: IdGenerator::new("%m"),
dict_names: HashMap::new(),
dict_ids: IdGenerator::new("%d"),
bld_names: HashMap::new(),
prelude_code: CodeBuilder::new(),
prelude_var_ids: IdGenerator::new("%p.p"),
body_code: CodeBuilder::new(),
visited: HashSet::new(),
};
generator.prelude_code.add(PRELUDE_CODE);
generator.prelude_code.add("\n");
generator
}
/// Return all the code generated so far.
pub fn result(&mut self) -> String {
format!("; PRELUDE:\n\n{}\n; BODY:\n\n{}",
self.prelude_code.result(),
self.body_code.result())
}
fn get_arg_str(&mut self, params: &HashMap<Symbol, Type>, suffix: &str) -> WeldResult<String> {
let mut arg_types = String::new();
let params_sorted: BTreeMap<&Symbol, &Type> = params.iter().collect();
for (arg, ty) in params_sorted.iter() {
let arg_str = format!("{} {}{}, ",
try!(self.llvm_type(&ty)),
llvm_symbol(&arg),
suffix);
arg_types.push_str(&arg_str);
}
arg_types.push_str("%work_t* %cur.work");
Ok(arg_types)
}
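    // Sketch of the output (symbol names illustrative; real ones carry IDs):
    // params {x: i64, v: %v0} with suffix ".in" produce
    // "i64 %x.in, %v0 %v.in, %work_t* %cur.work", the parameter list used for
    // every generated function below.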
fn unload_arg_struct(&mut self,
params: &HashMap<Symbol, Type>,
ctx: &mut FunctionContext)
-> WeldResult<()> {
let params_sorted: BTreeMap<&Symbol, &Type> = params.iter().collect();
let ll_ty = try!(self.llvm_type(
&Struct(params_sorted.iter().map(|p| p.1.clone()).cloned().collect())));
let storage_typed = ctx.var_ids.next();
let storage = ctx.var_ids.next();
let work_data_ptr = ctx.var_ids.next();
let work_data = ctx.var_ids.next();
ctx.code.add(format!("{} = getelementptr %work_t, %work_t* %cur.work, i32 0, i32 0",
work_data_ptr));
ctx.code.add(format!("{} = load i8*, i8** {}", work_data, work_data_ptr));
ctx.code.add(format!("{} = bitcast i8* {} to {}*",
storage_typed,
work_data,
ll_ty));
ctx.code.add(format!("{} = load {}, {}* {}", storage, ll_ty, ll_ty, storage_typed));
for (i, (arg, _)) in params_sorted.iter().enumerate() {
ctx.code.add(format!("{} = extractvalue {} {}, {}",
llvm_symbol(arg),
ll_ty,
storage,
i));
}
Ok(())
}
fn create_new_pieces(&mut self,
params: &HashMap<Symbol, Type>,
ctx: &mut FunctionContext)
-> WeldResult<()> {
let full_task_ptr = ctx.var_ids.next();
let full_task_int = ctx.var_ids.next();
let full_task_bit = ctx.var_ids.next();
ctx.code
.add(format!("{} = getelementptr %work_t, %work_t* %cur.work, i32 0, i32 4",
full_task_ptr));
ctx.code.add(format!("{} = load i32, i32* {}", full_task_int, full_task_ptr));
ctx.code.add(format!("{} = trunc i32 {} to i1", full_task_bit, full_task_int));
ctx.code.add(format!("br i1 {}, label %new_pieces, label %fn_call", full_task_bit));
ctx.code.add("new_pieces:");
let params_sorted: BTreeMap<&Symbol, &Type> = params.iter().collect();
for (arg, ty) in params_sorted.iter() {
match **ty {
Builder(ref bk) => {
match *bk {
Appender(_) => {
let bld_ty_str = try!(self.llvm_type(ty)).to_string();
let bld_prefix = format!("@{}", bld_ty_str.replace("%", ""));
ctx.code
.add(format!("call void {}.newPiece({} {}, %work_t* %cur.work)",
bld_prefix,
bld_ty_str,
llvm_symbol(arg)));
}
_ => {}
}
}
_ => {}
}
}
ctx.code.add("br label %fn_call");
Ok(())
}
fn get_arg_struct(&mut self,
params: &HashMap<Symbol, Type>,
ctx: &mut FunctionContext)
-> WeldResult<String> {
let params_sorted: BTreeMap<&Symbol, &Type> = params.iter().collect();
let mut prev_ref = String::from("undef");
let ll_ty = try!(self.llvm_type(
&Struct(params_sorted.iter().map(|p| p.1.clone()).cloned().collect())))
.to_string();
for (i, (arg, ty)) in params_sorted.iter().enumerate() {
let next_ref = ctx.var_ids.next();
ctx.code.add(format!("{} = insertvalue {} {}, {} {}, {}",
next_ref,
ll_ty,
prev_ref,
try!(self.llvm_type(&ty)),
llvm_symbol(arg),
i));
prev_ref.clear();
prev_ref.push_str(&next_ref);
}
let struct_size_ptr = ctx.var_ids.next();
let struct_size = ctx.var_ids.next();
let struct_storage = ctx.var_ids.next();
let struct_storage_typed = ctx.var_ids.next();
ctx.code.add(format!("{} = getelementptr {}, {}* null, i32 1",
struct_size_ptr,
ll_ty,
ll_ty));
ctx.code.add(format!("{} = ptrtoint {}* {} to i64",
struct_size,
ll_ty,
struct_size_ptr));
// we use regular malloc here because this pointer will always be freed by parlib
ctx.code
.add(format!("{} = call i8* @malloc(i64 {})", struct_storage, struct_size));
ctx.code.add(format!("{} = bitcast i8* {} to {}*",
struct_storage_typed,
struct_storage,
ll_ty));
ctx.code.add(format!("store {} {}, {}* {}",
ll_ty,
prev_ref,
ll_ty,
struct_storage_typed));
Ok(struct_storage)
}
/// Add a function to the generated program.
pub fn add_function(&mut self,
sir: &SirProgram,
func: &SirFunction,
// non-None only if func is loop body
containing_loop: Option<ParallelForData>)
-> WeldResult<()> {
if !self.visited.insert(func.id) {
return Ok(());
}
{
let mut ctx = &mut FunctionContext::new();
let mut arg_types = try!(self.get_arg_str(&func.params, ".in"));
if containing_loop.is_some() {
arg_types.push_str(", i64 %lower.idx, i64 %upper.idx");
}
// Start the entry block by defining the function and storing all its arguments on the
// stack (this makes them consistent with other local variables). Later, expressions may
// add more local variables to alloca_code.
ctx.alloca_code.add(format!("define void @f{}({}) {{", func.id, arg_types));
ctx.alloca_code.add(format!("fn.entry:"));
for (arg, ty) in func.params.iter() {
let arg_str = llvm_symbol(&arg);
let ty_str = try!(self.llvm_type(&ty)).to_string();
try!(ctx.add_alloca(&arg_str, &ty_str));
ctx.code.add(format!("store {} {}.in, {}* {}", ty_str, arg_str, ty_str, arg_str));
}
for (arg, ty) in func.locals.iter() {
let arg_str = llvm_symbol(&arg);
let ty_str = try!(self.llvm_type(&ty)).to_string();
try!(ctx.add_alloca(&arg_str, &ty_str));
}
ctx.code.add(format!("%cur.tid = call i32 @my_id_public()"));
if containing_loop.is_some() {
let par_for = containing_loop.clone().unwrap();
let bld_ty_str = try!(self.llvm_type(func.params.get(&par_for.builder).unwrap()))
.to_string();
let bld_param_str = llvm_symbol(&par_for.builder);
let bld_arg_str = llvm_symbol(&par_for.builder_arg);
ctx.code.add(format!("store {} {}.in, {}* {}",
&bld_ty_str,
bld_param_str,
&bld_ty_str,
bld_arg_str));
try!(ctx.add_alloca("%cur.idx", "i64"));
ctx.code.add("store i64 %lower.idx, i64* %cur.idx");
ctx.code.add("br label %loop.start");
ctx.code.add("loop.start:");
let idx_tmp = try!(self.load_var("%cur.idx", "i64", ctx));
if !par_for.innermost {
let work_idx_ptr = ctx.var_ids.next();
ctx.code
.add(format!("{} = getelementptr %work_t, %work_t* %cur.work, i32 0, \
i32 3",
work_idx_ptr));
ctx.code.add(format!("store i64 {}, i64* {}", idx_tmp, work_idx_ptr));
}
let idx_cmp = ctx.var_ids.next();
ctx.code.add(format!("{} = icmp ult i64 {}, %upper.idx", idx_cmp, idx_tmp));
ctx.code.add(format!("br i1 {}, label %loop.body, label %loop.end", idx_cmp));
ctx.code.add("loop.body:");
let mut prev_ref = String::from("undef");
let elem_ty = func.locals.get(&par_for.data_arg).unwrap();
let elem_ty_str = try!(self.llvm_type(&elem_ty)).to_string();
for (i, iter) in par_for.data.iter().enumerate() {
let data_ty_str = try!(self.llvm_type(func.params.get(&iter.data).unwrap()))
.to_string();
let data_str =
try!(self.load_var(llvm_symbol(&iter.data).as_str(), &data_ty_str, ctx));
let data_prefix = format!("@{}", data_ty_str.replace("%", ""));
let inner_elem_tmp_ptr = ctx.var_ids.next();
let inner_elem_ty_str = if par_for.data.len() == 1 {
elem_ty_str.clone()
} else {
match *elem_ty {
Struct(ref v) => try!(self.llvm_type(&v[i])).to_string(),
_ => {
weld_err!("Internal error: invalid element type {}",
print_type(elem_ty))?
}
}
};
let arr_idx = if iter.start.is_some() {
let offset = ctx.var_ids.next();
let stride_str = try!(self.load_var(
llvm_symbol(&iter.stride.clone().unwrap()).as_str(), "i64", ctx));
let start_str = try!(self.load_var(
llvm_symbol(&iter.start.clone().unwrap()).as_str(), "i64", ctx));
ctx.code.add(format!("{} = mul i64 {}, {}", offset, idx_tmp, stride_str));
let final_idx = ctx.var_ids.next();
ctx.code.add(format!("{} = add i64 {}, {}", final_idx, start_str, offset));
final_idx
} else {
idx_tmp.clone()
};
ctx.code.add(format!("{} = call {}* {}.at({} {}, i64 {})",
inner_elem_tmp_ptr,
&inner_elem_ty_str,
data_prefix,
&data_ty_str,
data_str,
arr_idx));
let inner_elem_tmp =
try!(self.load_var(&inner_elem_tmp_ptr, &inner_elem_ty_str, ctx));
if par_for.data.len() == 1 {
prev_ref.clear();
prev_ref.push_str(&inner_elem_tmp);
} else {
let elem_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = insertvalue {} {}, {} {}, {}",
elem_tmp,
elem_ty_str,
prev_ref,
inner_elem_ty_str,
inner_elem_tmp,
i));
prev_ref.clear();
prev_ref.push_str(&elem_tmp);
}
}
let elem_str = llvm_symbol(&par_for.data_arg);
ctx.code.add(format!("store {} {}, {}* {}",
&elem_ty_str,
prev_ref,
&elem_ty_str,
elem_str));
ctx.code.add(format!("store i64 {}, i64* {}",
idx_tmp,
llvm_symbol(&par_for.idx_arg)));
}
ctx.code.add(format!("br label %b.b{}", func.blocks[0].id));
// Generate an expression for the function body.
try!(self.gen_func(sir, func, ctx));
ctx.code.add("body.end:");
if containing_loop.is_some() {
ctx.code.add("br label %loop.terminator");
ctx.code.add("loop.terminator:");
let idx_tmp = try!(self.load_var("%cur.idx", "i64", ctx));
let idx_inc = ctx.var_ids.next();
ctx.code.add(format!("{} = add i64 {}, 1", idx_inc, idx_tmp));
ctx.code.add(format!("store i64 {}, i64* %cur.idx", idx_inc));
ctx.code.add("br label %loop.start");
ctx.code.add("loop.end:");
}
ctx.code.add("ret void");
ctx.code.add("}\n\n");
self.body_code.add(&ctx.alloca_code.result());
self.body_code.add(&ctx.code.result());
}
if containing_loop.is_some() {
let par_for = containing_loop.clone().unwrap();
{
let mut wrap_ctx = &mut FunctionContext::new();
let serial_arg_types =
try!(self.get_arg_str(&get_combined_params(sir, &par_for), ""));
wrap_ctx.code
.add(format!("define void @f{}_wrapper({}) {{", func.id, serial_arg_types));
wrap_ctx.code.add(format!("fn.entry:"));
// Use the first data to compute the indexing.
let first_data = &par_for.data[0].data;
let data_str = llvm_symbol(&first_data);
let data_ty_str = try!(self.llvm_type(func.params.get(&first_data).unwrap()))
.to_string();
let data_prefix = format!("@{}", data_ty_str.replace("%", ""));
let num_iters_str = wrap_ctx.var_ids.next();
if par_for.data[0].start.is_none() {
// set num_iters_str to len(first_data)
wrap_ctx.code.add(format!("{} = call i64 {}.size({} {})",
num_iters_str,
data_prefix,
data_ty_str,
data_str));
} else {
// set num_iters_str to (end - start) / stride
let start_str = llvm_symbol(&par_for.data[0].start.clone().unwrap());
let end_str = llvm_symbol(&par_for.data[0].end.clone().unwrap());
let stride_str = llvm_symbol(&par_for.data[0].stride.clone().unwrap());
let diff_tmp = wrap_ctx.var_ids.next();
wrap_ctx.code
.add(format!("{} = sub i64 {}, {}", diff_tmp, end_str, start_str));
wrap_ctx.code
.add(format!("{} = udiv i64 {}, {}", num_iters_str, diff_tmp, stride_str));
}
// Perform a bounds check on each of the data items before launching the loop
for iter in par_for.data.iter() {
// Vector LLVM information for the current iter.
let data_str = llvm_symbol(&iter.data);
let data_ty_str = try!(self.llvm_type(func.params.get(&iter.data).unwrap()))
.to_string();
let data_prefix = format!("@{}", data_ty_str.replace("%", ""));
let vec_size_str = wrap_ctx.var_ids.next();
wrap_ctx.code.add(format!("{} = call i64 {}.size({} {})",
vec_size_str,
data_prefix,
data_ty_str,
data_str));
let (start_str, stride_str) = if iter.start.is_none() {
let start_str = "0".to_string();
let stride_str = "1".to_string();
(start_str, stride_str)
} else {
(llvm_symbol(iter.start.as_ref().unwrap()),
llvm_symbol(iter.stride.as_ref().unwrap()))
};
let t0 = wrap_ctx.var_ids.next();
let t1 = wrap_ctx.var_ids.next();
let t2 = wrap_ctx.var_ids.next();
let cond = wrap_ctx.var_ids.next();
let next_bounds_check_label = wrap_ctx.var_ids.next();
// t0 = mul i64 num_iters, 1
// t1 = mul i64 stride, t0
// t2 = add i64 t1, start
// cond = icmp lte i64 t1, size
// br i1 cond, label %nextCheck, label %checkFailed
// nextCheck:
// (loop)
wrap_ctx.code
.add(format!("{} = sub i64 {}, 1", t0, num_iters_str));
wrap_ctx.code
.add(format!("{} = mul i64 {}, {}", t1, stride_str, t0));
wrap_ctx.code
.add(format!("{} = add i64 {}, {}", t2, t1, start_str));
wrap_ctx.code
.add(format!("{} = icmp ult i64 {}, {}", cond, t2, vec_size_str));
wrap_ctx.code
.add(format!("br i1 {}, label {}, label %fn.boundcheckfailed",
cond,
next_bounds_check_label));
wrap_ctx.code.add(format!("{}:", next_bounds_check_label.replace("%", "")));
}
// If we get here, the bounds check passed.
wrap_ctx.code.add(format!("br label %fn.boundcheckpassed"));
// Handle a bounds check fail.
wrap_ctx.code.add(format!("fn.boundcheckfailed:"));
let errno = WeldRuntimeErrno::BadIteratorLength;
let run_id = wrap_ctx.var_ids.next();
wrap_ctx.code.add(format!("{} = call i64 @get_runid()", run_id));
wrap_ctx.code.add(format!("call void @weld_rt_set_errno(i64 {}, i64 {})",
run_id,
errno as i64));
wrap_ctx.code.add(format!("call void @weld_abort_thread()"));
wrap_ctx.code.add(format!("; Unreachable!"));
wrap_ctx.code.add(format!("br label %fn.end"));
wrap_ctx.code.add(format!("fn.boundcheckpassed:"));
let bound_cmp = wrap_ctx.var_ids.next();
let mut grain_size = 4096;
if par_for.innermost {
wrap_ctx.code.add(format!("{} = icmp ule i64 {}, {}",
bound_cmp,
num_iters_str,
grain_size));
wrap_ctx.code
.add(format!("br i1 {}, label %for.ser, label %for.par", bound_cmp));
wrap_ctx.code.add(format!("for.ser:"));
let mut body_arg_types = try!(self.get_arg_str(&func.params, ""));
body_arg_types.push_str(format!(", i64 0, i64 {}", num_iters_str).as_str());
wrap_ctx.code.add(format!("call void @f{}({})", func.id, body_arg_types));
let cont_arg_types =
try!(self.get_arg_str(&sir.funcs[par_for.cont].params, ""));
wrap_ctx.code.add(format!("call void @f{}({})", par_for.cont, cont_arg_types));
wrap_ctx.code.add(format!("br label %fn.end"));
} else {
wrap_ctx.code.add("br label %for.par");
grain_size = 1;
}
wrap_ctx.code.add(format!("for.par:"));
let body_struct = try!(self.get_arg_struct(&func.params, &mut wrap_ctx));
let cont_struct =
try!(self.get_arg_struct(&sir.funcs[par_for.cont].params, &mut wrap_ctx));
wrap_ctx.code
.add(format!("call void @pl_start_loop(%work_t* %cur.work, i8* {}, i8* {}, \
void (%work_t*)* @f{}_par, void (%work_t*)* @f{}_par, i64 0, \
i64 {}, i32 {})",
body_struct,
cont_struct,
func.id,
par_for.cont,
num_iters_str,
grain_size));
wrap_ctx.code.add(format!("br label %fn.end"));
wrap_ctx.code.add("fn.end:");
wrap_ctx.code.add("ret void");
wrap_ctx.code.add("}\n\n");
self.body_code.add(&wrap_ctx.code.result());
}
{
let mut par_body_ctx = &mut FunctionContext::new();
par_body_ctx.code
.add(format!("define void @f{}_par(%work_t* %cur.work) {{", func.id));
par_body_ctx.code.add("entry:");
try!(self.unload_arg_struct(&func.params, &mut par_body_ctx));
let lower_bound_ptr = par_body_ctx.var_ids.next();
let lower_bound = par_body_ctx.var_ids.next();
let upper_bound_ptr = par_body_ctx.var_ids.next();
let upper_bound = par_body_ctx.var_ids.next();
par_body_ctx.code
.add(format!("{} = getelementptr %work_t, %work_t* %cur.work, i32 0, i32 1",
lower_bound_ptr));
par_body_ctx.code
.add(format!("{} = load i64, i64* {}", lower_bound, lower_bound_ptr));
par_body_ctx.code
.add(format!("{} = getelementptr %work_t, %work_t* %cur.work, i32 0, i32 2",
upper_bound_ptr));
par_body_ctx.code
.add(format!("{} = load i64, i64* {}", upper_bound, upper_bound_ptr));
let body_arg_types = try!(self.get_arg_str(&func.params, ""));
try!(self.create_new_pieces(&func.params, &mut par_body_ctx));
par_body_ctx.code.add("fn_call:");
par_body_ctx.code.add(format!("call void @f{}({}, i64 {}, i64 {})",
func.id,
body_arg_types,
lower_bound,
upper_bound));
par_body_ctx.code.add("ret void");
par_body_ctx.code.add("}\n\n");
self.body_code.add(&par_body_ctx.code.result());
}
{
let mut par_cont_ctx = &mut FunctionContext::new();
par_cont_ctx.code
.add(format!("define void @f{}_par(%work_t* %cur.work) {{", par_for.cont));
par_cont_ctx.code.add("entry:");
try!(self.unload_arg_struct(&sir.funcs[par_for.cont].params, &mut par_cont_ctx));
try!(self.create_new_pieces(&sir.funcs[par_for.cont].params, &mut par_cont_ctx));
par_cont_ctx.code.add("fn_call:");
let cont_arg_types = try!(self.get_arg_str(&sir.funcs[par_for.cont].params, ""));
par_cont_ctx.code.add(format!("call void @f{}({})", par_for.cont, cont_arg_types));
par_cont_ctx.code.add("ret void");
par_cont_ctx.code.add("}\n\n");
self.body_code.add(&par_cont_ctx.code.result());
}
}
if func.id == 0 {
let mut par_top_ctx = &mut FunctionContext::new();
par_top_ctx.code.add("define void @f0_par(%work_t* %cur.work) {");
try!(self.unload_arg_struct(&sir.funcs[0].params, &mut par_top_ctx));
let top_arg_types = try!(self.get_arg_str(&sir.funcs[0].params, ""));
par_top_ctx.code.add(format!("call void @f0({})", top_arg_types));
par_top_ctx.code.add("ret void");
par_top_ctx.code.add("}\n\n");
self.body_code.add(&par_top_ctx.code.result());
}
Ok(())
}
/// Add a function to the generated program, passing its parameters and return value through
/// pointers encoded as i64. This is used for the main entry point function into Weld modules
/// to pass them arbitrary structures.
pub fn add_function_on_pointers(&mut self, name: &str, sir: &SirProgram) -> WeldResult<()> {
// First add the function on raw values, which we'll call from the pointer version.
try!(self.add_function(sir, &sir.funcs[0], None));
// Define a struct with all the argument types as fields
let args_struct = Struct(sir.top_params.iter().map(|a| a.ty.clone()).collect());
let args_type = try!(self.llvm_type(&args_struct)).to_string();
let mut run_ctx = &mut FunctionContext::new();
run_ctx.code.add(format!("define i64 @{}(i64 %r.input) {{", name));
// Unpack the input, which is always struct defined by the type %input_arg_t in prelude.ll.
run_ctx.code.add(format!("%r.inp_typed = inttoptr i64 %r.input to %input_arg_t*"));
run_ctx.code.add(format!("%r.inp_val = load %input_arg_t, %input_arg_t* %r.inp_typed"));
run_ctx.code.add(format!("%r.args = extractvalue %input_arg_t %r.inp_val, 0"));
run_ctx.code.add(format!("%r.nworkers = extractvalue %input_arg_t %r.inp_val, 1"));
run_ctx.code.add(format!("%r.memlimit = extractvalue %input_arg_t %r.inp_val, 2"));
run_ctx.code.add(format!("call void @set_nworkers(i32 %r.nworkers)"));
run_ctx.code.add(format!("call void @weld_rt_init(i64 %r.memlimit)"));
// Code to load args and call function
run_ctx.code.add(format!("%r.args_typed = inttoptr i64 %r.args to {args_type}*
\
%r.args_val = load {args_type}, {args_type}* %r.args_typed",
args_type = args_type));
let mut arg_pos_map: HashMap<Symbol, usize> = HashMap::new();
for (i, a) in sir.top_params.iter().enumerate() {
arg_pos_map.insert(a.name.clone(), i);
}
for (arg, _) in sir.funcs[0].params.iter() {
let idx = arg_pos_map.get(arg).unwrap();
run_ctx.code.add(format!("{} = extractvalue {} %r.args_val, {}",
llvm_symbol(arg),
args_type,
idx));
}
let run_struct = try!(self.get_arg_struct(&sir.funcs[0].params, &mut run_ctx));
let rid = run_ctx.var_ids.next();
let errno = run_ctx.var_ids.next();
let tmp0 = run_ctx.var_ids.next();
let tmp1 = run_ctx.var_ids.next();
let tmp2 = run_ctx.var_ids.next();
let size_ptr = run_ctx.var_ids.next();
let size = run_ctx.var_ids.next();
let bytes = run_ctx.var_ids.next();
let typed_out_ptr = run_ctx.var_ids.next();
let final_address = run_ctx.var_ids.next();
run_ctx.code.add(format!("call \
void @execute(void (%work_t*)* @f0_par, i8* {run_struct})
%res_ptr = call i8* @get_result()
%res_address = ptrtoint i8* %res_ptr to i64
\
{rid} = call i64 @get_runid()
{errno} = call i64 @weld_rt_get_errno(i64 {rid})
{tmp0} = insertvalue %output_arg_t undef, i64 %res_address, 0
{tmp1} = insertvalue %output_arg_t {tmp0}, i64 {rid}, 1
{tmp2} = insertvalue %output_arg_t {tmp1}, i64 {errno}, 2
{size_ptr} = getelementptr %output_arg_t, %output_arg_t* null, i32 1
{size} = ptrtoint %output_arg_t* {size_ptr} to i64
{bytes} = call i8* @malloc(i64 {size})
{typed_out_ptr} = bitcast i8* {bytes} to %output_arg_t*
store %output_arg_t {tmp2}, %output_arg_t* {typed_out_ptr}
{final_address} = ptrtoint %output_arg_t* {typed_out_ptr} to i64
ret i64 {final_address}",
run_struct = run_struct,
rid = rid,
errno = errno,
tmp0 = tmp0,
tmp1 = tmp1,
tmp2 = tmp2,
size_ptr = size_ptr,
size = size,
bytes = bytes,
typed_out_ptr = typed_out_ptr,
final_address = final_address));
run_ctx.code.add("}\n\n");
self.body_code.add(&run_ctx.code.result());
Ok(())
}
/// Return the LLVM type name corresponding to a Weld type.
fn llvm_type(&mut self, ty: &Type) -> WeldResult<&str> {
match *ty {
Scalar(Bool) => Ok("i1"),
Scalar(I8) => Ok("i8"),
Scalar(I32) => Ok("i32"),
Scalar(I64) => Ok("i64"),
Scalar(F32) => Ok("float"),
Scalar(F64) => Ok("double"),
Struct(ref fields) => {
                if self.struct_names.get(fields).is_none() {
// Declare the struct in prelude_code
let name = self.struct_ids.next();
let mut field_types: Vec<String> = Vec::new();
for f in fields {
field_types.push(try!(self.llvm_type(f)).to_string());
}
let field_types_str = field_types.join(", ");
self.prelude_code.add(format!("{} = type {{ {} }}", name, field_types_str));
// Generate hash function for the struct.
self.prelude_code.add_line(format!("define i64 {}.hash({} %value) {{",
name.replace("%", "@"),
name));
let mut res = "0".to_string();
for i in 0..field_types.len() {
let field = self.prelude_var_ids.next();
let hash = self.prelude_var_ids.next();
let new_res = self.prelude_var_ids.next();
let field_ty_str = &field_types[i];
let field_prefix_str = format!("@{}", field_ty_str.replace("%", ""));
self.prelude_code
.add_line(format!("{} = extractvalue {} %value, {}", field, name, i));
self.prelude_code.add_line(format!("{} = call i64 {}.hash({} {})",
hash,
field_prefix_str,
field_ty_str,
field));
self.prelude_code
.add_line(format!("{} = call i64 @hash_combine(i64 {}, i64 {})",
new_res,
res,
hash));
res = new_res;
}
self.prelude_code.add_line(format!("ret i64 {}", res));
self.prelude_code.add_line(format!("}}"));
self.prelude_code.add_line(format!(""));
self.prelude_code.add_line(format!("define i32 {}.cmp({} %a, {} %b) {{",
name.replace("%", "@"),
name,
name));
let mut label_ids = IdGenerator::new("%l");
for i in 0..field_types.len() {
let a_field = self.prelude_var_ids.next();
let b_field = self.prelude_var_ids.next();
let cmp = self.prelude_var_ids.next();
let ne = self.prelude_var_ids.next();
let field_ty_str = &field_types[i];
let ret_label = label_ids.next();
let post_label = label_ids.next();
let field_prefix_str = format!("@{}", field_ty_str.replace("%", ""));
self.prelude_code
.add_line(format!("{} = extractvalue {} %a , {}", a_field, name, i));
self.prelude_code
.add_line(format!("{} = extractvalue {} %b, {}", b_field, name, i));
self.prelude_code.add_line(format!("{} = call i32 {}.cmp({} {}, {} {})",
cmp,
field_prefix_str,
field_ty_str,
a_field,
field_ty_str,
b_field));
self.prelude_code.add_line(format!("{} = icmp ne i32 {}, 0", ne, cmp));
self.prelude_code.add_line(format!("br i1 {}, label {}, label {}",
ne,
ret_label,
post_label));
self.prelude_code.add_line(format!("{}:", ret_label.replace("%", "")));
self.prelude_code.add_line(format!("ret i32 {}", cmp));
self.prelude_code.add_line(format!("{}:", post_label.replace("%", "")));
}
self.prelude_code.add_line(format!("ret i32 0"));
self.prelude_code.add_line(format!("}}"));
self.prelude_code.add_line(format!(""));
// Add it into our map so we remember its name
self.struct_names.insert(fields.clone(), name);
}
Ok(self.struct_names.get(fields).unwrap())
}
Vector(ref elem) => {
                if self.vec_names.get(elem).is_none() {
let elem_ty = try!(self.llvm_type(elem)).to_string();
let elem_prefix = format!("@{}", elem_ty.replace("%", ""));
let name = self.vec_ids.next();
self.vec_names.insert(*elem.clone(), name.clone());
let prefix_replaced = VECTOR_CODE.replace("$ELEM_PREFIX", &elem_prefix);
let elem_replaced = prefix_replaced.replace("$ELEM", &elem_ty);
let name_replaced = elem_replaced.replace("$NAME", &name.replace("%", ""));
self.prelude_code.add(&name_replaced);
self.prelude_code.add("\n");
}
Ok(self.vec_names.get(elem).unwrap())
}
Dict(ref key, ref value) => {
let elem = Box::new(Struct(vec![*key.clone(), *value.clone()]));
                if self.dict_names.get(&elem).is_none() {
let key_ty = try!(self.llvm_type(key)).to_string();
let value_ty = try!(self.llvm_type(value)).to_string();
let key_prefix = format!("@{}", key_ty.replace("%", ""));
let name = self.dict_ids.next();
self.dict_names.insert(*elem.clone(), name.clone());
let kv_struct_ty = try!(self.llvm_type(&elem)).to_string();
let kv_vec = Box::new(Vector(elem.clone()));
let kv_vec_ty = try!(self.llvm_type(&kv_vec)).to_string();
let kv_vec_prefix = format!("@{}", &kv_vec_ty.replace("%", ""));
let key_prefix_replaced = DICTIONARY_CODE.replace("$KEY_PREFIX", &key_prefix);
let name_replaced =
key_prefix_replaced.replace("$NAME", &name.replace("%", ""));
let key_ty_replaced = name_replaced.replace("$KEY", &key_ty);
let value_ty_replaced = key_ty_replaced.replace("$VALUE", &value_ty);
let kv_struct_replaced = value_ty_replaced.replace("$KV_STRUCT", &kv_struct_ty);
let kv_vec_prefix_replaced =
kv_struct_replaced.replace("$KV_VEC_PREFIX", &kv_vec_prefix);
let kv_vec_ty_replaced = kv_vec_prefix_replaced.replace("$KV_VEC", &kv_vec_ty);
self.prelude_code.add(&kv_vec_ty_replaced);
self.prelude_code.add("\n");
}
Ok(self.dict_names.get(&elem).unwrap())
}
Builder(ref bk) => {
                if self.bld_names.get(bk).is_none() {
match *bk {
Appender(ref t) => {
let bld_ty = Vector(t.clone());
let bld_ty_str = try!(self.llvm_type(&bld_ty)).to_string();
self.bld_names.insert(bk.clone(), format!("{}.bld", bld_ty_str));
}
Merger(ref t, _) => {
                        if self.merger_names.get(t).is_none() {
let elem_ty = self.llvm_type(t)?.to_string();
let elem_prefix = format!("@{}", elem_ty.replace("%", ""));
let name = self.merger_ids.next();
self.merger_names
.insert(*t.clone(), name.clone());
let prefix_replaced =
MERGER_CODE.replace("$ELEM_PREFIX", &elem_prefix);
let elem_replaced = prefix_replaced.replace("$ELEM", &elem_ty);
let name_replaced =
elem_replaced.replace("$NAME", &name.replace("%", ""));
self.prelude_code.add(&name_replaced);
self.prelude_code.add("\n");
}
let bld_ty_str = self.merger_names.get(t).unwrap();
self.bld_names
.insert(bk.clone(), format!("{}.bld", bld_ty_str));
}
DictMerger(ref kt, ref vt, ref op) => {
let bld_ty = Dict(kt.clone(), vt.clone());
let bld_ty_str = try!(self.llvm_type(&bld_ty)).to_string();
let elem = Box::new(Struct(vec![*kt.clone(), *vt.clone()]));
let kv_struct_ty = try!(self.llvm_type(&elem)).to_string();
let key_ty = try!(self.llvm_type(kt)).to_string();
let value_ty = try!(self.llvm_type(vt)).to_string();
let kv_vec = Box::new(Vector(elem.clone()));
let kv_vec_ty = try!(self.llvm_type(&kv_vec)).to_string();
let kv_vec_prefix = format!("@{}", &kv_vec_ty.replace("%", ""));
let name_replaced =
DICTMERGER_CODE.replace("$NAME", &bld_ty_str.replace("%", ""));
let key_ty_replaced = name_replaced.replace("$KEY", &key_ty);
let value_ty_replaced = key_ty_replaced.replace("$VALUE", &value_ty);
let kv_struct_replaced = value_ty_replaced.replace("$KV_STRUCT",
&kv_struct_ty.replace("%", ""));
let op_replaced =
kv_struct_replaced.replace("$OP", &llvm_binop(*op, vt)?);
let kv_vec_prefix_replaced =
op_replaced.replace("$KV_VEC_PREFIX", &kv_vec_prefix);
let kv_vec_ty_replaced =
kv_vec_prefix_replaced.replace("$KV_VEC", &kv_vec_ty);
self.prelude_code.add(&kv_vec_ty_replaced);
self.prelude_code.add("\n");
self.bld_names.insert(bk.clone(), format!("{}.bld", bld_ty_str));
}
VecMerger(ref elem, _) => {
let bld_ty = Vector(elem.clone());
let bld_ty_str = try!(self.llvm_type(&bld_ty)).to_string();
self.bld_names.insert(bk.clone(), format!("{}.vm.bld", bld_ty_str));
}
}
}
Ok(self.bld_names.get(bk).unwrap())
}
_ => weld_err!("Unsupported type {}", print_type(ty))?,
}
}
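    /// Emit a load of type `ty` from the pointer named `sym` into a fresh local
    /// variable, returning the new variable's name.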
fn load_var(&mut self, sym: &str, ty: &str, ctx: &mut FunctionContext) -> WeldResult<String> {
let var = ctx.var_ids.next();
ctx.code.add(format!("{} = load {}, {}* {}", var, ty, ty, sym));
Ok(var)
}
    /// Given a pointer to some data retrieved from a builder, generates code to merge a value
    /// into the builder. The merged result is stored back through the pointer to complete the
    /// merge. `builder_ptr` is the pointer from which the original value is read and into which
    /// the new value is stored. `merge_value` is the value to merge in.
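    ///
    /// For a scalar merge, the emitted IR is roughly (illustrative register
    /// names; the struct case does the same per field via extractvalue and
    /// insertvalue before the final store):
    ///
    ///   %old = load T, T* %builder_ptr
    ///   %new = <binop> T %old, %merge_value
    ///   store T %new, T* %builder_ptr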
fn gen_merge(&mut self,
builder_ptr: String,
merge_value: String,
merge_ty_str: String,
bin_op: &BinOpKind,
merge_ty: &Type,
ctx: &mut FunctionContext)
-> WeldResult<()> {
let builder_value = ctx.var_ids.next();
let mut res = ctx.var_ids.next();
ctx.code.add(format!("{} = load {}, {}* {}",
&builder_value,
&merge_ty_str,
&merge_ty_str,
&builder_ptr));
if let Scalar(_) = *merge_ty {
ctx.code.add(format!("{} = {} {} {}, {}",
&res,
try!(llvm_binop(*bin_op, merge_ty)),
&merge_ty_str,
builder_value,
merge_value));
} else if let Struct(ref tys) = *merge_ty {
let mut cur = "undef".to_string();
for (i, ty) in tys.iter().enumerate() {
let merge_elem = ctx.var_ids.next();
let builder_elem = ctx.var_ids.next();
let struct_name = ctx.var_ids.next();
let binop_value = ctx.var_ids.next();
let elem_ty_str = try!(self.llvm_type(ty)).to_string();
ctx.code.add(format!("{} = extractvalue {} {}, {}",
&merge_elem,
&merge_ty_str,
&merge_value,
i));
ctx.code.add(format!("{} = extractvalue {} {}, {}",
&builder_elem,
&merge_ty_str,
&builder_value,
i));
ctx.code.add(format!("{} = {} {} {}, {}",
&binop_value,
try!(llvm_binop(*bin_op, ty)),
&elem_ty_str,
&merge_elem,
&builder_elem));
ctx.code.add(format!("{} = insertvalue {} {}, {} {}, {}",
&struct_name,
&merge_ty_str,
&cur,
&elem_ty_str,
&binop_value,
i));
res = struct_name.clone();
cur = struct_name.clone();
}
} else {
unreachable!();
}
// Store the resulting merge value back into the builder pointer.
ctx.code.add(format!("store {} {}, {}* {}",
&merge_ty_str,
&res,
&merge_ty_str,
&builder_ptr));
Ok(())
}
    /// Generate code for the body of a SIR function: emits each basic block as a label
    /// `b.b<id>:` followed by its statements and its terminator. Results are written through
    /// pointers named by `llvm_symbol`, so on success this returns an empty string.
fn gen_func(&mut self,
sir: &SirProgram,
func: &SirFunction,
ctx: &mut FunctionContext)
-> WeldResult<String> {
for b in func.blocks.iter() {
ctx.code.add(format!("b.b{}:", b.id));
for s in b.statements.iter() {
match *s {
MakeStruct { ref output, ref elems } => {
let mut cur = "undef".to_string();
let ll_ty = try!(self.llvm_type(&Struct(elems.iter()
.map(|e| e.1.clone())
.collect::<Vec<_>>())))
.to_string();
for (i, &(ref elem, ref ty)) in elems.iter().enumerate() {
let ll_elem_ty = try!(self.llvm_type(&ty)).to_string();
let tmp =
try!(self.load_var(llvm_symbol(&elem).as_str(), &ll_elem_ty, ctx));
let struct_name = ctx.var_ids.next();
ctx.code.add(format!("{} = insertvalue {} {}, {} {}, {}",
&struct_name,
&ll_ty,
&cur,
&ll_elem_ty,
&tmp,
i));
cur = struct_name.clone();
}
ctx.code.add(format!("store {} {}, {}* {}",
ll_ty,
cur,
ll_ty,
llvm_symbol(output)));
}
MakeVector { ref output, ref elems, ref elem_ty } => {
let elem_ll_ty = self.llvm_type(elem_ty)?.to_string();
let vec_ll_ty = self.llvm_type(&Vector(Box::new(elem_ty.clone())))?
.to_string();
let vec_ll_prefix = vec_ll_ty.replace("%", "@");
let vec = ctx.var_ids.next();
let capacity_str = format!("{}", elems.len());
ctx.code
.add(format!("{vec} = call {vec_type} {prefix}.new(i64 {capacity})",
vec = vec,
vec_type = vec_ll_ty,
prefix = vec_ll_prefix,
capacity = capacity_str));
for (i, elem) in elems.iter().enumerate() {
let e = self.load_var(llvm_symbol(&elem).as_str(), &elem_ll_ty, ctx)?
.to_string();
let ptr = ctx.var_ids.next();
let idx_str = format!("{}", i);
ctx.code.add(format!("{ptr} = call {elem_ty}* \
{prefix}.at({vec_type} {vec}, i64 {idx})",
ptr = ptr,
elem_ty = elem_ll_ty,
prefix = vec_ll_prefix,
vec_type = vec_ll_ty,
vec = vec,
idx = idx_str));
ctx.code.add(format!("store {elem_ty} {elem}, {elem_ty}* {ptr}",
elem_ty = elem_ll_ty,
elem = e,
ptr = ptr));
}
ctx.code.add(format!("store {vec_ty} {vec}, {vec_ty}* {output}",
vec_ty = vec_ll_ty,
vec = vec,
output = llvm_symbol(&output).as_str()));
}
BinOp { ref output, op, ref ty, ref left, ref right } => {
let ll_ty = try!(self.llvm_type(ty)).to_string();
let left_tmp = try!(self.load_var(llvm_symbol(left).as_str(), &ll_ty, ctx));
let right_tmp = try!(self.load_var(llvm_symbol(right).as_str(),
&ll_ty, ctx));
let bin_tmp = ctx.var_ids.next();
let out_ty = try!(get_sym_ty(func, output));
let out_ty_str = try!(self.llvm_type(&out_ty)).to_string();
match *ty {
Scalar(_) => {
let op_name = try!(llvm_binop(op, ty));
ctx.code.add(format!("{} = {} {} {}, {}",
bin_tmp,
op_name,
ll_ty,
left_tmp,
right_tmp));
ctx.code.add(format!("store {} {}, {}* {}",
out_ty_str,
bin_tmp,
out_ty_str,
llvm_symbol(output)));
}
Vector(_) => {
// We support BinOps between vectors as long as they're comparison operators
let (op_name, value) = try!(llvm_binop_vector(op, ty));
let tmp = ctx.var_ids.next();
let vec_prefix = format!("@{}", ll_ty.replace("%", ""));
ctx.code.add(format!("{} = call i32 {}.cmp({} {}, {} {})",
tmp,
vec_prefix,
ll_ty,
left_tmp,
ll_ty,
right_tmp));
ctx.code.add(format!("{} = icmp {} i32 {}, {}",
bin_tmp,
op_name,
tmp,
value));
ctx.code.add(format!("store {} {}, {}* {}",
out_ty_str,
bin_tmp,
out_ty_str,
llvm_symbol(output)));
}
_ => weld_err!("Illegal type {} in BinOp", print_type(ty))?,
}
}
Negate { ref output, ref child } => {
let out_ty = try!(get_sym_ty(func, output));
let ll_ty = try!(self.llvm_type(out_ty)).to_string();
let child_tmp =
try!(self.load_var(llvm_symbol(child).as_str(), &ll_ty, ctx));
let bin_tmp = ctx.var_ids.next();
let out_ty_str = try!(self.llvm_type(&out_ty)).to_string();
                        let op_name = try!(llvm_binop(BinOpKind::Subtract, out_ty));
                        // LLVM IR requires a floating-point zero literal for fsub.
                        let zero = match *out_ty {
                            Scalar(F32) | Scalar(F64) => "0.0",
                            _ => "0",
                        };
                        ctx.code
                            .add(format!("{} = {} {} {}, {}",
                                         bin_tmp, op_name, ll_ty, zero, child_tmp));
ctx.code.add(format!("store {} {}, {}* {}",
out_ty_str,
bin_tmp,
out_ty_str,
llvm_symbol(output)));
}
Cast { ref output, ref new_ty, ref child } => {
let old_ty = try!(get_sym_ty(func, child));
let old_ll_ty = try!(self.llvm_type(&old_ty)).to_string();
if old_ty != new_ty {
let op_name = try!(llvm_castop(&old_ty, &new_ty));
let new_ll_ty = try!(self.llvm_type(&new_ty)).to_string();
let child_tmp = try!(self.load_var(llvm_symbol(child).as_str(),
&old_ll_ty, ctx));
let cast_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = {} {} {} to {}",
cast_tmp,
op_name,
old_ll_ty,
child_tmp,
new_ll_ty));
let out_ty = try!(get_sym_ty(func, output));
let out_ty_str = try!(self.llvm_type(&out_ty)).to_string();
ctx.code.add(format!("store {} {}, {}* {}",
out_ty_str,
cast_tmp,
out_ty_str,
llvm_symbol(output)));
} else {
let child_tmp = try!(self.load_var(llvm_symbol(child).as_str(),
&old_ll_ty, ctx));
ctx.code.add(format!("store {} {}, {}* {}",
old_ll_ty,
child_tmp,
old_ll_ty,
llvm_symbol(output)));
}
}
Lookup { ref output, ref child, ref index } => {
let child_ty = try!(get_sym_ty(func, child));
match *child_ty {
Vector(_) => {
let child_ll_ty = try!(self.llvm_type(&child_ty)).to_string();
let output_ty = try!(get_sym_ty(func, output));
let output_ll_ty = try!(self.llvm_type(&output_ty)).to_string();
let vec_ll_ty = try!(self.llvm_type(&child_ty)).to_string();
let vec_prefix = format!("@{}", vec_ll_ty.replace("%", ""));
let child_tmp = try!(self.load_var(llvm_symbol(child).as_str(),
&child_ll_ty, ctx));
let index_tmp = try!(self.load_var(llvm_symbol(index).as_str(),
"i64", ctx));
let res_ptr = ctx.var_ids.next();
let res_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = call {}* {}.at({} {}, i64 {})",
res_ptr,
output_ll_ty,
vec_prefix,
vec_ll_ty,
child_tmp,
index_tmp));
ctx.code.add(format!("{} = load {}, {}* {}",
res_tmp,
output_ll_ty,
output_ll_ty,
res_ptr));
ctx.code.add(format!("store {} {}, {}* {}",
output_ll_ty,
res_tmp,
output_ll_ty,
llvm_symbol(output)));
}
Dict(_, _) => {
let child_ll_ty = try!(self.llvm_type(&child_ty)).to_string();
let output_ty = try!(get_sym_ty(func, output));
let output_ll_ty = try!(self.llvm_type(&output_ty)).to_string();
let dict_ll_ty = try!(self.llvm_type(&child_ty)).to_string();
let index_ty = try!(get_sym_ty(func, index));
let index_ll_ty = try!(self.llvm_type(&index_ty)).to_string();
let dict_prefix = format!("@{}", dict_ll_ty.replace("%", ""));
let child_tmp = try!(self.load_var(llvm_symbol(child).as_str(),
&child_ll_ty, ctx));
let index_tmp = try!(self.load_var(llvm_symbol(index).as_str(),
&index_ll_ty, ctx));
let slot = ctx.var_ids.next();
let res_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = call {}.slot {}.lookup({} {}, {} {})",
slot,
dict_ll_ty,
dict_prefix,
dict_ll_ty,
child_tmp,
index_ll_ty,
index_tmp));
ctx.code.add(format!("{} = call {} {}.slot.value({}.slot {})",
res_tmp,
output_ll_ty,
dict_prefix,
dict_ll_ty,
slot));
ctx.code.add(format!("store {} {}, {}* {}",
output_ll_ty,
res_tmp,
output_ll_ty,
llvm_symbol(output)));
}
_ => weld_err!("Illegal type {} in Lookup", print_type(child_ty))?,
}
}
Slice { ref output, ref child, ref index, ref size } => {
let child_ty = try!(get_sym_ty(func, child));
match *child_ty {
Vector(_) => {
let child_ll_ty = try!(self.llvm_type(&child_ty)).to_string();
let output_ty = try!(get_sym_ty(func, output));
let output_ll_ty = try!(self.llvm_type(&output_ty)).to_string();
let vec_ll_ty = try!(self.llvm_type(&child_ty)).to_string();
let vec_prefix = format!("@{}", vec_ll_ty.replace("%", ""));
let child_tmp = try!(self.load_var(llvm_symbol(child).as_str(),
&child_ll_ty, ctx));
let index_tmp = try!(self.load_var(llvm_symbol(index).as_str(),
"i64", ctx));
let size_tmp = try!(self.load_var(llvm_symbol(size).as_str(),
"i64", ctx));
let res_ptr = ctx.var_ids.next();
ctx.code.add(format!("{} = call {} {}.slice({} {}, i64 {}, \
i64{})",
res_ptr,
output_ll_ty,
vec_prefix,
vec_ll_ty,
child_tmp,
index_tmp,
size_tmp));
let out_ty = try!(get_sym_ty(func, output));
let out_ty_str = try!(self.llvm_type(&out_ty)).to_string();
ctx.code.add(format!("store {} {}, {}* {}",
out_ty_str,
res_ptr,
out_ty_str,
llvm_symbol(output)))
}
_ => weld_err!("Illegal type {} in Slice", print_type(child_ty))?,
}
}
Exp { ref output, ref child } => {
let child_ty = try!(get_sym_ty(func, child));
if let Scalar(ref ty) = *child_ty {
let child_ll_ty = try!(self.llvm_type(&child_ty)).to_string();
let child_tmp = try!(self.load_var(llvm_symbol(child).as_str(),
&child_ll_ty, ctx));
let res_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = call {} @llvm.exp.{}({} {})",
res_tmp,
child_ll_ty,
ty,
child_ll_ty,
child_tmp));
let out_ty = try!(get_sym_ty(func, output));
let out_ty_str = try!(self.llvm_type(&out_ty)).to_string();
ctx.code.add(format!("store {} {}, {}* {}",
out_ty_str,
res_tmp,
out_ty_str,
llvm_symbol(output)));
} else {
weld_err!("Illegal type {} in Exp", print_type(child_ty))?;
}
}
ToVec { ref output, ref child } => {
let old_ty = try!(get_sym_ty(func, child));
let new_ty = try!(get_sym_ty(func, output));
let old_ll_ty = try!(self.llvm_type(&old_ty)).to_string();
let new_ll_ty = try!(self.llvm_type(&new_ty)).to_string();
let dict_prefix = format!("@{}", old_ll_ty.replace("%", ""));
let child_tmp = try!(self.load_var(llvm_symbol(child).as_str(),
&old_ll_ty, ctx));
let res_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = call {} {}.tovec({} {})",
res_tmp,
new_ll_ty,
dict_prefix,
old_ll_ty,
child_tmp));
let out_ty = try!(get_sym_ty(func, output));
let out_ty_str = try!(self.llvm_type(&out_ty)).to_string();
ctx.code.add(format!("store {} {}, {}* {}",
out_ty_str,
res_tmp,
out_ty_str,
llvm_symbol(output)));
}
Length { ref output, ref child } => {
let child_ty = try!(get_sym_ty(func, child));
let child_ll_ty = try!(self.llvm_type(&child_ty)).to_string();
let vec_prefix = format!("@{}", child_ll_ty.replace("%", ""));
let child_tmp = try!(self.load_var(llvm_symbol(child).as_str(),
&child_ll_ty, ctx));
let res_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = call i64 {}.size({} {})",
res_tmp,
vec_prefix,
child_ll_ty,
child_tmp));
let out_ty = try!(get_sym_ty(func, output));
let out_ty_str = try!(self.llvm_type(&out_ty)).to_string();
ctx.code.add(format!("store {} {}, {}* {}",
out_ty_str,
res_tmp,
out_ty_str,
llvm_symbol(output)));
}
Assign { ref output, ref value } => {
let ty = try!(get_sym_ty(func, output));
let ll_ty = try!(self.llvm_type(&ty)).to_string();
let val_tmp = try!(self.load_var(llvm_symbol(value).as_str(), &ll_ty, ctx));
ctx.code.add(format!("store {} {}, {}* {}",
ll_ty,
val_tmp,
ll_ty,
llvm_symbol(output)));
}
GetField { ref output, ref value, index } => {
let struct_ty = try!(self.llvm_type(try!(get_sym_ty(func, value))))
.to_string();
let field_ty = try!(self.llvm_type(try!(get_sym_ty(func, output))))
.to_string();
let struct_tmp = try!(self.load_var(llvm_symbol(value).as_str(),
&struct_ty, ctx));
let res_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = extractvalue {} {}, {}",
res_tmp,
struct_ty,
struct_tmp,
index));
ctx.code.add(format!("store {} {}, {}* {}",
field_ty,
res_tmp,
field_ty,
llvm_symbol(output)));
}
AssignLiteral { ref output, ref value } => {
match *value {
BoolLiteral(l) => {
ctx.code.add(format!("store i1 {}, i1* {}",
if l { 1 } else { 0 },
llvm_symbol(output)))
}
I8Literal(l) => {
ctx.code.add(format!("store i8 {}, i8* {}", l, llvm_symbol(output)))
}
I32Literal(l) => {
ctx.code
.add(format!("store i32 {}, i32* {}", l, llvm_symbol(output)))
}
I64Literal(l) => {
ctx.code
.add(format!("store i64 {}, i64* {}", l, llvm_symbol(output)))
}
F32Literal(l) => {
ctx.code.add(format!("store float {:.3}, float* {}",
l,
llvm_symbol(output)))
}
F64Literal(l) => {
ctx.code.add(format!("store double {:.3}, double* {}",
l,
llvm_symbol(output)))
}
}
}
Merge { ref builder, ref value } => {
let bld_ty = try!(get_sym_ty(func, builder));
match *bld_ty {
Builder(ref bk) => {
match *bk {
Appender(ref t) => {
let bld_ty_str = try!(self.llvm_type(&bld_ty)).to_string();
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
let bld_tmp =
try!(self.load_var(llvm_symbol(builder).as_str(),
&bld_ty_str,
ctx));
let elem_ty_str = try!(self.llvm_type(t)).to_string();
let elem_tmp =
try!(self.load_var(llvm_symbol(value).as_str(),
&elem_ty_str,
ctx));
ctx.code.add(format!("call {} {}.merge({} {}, {} {}, \
i32 %cur.tid)",
bld_ty_str,
bld_prefix,
bld_ty_str,
bld_tmp,
elem_ty_str,
elem_tmp));
}
DictMerger(ref kt, ref vt, _) => {
let bld_ty_str = try!(self.llvm_type(&bld_ty)).to_string();
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
let bld_tmp =
try!(self.load_var(llvm_symbol(builder).as_str(),
&bld_ty_str,
ctx));
let elem_ty = Struct(vec![*kt.clone(), *vt.clone()]);
let elem_ty_str = try!(self.llvm_type(&elem_ty))
.to_string();
let elem_tmp =
try!(self.load_var(llvm_symbol(value).as_str(),
&elem_ty_str,
ctx));
ctx.code
.add(format!("call {} {}.merge({} {}, {} {}, i32 \
%cur.tid)",
bld_ty_str,
bld_prefix,
bld_ty_str,
bld_tmp,
elem_ty_str,
elem_tmp));
}
Merger(ref t, ref op) => {
let bld_ty_str = self.llvm_type(&bld_ty)?.to_string();
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
let elem_ty_str = self.llvm_type(t)?.to_string();
let bld_tmp = self.load_var(llvm_symbol(builder).as_str(),
&bld_ty_str,
ctx)?;
let elem_tmp = self.load_var(llvm_symbol(value).as_str(),
&elem_ty_str,
ctx)?;
let bld_ptr_raw = ctx.var_ids.next();
let bld_ptr = ctx.var_ids.next();
ctx.code
.add(format!("{} = call i8* {}.merge_ptr({} {}, i32 \
%cur.tid)",
bld_ptr_raw,
bld_prefix,
bld_ty_str,
bld_tmp));
ctx.code.add(format!("{} = bitcast i8* {} to {}*",
bld_ptr,
bld_ptr_raw,
elem_ty_str));
try!(self.gen_merge(bld_ptr, elem_tmp, elem_ty_str, op, t, ctx));
}
VecMerger(ref t, ref op) => {
let bld_ty_str = self.llvm_type(&bld_ty)?.to_string();
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
let elem_ty_str = self.llvm_type(t)?.to_string();
let merge_ty = Struct(vec![Scalar(ScalarKind::I64),
*t.clone()]);
let merge_ty_str = self.llvm_type(&merge_ty)?
.to_string();
let bld_tmp = self.load_var(llvm_symbol(builder).as_str(),
&bld_ty_str,
ctx)?;
let elem_tmp = self.load_var(llvm_symbol(value).as_str(),
&merge_ty_str,
ctx)?;
let index_var = ctx.var_ids.next();
let elem_var = ctx.var_ids.next();
ctx.code.add(format!("{} = extractvalue {} {}, 0",
index_var,
merge_ty_str,
elem_tmp));
ctx.code.add(format!("{} = extractvalue {} {}, 1",
elem_var,
merge_ty_str,
elem_tmp));
let bld_ptr_raw = ctx.var_ids.next();
let bld_ptr = ctx.var_ids.next();
ctx.code
.add(format!("{} = call i8* {}.merge_ptr({} {}, i64 \
{}, i32 %cur.tid)",
bld_ptr_raw,
bld_prefix,
bld_ty_str,
bld_tmp,
index_var));
ctx.code.add(format!("{} = bitcast i8* {} to {}*",
bld_ptr,
bld_ptr_raw,
elem_ty_str));
try!(self.gen_merge(bld_ptr, elem_var, elem_ty_str, op, t, ctx));
}
}
}
_ => {
weld_err!("Non builder type {} found in DoMerge",
print_type(bld_ty))?
}
}
}
Res { ref output, ref builder } => {
let bld_ty = try!(get_sym_ty(func, builder));
let res_ty = try!(get_sym_ty(func, output));
match *bld_ty {
Builder(ref bk) => {
match *bk {
Appender(_) => {
let bld_ty_str = try!(self.llvm_type(&bld_ty)).to_string();
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
let res_ty_str = try!(self.llvm_type(&res_ty)).to_string();
let bld_tmp =
try!(self.load_var(llvm_symbol(builder).as_str(),
&bld_ty_str,
ctx));
let res_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = call {} {}.result({} {})",
res_tmp,
res_ty_str,
bld_prefix,
bld_ty_str,
bld_tmp));
ctx.code.add(format!("store {} {}, {}* {}",
res_ty_str,
res_tmp,
res_ty_str,
llvm_symbol(output)));
}
Merger(ref t, ref op) => {
let bld_ty_str = try!(self.llvm_type(&bld_ty)).to_string();
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
let res_ty_str = try!(self.llvm_type(&res_ty)).to_string();
let bld_tmp =
try!(self.load_var(llvm_symbol(builder).as_str(),
&bld_ty_str,
ctx));
                            // Get a pointer to worker 0's copy of the merger value.
ctx.code
.add(format!("%bldPtrFirst = call {bld_ty_str} \
{bld_prefix}.\
getPtrIndexed({bld_ty_str} \
{bld_tmp}, i32 0)",
bld_ty_str = bld_ty_str,
bld_prefix = bld_prefix,
bld_tmp = bld_tmp));
ctx.code.add(format!("
\
%bldPtrCasted = bitcast \
{bld_ty_str} %bldPtrFirst to \
{elem_ty_str}*",
bld_ty_str = bld_ty_str,
elem_ty_str = res_ty_str.clone()));
ctx.code.add(format!("
\
%first = load {elem_ty_str}, \
{elem_ty_str}* %bldPtrCasted
\
%nworkers = call i32 \
@get_nworkers()
\
br label %entry
\
entry:
\
%cond = icmp ult i32 1, %nworkers
\
br i1 %cond, label %body, label \
%done
\
",
elem_ty_str = res_ty_str.clone()));
ctx.code.add(format!("body:
%i = phi i32 [ 1, %entry \
], [ %i2, %body ]
%bldPtr = \
call {bld_ty_str} \
{bld_prefix}.\
getPtrIndexed({bld_ty_str} \
{bld_tmp}, i32 %i)
%val = load \
{elem_ty_str}, {elem_ty_str}* \
%bldPtr",
bld_prefix = bld_prefix,
bld_ty_str = bld_ty_str,
elem_ty_str = res_ty_str.clone(),
bld_tmp = bld_tmp));
try!(self.gen_merge("%bldPtrFirst".to_string(),
"%val".to_string(),
res_ty_str.to_string(),
op,
t,
ctx));
ctx.code.add(format!("%i2 = add i32 %i, 1
\
%cond2 = icmp ult i32 %i2, \
%nworkers
\
br i1 %cond2, label %body, label \
%done
\
done:
\
%final = load {res_ty_str}, \
{res_ty_str}* %bldPtrFirst
\
%asPtr = bitcast \
{bld_ty_str} {bld_tmp} to \
i8*
\
call void @free_merger(\
i8* %asPtr)",
bld_tmp = bld_tmp,
bld_ty_str = bld_ty_str,
res_ty_str = res_ty_str.to_string()));
ctx.code.add(format!("store {} {}, {}* {}",
res_ty_str,
"%final".to_string(),
res_ty_str,
llvm_symbol(output)));
}
DictMerger(_, _, _) => {
let bld_ty_str = try!(self.llvm_type(&bld_ty)).to_string();
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
let res_ty_str = try!(self.llvm_type(&res_ty)).to_string();
let bld_tmp =
try!(self.load_var(llvm_symbol(builder).as_str(),
&bld_ty_str,
ctx));
let res_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = call {} {}.result({} {})",
res_tmp,
res_ty_str,
bld_prefix,
bld_ty_str,
bld_tmp));
ctx.code.add(format!("store {} {}, {}* {}",
res_ty_str,
res_tmp,
res_ty_str,
llvm_symbol(output)));
}
VecMerger(ref t, ref op) => {
// The builder type (special internal type).
let bld_ty_str = try!(self.llvm_type(&bld_ty)).to_string();
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
// The result type (vec[elem_type])
let res_ty_str = try!(self.llvm_type(&res_ty)).to_string();
let res_prefix = format!("@{}",
res_ty_str.replace("%", ""));
// The element type
let elem_ty_str = self.llvm_type(t)?.to_string();
// The builder we operate on.
let bld_ptr =
try!(self.load_var(llvm_symbol(builder).as_str(),
&bld_ty_str,
ctx));
// Generate names for all temporaries.
let nworkers = ctx.var_ids.next();
let t0 = ctx.var_ids.next();
let typed_ptr = ctx.var_ids.next();
let first_vec = ctx.var_ids.next();
let size = ctx.var_ids.next();
let ret_value = ctx.var_ids.next();
let cond = ctx.var_ids.next();
let i = ctx.var_ids.next();
let vec_ptr = ctx.var_ids.next();
let cur_vec = ctx.var_ids.next();
let copy_cond = ctx.var_ids.next();
let j = ctx.var_ids.next();
let elem_ptr = ctx.var_ids.next();
let merge_value = ctx.var_ids.next();
let merge_ptr = ctx.var_ids.next();
let j2 = ctx.var_ids.next();
let copy_cond2 = ctx.var_ids.next();
let i2 = ctx.var_ids.next();
let cond2 = ctx.var_ids.next();
// Generate label names.
let label_base = ctx.var_ids.next();
let mut label_ids =
IdGenerator::new(&label_base.replace("%", ""));
let entry = label_ids.next();
let body_label = label_ids.next();
let copy_entry_label = label_ids.next();
let copy_body_label = label_ids.next();
let copy_done_label = label_ids.next();
let done_label = label_ids.next();
let raw_ptr = ctx.var_ids.next();
ctx.code.add(format!(include_str!("resources/vecmerger/vecmerger_result_start.ll"),
nworkers = nworkers,
t0 = t0,
buildPtr = bld_ptr,
resType = res_ty_str,
resPrefix = res_prefix,
elemType = elem_ty_str,
typedPtr = typed_ptr,
firstVec = first_vec,
size = size,
retValue = ret_value,
cond = cond,
i = i,
i2 = i2,
vecPtr = vec_ptr,
curVec = cur_vec,
copyCond = copy_cond,
j = j,
j2 = j2,
elemPtr = elem_ptr,
mergeValue = merge_value,
mergePtr = merge_ptr,
entry = entry,
bodyLabel = body_label,
copyEntryLabel = copy_entry_label,
copyBodyLabel = copy_body_label,
copyDoneLabel = copy_done_label,
doneLabel = done_label,
bldType = bld_ty_str,
bldPrefix = bld_prefix));
try!(self.gen_merge(merge_ptr,
merge_value,
elem_ty_str,
op,
t,
ctx));
ctx.code.add(format!(include_str!("resources/vecmerger/vecmerger_result_end.ll"),
j2 = j2,
j = j,
copyCond2 = copy_cond2,
size = size,
i2 = i2,
i = i,
cond2 = cond2,
nworkers = nworkers,
resType = res_ty_str,
retValue = ret_value,
copyBodyLabel = copy_body_label,
copyDoneLabel = copy_done_label,
doneLabel = done_label,
bodyLabel = body_label,
rawPtr = raw_ptr,
buildPtr = bld_ptr,
bldType = bld_ty_str,
output = llvm_symbol(output)));
}
}
}
_ => {
weld_err!("Non builder type {} found in GetResult",
print_type(bld_ty))?
}
}
}
NewBuilder { ref output, ref arg, ref ty } => {
match *ty {
Builder(ref bk) => {
match *bk {
Appender(_) => {
let bld_ty_str = try!(self.llvm_type(ty));
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
let bld_tmp = ctx.var_ids.next();
ctx.code
.add(format!("{} = call {} {}.new(i64 16, %work_t* \
%cur.work)",
bld_tmp,
bld_ty_str,
bld_prefix));
ctx.code.add(format!("store {} {}, {}* {}",
bld_ty_str,
bld_tmp,
bld_ty_str,
llvm_symbol(output)));
}
Merger(_, ref op) => {
if *op != BinOpKind::Add {
return weld_err!("Merger only supports +");
}
let bld_ty_str = try!(self.llvm_type(ty));
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
let bld_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = call {} {}.new()",
bld_tmp,
bld_ty_str,
bld_prefix));
ctx.code.add(format!("store {} {}, {}* {}",
bld_ty_str,
bld_tmp,
bld_ty_str,
llvm_symbol(output)));
}
DictMerger(_, _, _) => {
let bld_ty_str = try!(self.llvm_type(ty));
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
let bld_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = call {} {}.new(i64 16)",
bld_tmp,
bld_ty_str,
bld_prefix));
ctx.code.add(format!("store {} {}, {}* {}",
bld_ty_str,
bld_tmp,
bld_ty_str,
llvm_symbol(output)));
}
VecMerger(ref elem, ref op) => {
if *op != BinOpKind::Add {
return weld_err!("VecMerger only supports +");
}
match *arg {
Some(ref s) => {
let bld_ty_str = try!(self.llvm_type(ty))
.to_string();
let bld_prefix = format!("@{}",
bld_ty_str.replace("%", ""));
let arg_ty =
try!(self.llvm_type(&Vector(elem.clone())))
.to_string();
let arg_ty_str = arg_ty.to_string();
let arg_str = self.load_var(llvm_symbol(s).as_str(),
&arg_ty_str,
ctx)?;
let bld_tmp = ctx.var_ids.next();
ctx.code.add(format!("{} = call {} {}.new({} \
{})",
bld_tmp,
bld_ty_str,
bld_prefix,
arg_ty_str,
arg_str));
ctx.code.add(format!("store {} {}, {}* {}",
bld_ty_str,
bld_tmp,
bld_ty_str,
llvm_symbol(output)));
}
None => {
weld_err!("Internal error: NewBuilder(VecMerger) \
expected argument in LLVM codegen")?
}
}
}
}
}
_ => {
weld_err!("Non builder type {} found in NewBuilder",
print_type(ty))?
}
}
}
}
}
match b.terminator {
Branch { ref cond, on_true, on_false } => {
let cond_tmp = try!(self.load_var(llvm_symbol(cond).as_str(), "i1", ctx));
ctx.code.add(format!("br i1 {}, label %b.b{}, label %b.b{}",
cond_tmp,
on_true,
on_false));
}
ParallelFor(ref pf) => {
try!(self.add_function(sir, &sir.funcs[pf.cont], None));
try!(self.add_function(sir, &sir.funcs[pf.body], Some(pf.clone())));
// TODO add parallel wrapper call
let params = get_combined_params(sir, pf);
let params_sorted: BTreeMap<&Symbol, &Type> = params.iter().collect();
let mut arg_types = String::new();
for (arg, ty) in params_sorted.iter() {
let ll_ty = try!(self.llvm_type(&ty)).to_string();
let arg_tmp = try!(self.load_var(llvm_symbol(arg).as_str(), &ll_ty, ctx));
let arg_str = format!("{} {}, ", &ll_ty, arg_tmp);
arg_types.push_str(&arg_str);
}
arg_types.push_str("%work_t* %cur.work");
ctx.code.add(format!("call void @f{}_wrapper({})", pf.body, arg_types));
ctx.code.add("br label %body.end");
}
JumpBlock(block) => {
ctx.code.add(format!("br label %b.b{}", block));
}
JumpFunction(func) => {
try!(self.add_function(sir, &sir.funcs[func], None));
let params_sorted: BTreeMap<&Symbol, &Type> =
sir.funcs[func].params.iter().collect();
let mut arg_types = String::new();
for (arg, ty) in params_sorted.iter() {
let ll_ty = try!(self.llvm_type(&ty)).to_string();
let arg_tmp = try!(self.load_var(llvm_symbol(arg).as_str(), &ll_ty, ctx));
let arg_str = format!("{} {}, ", ll_ty, arg_tmp);
arg_types.push_str(&arg_str);
}
arg_types.push_str("%work_t* %cur.work");
ctx.code.add(format!("call void @f{}({})", func, arg_types));
ctx.code.add("br label %body.end");
}
ProgramReturn(ref sym) => {
let ty = try!(get_sym_ty(func, sym));
let ty_str = try!(self.llvm_type(ty)).to_string();
let res_tmp = try!(self.load_var(llvm_symbol(sym).as_str(), &ty_str, ctx));
let elem_size_ptr = ctx.var_ids.next();
let elem_size = ctx.var_ids.next();
let elem_storage = ctx.var_ids.next();
let elem_storage_typed = ctx.var_ids.next();
let run_id = ctx.var_ids.next();
ctx.code.add(format!("{} = getelementptr {}, {}* null, i32 1",
&elem_size_ptr,
&ty_str,
&ty_str));
ctx.code.add(format!("{} = ptrtoint {}* {} to i64",
&elem_size,
&ty_str,
&elem_size_ptr));
ctx.code.add(format!("{} = call i64 @get_runid()", run_id));
ctx.code
.add(format!("{} = call i8* @weld_rt_malloc(i64 {}, i64 {})",
&elem_storage,
&run_id,
&elem_size));
ctx.code.add(format!("{} = bitcast i8* {} to {}*",
&elem_storage_typed,
&elem_storage,
&ty_str));
ctx.code.add(format!("store {} {}, {}* {}",
&ty_str,
res_tmp,
&ty_str,
&elem_storage_typed));
ctx.code.add(format!("call void @set_result(i8* {})", elem_storage));
ctx.code.add("br label %body.end");
}
EndFunction => {
ctx.code.add("br label %body.end");
}
Crash => {
let errno = WeldRuntimeErrno::Unknown as i64;
                    let run_id = ctx.var_ids.next();
                    ctx.code.add(format!("{} = call i64 @get_runid()", run_id));
                    ctx.code.add(format!("call void @weld_rt_set_errno(i64 {}, i64 {})",
                                         run_id,
                                         errno));
                    ctx.code.add("br label %body.end");
}
}
}
Ok(format!(""))
}
}
/// Return the LLVM version of a Weld symbol (encoding any special characters for LLVM).
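/// For example, `foo` with id 0 becomes `%foo`, while `foo` with id 2 becomes
/// `%foo.2`.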
fn llvm_symbol(symbol: &Symbol) -> String {
if symbol.id == 0 {
format!("%{}", symbol.name)
} else {
format!("%{}.{}", symbol.name, symbol.id)
}
}
/// Return the name of the LLVM instruction for a binary operation on a specific type.
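/// For example, `(Add, i32)` maps to `add`, `(Add, f32)` to `fadd`, and
/// `(LessThan, i64)` to `icmp slt`.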
fn llvm_binop(op_kind: BinOpKind, ty: &Type) -> WeldResult<&'static str> {
match (op_kind, ty) {
(BinOpKind::Add, &Scalar(I8)) => Ok("add"),
(BinOpKind::Add, &Scalar(I32)) => Ok("add"),
(BinOpKind::Add, &Scalar(I64)) => Ok("add"),
(BinOpKind::Add, &Scalar(F32)) => Ok("fadd"),
(BinOpKind::Add, &Scalar(F64)) => Ok("fadd"),
(BinOpKind::Subtract, &Scalar(I8)) => Ok("sub"),
(BinOpKind::Subtract, &Scalar(I32)) => Ok("sub"),
(BinOpKind::Subtract, &Scalar(I64)) => Ok("sub"),
(BinOpKind::Subtract, &Scalar(F32)) => Ok("fsub"),
(BinOpKind::Subtract, &Scalar(F64)) => Ok("fsub"),
(BinOpKind::Multiply, &Scalar(I8)) => Ok("mul"),
(BinOpKind::Multiply, &Scalar(I32)) => Ok("mul"),
(BinOpKind::Multiply, &Scalar(I64)) => Ok("mul"),
(BinOpKind::Multiply, &Scalar(F32)) => Ok("fmul"),
(BinOpKind::Multiply, &Scalar(F64)) => Ok("fmul"),
(BinOpKind::Divide, &Scalar(I8)) => Ok("sdiv"),
(BinOpKind::Divide, &Scalar(I32)) => Ok("sdiv"),
(BinOpKind::Divide, &Scalar(I64)) => Ok("sdiv"),
(BinOpKind::Divide, &Scalar(F32)) => Ok("fdiv"),
(BinOpKind::Divide, &Scalar(F64)) => Ok("fdiv"),
(BinOpKind::Equal, &Scalar(Bool)) => Ok("icmp eq"),
(BinOpKind::Equal, &Scalar(I8)) => Ok("icmp eq"),
(BinOpKind::Equal, &Scalar(I32)) => Ok("icmp eq"),
(BinOpKind::Equal, &Scalar(I64)) => Ok("icmp eq"),
(BinOpKind::Equal, &Scalar(F32)) => Ok("fcmp oeq"),
(BinOpKind::Equal, &Scalar(F64)) => Ok("fcmp oeq"),
(BinOpKind::NotEqual, &Scalar(Bool)) => Ok("icmp ne"),
(BinOpKind::NotEqual, &Scalar(I8)) => Ok("icmp ne"),
(BinOpKind::NotEqual, &Scalar(I32)) => Ok("icmp ne"),
(BinOpKind::NotEqual, &Scalar(I64)) => Ok("icmp ne"),
(BinOpKind::NotEqual, &Scalar(F32)) => Ok("fcmp one"),
(BinOpKind::NotEqual, &Scalar(F64)) => Ok("fcmp one"),
(BinOpKind::LessThan, &Scalar(I8)) => Ok("icmp slt"),
(BinOpKind::LessThan, &Scalar(I32)) => Ok("icmp slt"),
(BinOpKind::LessThan, &Scalar(I64)) => Ok("icmp slt"),
(BinOpKind::LessThan, &Scalar(F32)) => Ok("fcmp olt"),
(BinOpKind::LessThan, &Scalar(F64)) => Ok("fcmp olt"),
(BinOpKind::LessThanOrEqual, &Scalar(I8)) => Ok("icmp sle"),
(BinOpKind::LessThanOrEqual, &Scalar(I32)) => Ok("icmp sle"),
(BinOpKind::LessThanOrEqual, &Scalar(I64)) => Ok("icmp sle"),
(BinOpKind::LessThanOrEqual, &Scalar(F32)) => Ok("fcmp ole"),
(BinOpKind::LessThanOrEqual, &Scalar(F64)) => Ok("fcmp ole"),
(BinOpKind::GreaterThan, &Scalar(I8)) => Ok("icmp sgt"),
(BinOpKind::GreaterThan, &Scalar(I32)) => Ok("icmp sgt"),
(BinOpKind::GreaterThan, &Scalar(I64)) => Ok("icmp sgt"),
(BinOpKind::GreaterThan, &Scalar(F32)) => Ok("fcmp ogt"),
(BinOpKind::GreaterThan, &Scalar(F64)) => Ok("fcmp ogt"),
(BinOpKind::GreaterThanOrEqual, &Scalar(I8)) => Ok("icmp sge"),
(BinOpKind::GreaterThanOrEqual, &Scalar(I32)) => Ok("icmp sge"),
(BinOpKind::GreaterThanOrEqual, &Scalar(I64)) => Ok("icmp sge"),
(BinOpKind::GreaterThanOrEqual, &Scalar(F32)) => Ok("fcmp oge"),
(BinOpKind::GreaterThanOrEqual, &Scalar(F64)) => Ok("fcmp oge"),
(BinOpKind::LogicalAnd, &Scalar(Bool)) => Ok("and"),
(BinOpKind::BitwiseAnd, &Scalar(Bool)) => Ok("and"),
(BinOpKind::BitwiseAnd, &Scalar(I8)) => Ok("and"),
(BinOpKind::BitwiseAnd, &Scalar(I32)) => Ok("and"),
(BinOpKind::BitwiseAnd, &Scalar(I64)) => Ok("and"),
(BinOpKind::LogicalOr, &Scalar(Bool)) => Ok("or"),
(BinOpKind::BitwiseOr, &Scalar(Bool)) => Ok("or"),
(BinOpKind::BitwiseOr, &Scalar(I8)) => Ok("or"),
(BinOpKind::BitwiseOr, &Scalar(I32)) => Ok("or"),
(BinOpKind::BitwiseOr, &Scalar(I64)) => Ok("or"),
(BinOpKind::Xor, &Scalar(Bool)) => Ok("xor"),
(BinOpKind::Xor, &Scalar(I8)) => Ok("xor"),
(BinOpKind::Xor, &Scalar(I32)) => Ok("xor"),
(BinOpKind::Xor, &Scalar(I64)) => Ok("xor"),
_ => weld_err!("Unsupported binary op: {} on {}", op_kind, print_type(ty)),
}
}
/// Return the name of the LLVM instruction for a binary operation between vectors.
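/// The vector `.cmp` helper returns a lexicographic -1/0/1 result, and the pair
/// returned here is consumed as `icmp <name> i32 %cmp, <value>` (see the Vector
/// arm of BinOp in gen_func); e.g. LessThan tests `%cmp == -1`.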
fn llvm_binop_vector(op_kind: BinOpKind, ty: &Type) -> WeldResult<(&'static str, i32)> {
match op_kind {
BinOpKind::Equal => Ok(("eq", 0)),
BinOpKind::NotEqual => Ok(("ne", 0)),
BinOpKind::LessThan => Ok(("eq", -1)),
BinOpKind::LessThanOrEqual => Ok(("ne", 1)),
BinOpKind::GreaterThan => Ok(("eq", 1)),
BinOpKind::GreaterThanOrEqual => Ok(("ne", -1)),
_ => weld_err!("Unsupported binary op: {} on {}", op_kind, print_type(ty)),
}
}
/// Return the name of the LLVM instruction for a cast operation between specific types.
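/// For example, f64 -> i32 uses `fptosi`, bool -> i32 uses `zext`, and
/// i32 -> i64 uses `sext`.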
fn llvm_castop(ty1: &Type, ty2: &Type) -> WeldResult<&'static str> {
match (ty1, ty2) {
(&Scalar(F64), &Scalar(Bool)) => Ok("fptoui"),
(&Scalar(F32), &Scalar(Bool)) => Ok("fptoui"),
(&Scalar(Bool), &Scalar(F64)) => Ok("uitofp"),
(&Scalar(Bool), &Scalar(F32)) => Ok("uitofp"),
(&Scalar(F64), &Scalar(F32)) => Ok("fptrunc"),
(&Scalar(F32), &Scalar(F64)) => Ok("fpext"),
(&Scalar(F64), _) => Ok("fptosi"),
(&Scalar(F32), _) => Ok("fptosi"),
(_, &Scalar(F64)) => Ok("sitofp"),
(_, &Scalar(F32)) => Ok("sitofp"),
(&Scalar(Bool), _) => Ok("zext"),
(_, &Scalar(I64)) => Ok("sext"),
_ => Ok("trunc"),
}
}
/// Struct used to track state while generating a function.
struct FunctionContext {
/// Code section at the start of the function with alloca instructions for local symbols
alloca_code: CodeBuilder,
/// Other code in function
code: CodeBuilder,
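    /// Symbols that have already been allocated (via add_alloca) in this function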
defined_symbols: HashSet<String>,
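    /// Generator for fresh local variable names ("%t.t0", "%t.t1", ...)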
var_ids: IdGenerator,
}
impl FunctionContext {
fn new() -> FunctionContext {
FunctionContext {
alloca_code: CodeBuilder::new(),
code: CodeBuilder::new(),
var_ids: IdGenerator::new("%t.t"),
defined_symbols: HashSet::new(),
}
}
fn add_alloca(&mut self, symbol: &str, ty: &str) -> WeldResult<()> {
if !self.defined_symbols.insert(symbol.to_string()) {
weld_err!("Symbol already defined in function: {}", symbol)
} else {
self.alloca_code.add(format!("{} = alloca {}", symbol, ty));
Ok(())
}
}
}
/// Generates a small program which, when called with a `run_id`, frees
/// memory associated with the run ID.
pub fn generate_runtime_interface_module() -> WeldResult<easy_ll::CompiledModule> {
let program = include_str!("resources/runtime_interface_module.ll");
Ok(try!(easy_ll::compile_module(program, None)))
}
/// Generate a compiled LLVM module from a program whose body is a function.
pub fn compile_program(program: &Program) -> WeldResult<easy_ll::CompiledModule> {
let mut expr = try!(macro_processor::process_program(program));
transforms::uniquify(&mut expr);
try!(type_inference::infer_types(&mut expr));
let mut expr = try!(expr.to_typed());
transforms::inline_apply(&mut expr);
transforms::inline_let(&mut expr);
transforms::inline_zips(&mut expr);
transforms::fuse_loops_horizontal(&mut expr);
transforms::fuse_loops_vertical(&mut expr);
transforms::uniquify(&mut expr);
let sir_prog = try!(sir::ast_to_sir(&expr));
let mut gen = LlvmGenerator::new();
try!(gen.add_function_on_pointers("run", &sir_prog));
Ok(try!(easy_ll::compile_module(&gen.result(), Some(MERGER_BC))))
}
#[test]
fn types() {
let mut gen = LlvmGenerator::new();
assert_eq!(gen.llvm_type(&Scalar(I32)).unwrap(), "i32");
assert_eq!(gen.llvm_type(&Scalar(I64)).unwrap(), "i64");
assert_eq!(gen.llvm_type(&Scalar(F32)).unwrap(), "float");
assert_eq!(gen.llvm_type(&Scalar(F64)).unwrap(), "double");
assert_eq!(gen.llvm_type(&Scalar(I8)).unwrap(), "i8");
assert_eq!(gen.llvm_type(&Scalar(Bool)).unwrap(), "i1");
let struct1 = parse_type("{i32,bool,i32}").unwrap().to_type().unwrap();
assert_eq!(gen.llvm_type(&struct1).unwrap(), "%s0");
assert_eq!(gen.llvm_type(&struct1).unwrap(), "%s0"); // Name is reused for same struct
let struct2 = parse_type("{i32,bool}").unwrap().to_type().unwrap();
assert_eq!(gen.llvm_type(&struct2).unwrap(), "%s1");
}
| 57.3541 | 123 | 0.363902 |
71ce91db87aa6b4ea488f692b770ecd489ca04ae | 549 | use std::error::Error;
pub struct Rot13(pub String);
impl super::Cipher for Rot13 {
fn original_string(&self) -> Result<String, Box<dyn Error>> {
Ok(String::from(&self.0))
}
fn encrypted_string(&self) -> Result<String, Box<dyn Error>> {
Ok(self
.0
.chars()
.map(|ch| match ch {
'a'..='m' | 'A'..='M' => (ch as u8 + 13) as char,
'n'..='z' | 'N'..='Z' => (ch as u8 - 13) as char,
_ => ch,
})
.collect())
}
}
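// A minimal sketch of a unit test (assumes this file sits in a module whose
// parent declares the `Cipher` trait, as the impl above implies):
#[cfg(test)]
mod tests {
    use super::super::Cipher;
    use super::Rot13;

    // ROT13 shifts each letter by 13 places and leaves other characters
    // untouched, so "Hello, World!" encrypts to "Uryyb, Jbeyq!".
    #[test]
    fn encrypts_and_preserves_non_letters() {
        let cipher = Rot13(String::from("Hello, World!"));
        assert_eq!(cipher.encrypted_string().unwrap(), "Uryyb, Jbeyq!");
        assert_eq!(cipher.original_string().unwrap(), "Hello, World!");
    }
}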
| 24.954545 | 66 | 0.433515 |
5bc0d086792ebef046296d1ae052c7e8dd2082a8 | 2,896 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::ffi::{CString, OsStr, OsString};
use super::util::CStringArray;
/// A mapping of environment variables.
#[derive(Default, Clone, Debug)]
pub struct Env {
clear: bool,
vars: BTreeMap<OsString, Option<OsString>>,
}
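// A sketch of typical usage (illustrative; the API is defined below):
//
//   let mut env = Env::default();
//   env.set(OsStr::new("FOO"), OsStr::new("bar"));
//   env.remove(OsStr::new("PATH"));
//   let merged = env.capture(); // inherited vars, plus FOO, minus PATH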
impl Env {
/// Clear out all environment variables, including the ones inherited from
/// the parent process. Any variables set after this are completely new
/// variables.
pub fn clear(&mut self) {
self.clear = true;
self.vars.clear();
}
pub fn is_cleared(&self) -> bool {
self.clear
}
pub fn set(&mut self, key: &OsStr, value: &OsStr) {
self.vars.insert(key.to_owned(), Some(value.to_owned()));
}
pub fn get<K: AsRef<OsStr>>(&self, key: K) -> Option<&OsStr> {
self.vars
.get(key.as_ref())
.and_then(|v| v.as_ref().map(|v| v.as_os_str()))
}
pub fn get_captured<K: AsRef<OsStr>>(&self, key: K) -> Option<Cow<OsStr>> {
let key = key.as_ref();
if !self.clear {
if let Some(var) = std::env::var_os(key) {
return Some(Cow::Owned(var));
}
}
self.get(key).map(Cow::Borrowed)
}
pub fn remove(&mut self, key: &OsStr) {
if self.clear {
self.vars.remove(key);
} else {
self.vars.insert(key.to_owned(), None);
}
}
/// Capture the current environment and merge it with the changes we've
/// applied.
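    ///
    /// For example (illustrative): without `clear()`, a `set` of `FOO=bar`
    /// followed by a `remove` of `PATH` yields the inherited environment with
    /// `FOO` added and `PATH` dropped.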
pub fn capture(&self) -> BTreeMap<OsString, OsString> {
let mut env = if self.clear {
BTreeMap::new()
} else {
// Capture from the current environment.
std::env::vars_os().collect()
};
for (k, v) in &self.vars {
if let Some(ref v) = v {
env.insert(k.clone(), v.clone());
} else {
env.remove(k);
}
}
env
}
pub fn array(&self) -> CStringArray {
use std::os::unix::ffi::OsStringExt;
let env = self.capture();
let mut result = CStringArray::with_capacity(env.len());
for (mut k, v) in env {
// Reserve additional space for '=' and null terminator
k.reserve_exact(v.len() + 2);
k.push("=");
k.push(&v);
// Add the new entry into the array
result.push(CString::new(k.into_vec()).unwrap());
}
result
}
pub fn iter(&self) -> impl Iterator<Item = (&OsStr, Option<&OsStr>)> {
self.vars.iter().map(|(k, v)| (k.as_ref(), v.as_deref()))
}
}
| 26.568807 | 79 | 0.539019 |
1a07611397f2dc1b60d00fae0dc7589acabb8265 | 16,507 | extern crate treeflection;
#[macro_use] extern crate treeflection_derive;
#[macro_use] extern crate matches;
#[macro_use] extern crate serde_derive;
extern crate serde;
extern crate serde_json;
use treeflection::{Node, NodeRunner, NodeToken};
#[derive(Node, Serialize, Deserialize, Default, Clone)]
struct Parent {
pub foo: String,
pub bar: u32,
pub baz: bool,
pub child: Child,
private: i64,
}
#[NodeActions(
NodeAction(action="action_name", function="function_name", args="1", help="add the first argument to qux"),
NodeAction(function="same_name", return_string),
)]
#[derive(Node, Serialize, Deserialize, Default, Clone)]
struct Child {
pub qux: i32,
}
impl Child {
fn new() -> Child {
Child {
qux: 413,
}
}
pub fn function_name(&mut self, value: String) {
self.qux += value.parse::<i32>().unwrap();
}
pub fn same_name(&self) -> String {
String::from("basic action")
}
}
impl Parent {
fn new() -> Parent {
Parent {
foo: String::from("hiya"),
bar: 42,
baz: true,
child: Child {
qux: -13,
},
private: 1337,
}
}
fn empty() -> Parent {
Parent {
foo: String::new(),
bar: 0,
baz: false,
child: Child {
qux: 0,
},
private: 0,
}
}
}
#[test]
fn custom_function_name() {
let mut child = Child::new();
let runner = NodeRunner { tokens: vec!(
NodeToken::Custom(String::from("action_name"), vec!(String::from("7")))
)};
assert_eq!(child.node_step(runner), String::from(""));
assert_eq!(child.qux, 420);
}
#[test]
fn custom_same_name() {
let mut child = Child::new();
let runner = NodeRunner { tokens: vec!(
NodeToken::Custom(String::from("same_name"), vec!())
)};
assert_eq!(child.node_step(runner), String::from("basic action"));
assert_eq!(child.qux, 413);
}
#[test]
fn get_struct() {
let output =
r#"{
"foo": "hiya",
"bar": 42,
"baz": true,
"child": {
"qux": -13
},
"private": 1337
}"#;
assert_eq!(
Parent::new().node_step(NodeRunner { tokens: vec!(NodeToken::Get) }),
String::from(output)
);
}
#[test]
fn set_struct() {
let mut parent = Parent::new();
let runner = NodeRunner { tokens: vec!( NodeToken::Set(
String::from(r#"{"foo":"Memes","bar":42,"baz":true,"child":{"qux":1337},"private":-1}"#)
) )};
assert_eq!(parent.node_step(runner), String::from(""));
assert_eq!(parent.foo, String::from("Memes"));
assert_eq!(parent.bar, 42);
assert_eq!(parent.baz, true);
assert_eq!(parent.child.qux, 1337);
assert_eq!(parent.private, -1);
}
#[test]
fn no_property() {
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainProperty(String::from("notfoo")),
)};
assert_eq!(Parent::new().node_step(runner), String::from("Parent does not have a property 'notfoo'"));
}
#[test]
fn private_property() {
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainProperty(String::from("private")),
)};
assert_eq!(Parent::new().node_step(runner), String::from("Parent does not have a property 'private'"));
}
#[test]
fn string_property() {
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainProperty(String::from("foo")),
)};
assert_eq!(Parent::new().node_step(runner), String::from("hiya"));
}
#[test]
fn uint_property() {
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainProperty(String::from("bar")),
)};
assert_eq!(Parent::new().node_step(runner), String::from("42"));
}
#[test]
fn bool_property() {
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainProperty(String::from("baz")),
)};
assert_eq!(Parent::new().node_step(runner), String::from("true"));
}
#[test]
fn int_child_property() {
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainProperty(String::from("qux")),
NodeToken::ChainProperty(String::from("child")),
)};
assert_eq!(Parent::new().node_step(runner), "-13");
}
#[test]
fn default_struct() {
let runner = NodeRunner { tokens: vec!(NodeToken::SetDefault) };
let mut parent = Parent::new();
assert_eq!(parent.node_step(runner), String::from(""));
assert_eq!(parent.foo, String::new());
assert_eq!(parent.bar, 0);
assert_eq!(parent.baz, false);
assert_eq!(parent.child.qux, 0);
assert_eq!(parent.private, 0);
}
#[test]
fn variant_struct() {
let runner = NodeRunner { tokens: vec!(NodeToken::SetVariant(String::from("something"))) };
let mut parent = Parent::new();
assert_eq!(parent.node_step(runner), String::from("Parent cannot \'SetVariant(\"something\")\'"));
}
#[test]
fn copy_paste_struct() {
let copy_token = NodeRunner { tokens: vec!(NodeToken::CopyFrom) };
let paste_token = NodeRunner { tokens: vec!(NodeToken::PasteTo) };
let mut a = Parent::new();
let mut b = Parent::empty();
assert_eq!(a.node_step(copy_token), "");
assert_eq!(a.bar, 42);
assert_eq!(a.child.qux, -13);
assert_eq!(b.bar, 0);
assert_eq!(b.child.qux, 0);
assert_eq!(b.node_step(paste_token), "");
assert_eq!(b.bar, 42);
assert_eq!(a.child.qux, -13);
}
#[test]
fn help_struct_parent() {
let output = r#"
Parent Help
Actions:
* help - display this help
* get - display JSON
* set - set to JSON
* copy - copy the values from this struct
* paste - paste the copied values to this struct
* reset - reset to default values
Accessors:
* foo - String
* bar - u32
* baz - bool
* child - Child"#;
let mut parent = Parent::new();
let runner = NodeRunner { tokens: vec!(NodeToken::Help) };
assert_eq!(parent.node_step(runner), String::from(output));
}
#[test]
fn help_struct_child() {
let output = r#"
Child Help
Actions:
* help - display this help
* get - display JSON
* set - set to JSON
* copy - copy the values from this struct
* paste - paste the copied values to this struct
* reset - reset to default values
* action_name - add the first argument to qux
* same_name
Accessors:
* qux - i32"#;
let mut parent = Child::new();
let runner = NodeRunner { tokens: vec!(NodeToken::Help) };
assert_eq!(parent.node_step(runner), String::from(output));
}
#[derive(Node, Serialize, Deserialize, Clone, Debug)]
enum SomeEnum {
Foo,
Bar,
Baz { x: f32, y: f32 },
Qux (u8),
Quux (i64, String, bool),
GenericUnnamed (Vec<usize>),
GenericNamed { generic: Vec<usize> },
GenericInTupleUnnamed ((Vec<usize>, Vec<String>)),
}
impl Default for SomeEnum {
fn default() -> SomeEnum {
SomeEnum::Foo
}
}
// test for unused variable warnings in generated code
#[derive(Node, Serialize, Deserialize, Clone)]
enum SimpleEnum {
Foo,
}
impl Default for SimpleEnum {
fn default() -> SimpleEnum {
SimpleEnum::Foo
}
}
#[test]
fn get_unit_enum() {
let mut some_enum = SomeEnum::Foo;
let runner = NodeRunner { tokens: vec!(NodeToken::Get) };
assert_eq!(some_enum.node_step(runner), "\"Foo\"");
let mut some_enum = SomeEnum::Bar;
let runner = NodeRunner { tokens: vec!(NodeToken::Get) };
assert_eq!(some_enum.node_step(runner), "\"Bar\"");
}
#[test]
fn set_unit_enum() {
let mut some_enum = SomeEnum::Bar;
let runner = NodeRunner { tokens: vec!( NodeToken::Set(String::from("\"Foo\"")) )};
assert_eq!(some_enum.node_step(runner), String::from(""));
assert!(matches!(some_enum, SomeEnum::Foo));
let mut some_enum = SomeEnum::Bar;
let runner = NodeRunner { tokens: vec!( NodeToken::Set(String::from("\"Bar\"")) )};
assert_eq!(some_enum.node_step(runner), String::from(""));
assert!(matches!(some_enum, SomeEnum::Bar));
let mut some_enum = SomeEnum::Foo;
let runner = NodeRunner { tokens: vec!( NodeToken::Set(String::from("\"Bar\"")) )};
assert_eq!(some_enum.node_step(runner), String::from(""));
assert!(matches!(some_enum, SomeEnum::Bar));
let mut some_enum = SomeEnum::Foo;
let runner = NodeRunner { tokens: vec!( NodeToken::Set(String::from("\"Foo\"")) )};
assert_eq!(some_enum.node_step(runner), String::from(""));
assert!(matches!(some_enum, SomeEnum::Foo));
let mut some_enum = SomeEnum::Foo;
let runner = NodeRunner { tokens: vec!( NodeToken::Set(String::from("\"Aether\"")) )};
assert_eq!(some_enum.node_step(runner), "SomeEnum set Error: unknown variant `Aether`, expected one of `Foo`, `Bar`, `Baz`, `Qux`, `Quux`, `GenericUnnamed`, `GenericNamed`, `GenericInTupleUnnamed` at line 1 column 8");
assert!(matches!(some_enum, SomeEnum::Foo));
}
#[test]
fn get_tuple_enum() {
let mut some_enum = SomeEnum::Qux(42);
let runner = NodeRunner { tokens: vec!(NodeToken::Get) };
let output =
r#"{
"Qux": 42
}"#;
assert_eq!(some_enum.node_step(runner), output);
let mut some_enum = SomeEnum::Quux(-1337, String::from("YOYOYO"), true);
let runner = NodeRunner { tokens: vec!(NodeToken::Get) };
let output =
r#"{
"Quux": [
-1337,
"YOYOYO",
true
]
}"#;
assert_eq!(some_enum.node_step(runner), output);
}
#[test]
fn set_tuple_enum() {
let mut some_enum = SomeEnum::Foo;
let runner = NodeRunner { tokens: vec!(NodeToken::Set(String::from("{\"Qux\":13}"))) };
assert_eq!(some_enum.node_step(runner), String::from(""));
assert!(matches!(some_enum, SomeEnum::Qux(13)));
let mut some_enum = SomeEnum::Bar;
let runner = NodeRunner { tokens: vec!(NodeToken::Set(String::from("{\"Quux\":[-42, \"SomeString\", true]}"))) };
assert_eq!(some_enum.node_step(runner), String::from(""));
match some_enum {
SomeEnum::Quux (-42, some_string, true) => {
assert_eq!(some_string.as_str(), "SomeString");
}
_ => { panic!("Did not match SomeEnum::Quux (-42, _, true)") }
}
}
#[test]
fn get_struct_enum() {
let mut some_enum = SomeEnum::Baz {x: 412.12345, y: 44.11};
let runner = NodeRunner { tokens: vec!(NodeToken::Get) };
let output =
r#"{
"Baz": {
"x": 412.12344,
"y": 44.11
}
}"#;
assert_eq!(some_enum.node_step(runner), output);
}
#[test]
fn set_struct_enum() {
let mut some_enum = SomeEnum::Baz {x: 412.12345, y: 44.11};
let runner = NodeRunner { tokens: vec!(NodeToken::Set(String::from(r#"{"Baz":{"x":1337.1337,"y":42.13}}"#))) };
assert_eq!(some_enum.node_step(runner), String::from(""));
assert_eq!(format!("{:?}", some_enum), String::from("Baz { x: 1337.1337, y: 42.13 }"));
}
#[test]
fn no_property_unit_enum() {
let mut some_enum = SomeEnum::Foo;
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainProperty(String::from("notx")),
)};
assert_eq!(some_enum.node_step(runner), String::from("Foo does not have a property 'notx'"));
}
#[test]
fn no_property_tuple_enum() {
let mut some_enum = SomeEnum::Qux(42);
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainProperty(String::from("notx")),
)};
assert_eq!(some_enum.node_step(runner), String::from("Qux does not have a property 'notx'"));
}
#[test]
fn no_property_struct_enum() {
let mut some_enum = SomeEnum::Baz { x: 42.0, y: 13.37 };
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainProperty(String::from("notx")),
)};
assert_eq!(some_enum.node_step(runner), String::from("Baz does not have a property 'notx'"));
}
#[test]
fn f32_property_struct_enum() {
let mut some_enum = SomeEnum::Baz { x: 42.0, y: 13.37 };
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainProperty(String::from("x")),
)};
assert_eq!(some_enum.node_step(runner), String::from("42"));
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainProperty(String::from("y")),
)};
assert_eq!(some_enum.node_step(runner), String::from("13.37"));
}
#[test]
fn index_unit_enum() {
let mut some_enum = SomeEnum::Foo;
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainIndex(0),
)};
assert_eq!(some_enum.node_step(runner), String::from("Cannot index Foo"));
}
#[test]
fn index_struct_enum() {
let mut some_enum = SomeEnum::Baz { x: 42.0, y: 13.37 };
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainIndex(0),
)};
assert_eq!(some_enum.node_step(runner), String::from("Cannot index Baz"));
}
#[test]
fn index_tuple_enum() {
let mut some_enum = SomeEnum::Quux(-1337, String::from("YOYOYO"), true);
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainIndex(0),
)};
assert_eq!(some_enum.node_step(runner), String::from("-1337"));
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainIndex(1),
)};
assert_eq!(some_enum.node_step(runner), String::from("YOYOYO"));
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainIndex(2),
)};
assert_eq!(some_enum.node_step(runner), String::from("true"));
let runner = NodeRunner { tokens: vec!(
NodeToken::Get,
NodeToken::ChainIndex(3),
)};
assert_eq!(some_enum.node_step(runner), String::from("Used index 3 on a Quux (try a value between 0-2"));
}
#[test]
fn variant_enum() {
let mut some_enum = SomeEnum::Bar;
let runner = NodeRunner { tokens: vec!(NodeToken::SetVariant(String::from("Foo"))) };
assert_eq!(some_enum.node_step(runner), String::from(""));
assert!(matches!(some_enum, SomeEnum::Foo));
let runner = NodeRunner { tokens: vec!(NodeToken::SetVariant(String::from("Baz"))) };
assert_eq!(some_enum.node_step(runner), String::from(""));
assert_eq!(format!("{:?}", some_enum), String::from("Baz { x: 0.0, y: 0.0 }"));
let runner = NodeRunner { tokens: vec!(NodeToken::SetVariant(String::from("Qux"))) };
assert_eq!(some_enum.node_step(runner), String::from(""));
assert!(matches!(some_enum, SomeEnum::Qux (0)));
let mut some_enum = SomeEnum::Bar;
let runner = NodeRunner { tokens: vec!(NodeToken::SetVariant(String::from("nonexistent"))) };
assert_eq!(some_enum.node_step(runner), String::from("SomeEnum does not have a variant 'nonexistent'"));
assert!(matches!(some_enum, SomeEnum::Bar));
}
#[test]
fn default_enum() {
let mut some_enum = SomeEnum::Bar;
let runner = NodeRunner { tokens: vec!(NodeToken::SetDefault) };
assert_eq!(some_enum.node_step(runner), String::from(""));
assert!(matches!(some_enum, SomeEnum::Foo));
}
#[test]
fn copy_paste_enum() {
let copy_token = NodeRunner { tokens: vec!(NodeToken::CopyFrom) };
let paste_token = NodeRunner { tokens: vec!(NodeToken::PasteTo) };
let mut a = SomeEnum::Qux (13);
let mut b = SomeEnum::Foo;
assert_eq!(a.node_step(copy_token), "");
assert!(matches!(a, SomeEnum::Qux (13)));
assert_eq!(b.node_step(paste_token), "");
assert!(matches!(b, SomeEnum::Qux (13)));
}
// TODO: display tuple and struct enum details under valid values:
// Probably use json equivalent of below
//* Foo
//* Bar
//* Baz {x: f32, y: f32}
//* Qux (u8)
//* Quux (i64, String, bool)
#[test]
fn help_enum() {
let output = r#"
SomeEnum Help
Actions:
* help - display this help
* get - display JSON
* set - set to JSON
* copy - copy the values from this enum
* paste - paste the copied values to this enum
* reset - reset to default variant
* variant - set to the specified variant
Valid variants:
* Foo
* Bar
* Baz
* Qux
* Quux
* GenericUnnamed
* GenericNamed
* GenericInTupleUnnamed
Accessors:
Changes depending on which variant the enum is currently set to:
As Baz:
* .x - f32
* .y - f32
As Qux:
* [0] - u8
As Quux:
* [0] - i64
* [1] - String
* [2] - bool
As GenericUnnamed:
* [0] - Vec
As GenericNamed:
* .generic - Vec
As GenericInTupleUnnamed:
* [0] - Tuple
"#;
let mut some_enum = SomeEnum::Foo;
let runner = NodeRunner { tokens: vec!(NodeToken::Help) };
assert_eq!(some_enum.node_step(runner), String::from(output));
}
| 28.217094 | 222 | 0.616769 |
eb09274ff480ceccc2dc6404080cb72a73ac609f | 11,343 | extern crate webpki;
extern crate rcgen;
extern crate ring;
extern crate pem;
#[cfg(feature = "x509-parser")]
use rcgen::CertificateSigningRequest;
use rcgen::{BasicConstraints, Certificate, CertificateParams, DnType, IsCa};
use webpki::{EndEntityCert, TlsServerTrustAnchors, TrustAnchor};
use webpki::SignatureAlgorithm;
use webpki::{Time, DnsNameRef};
use ring::rand::SystemRandom;
use ring::signature;
use ring::signature::{EcdsaKeyPair, EcdsaSigningAlgorithm,
Ed25519KeyPair, RSA_PKCS1_SHA256, RsaKeyPair};
use std::convert::TryFrom;
mod util;
fn sign_msg_ecdsa(cert :&Certificate, msg :&[u8], alg :&'static EcdsaSigningAlgorithm) -> Vec<u8> {
let pk_der = cert.serialize_private_key_der();
let key_pair = EcdsaKeyPair::from_pkcs8(&alg, &pk_der).unwrap();
let system_random = SystemRandom::new();
let signature = key_pair.sign(&system_random, &msg).unwrap();
signature.as_ref().to_vec()
}
fn sign_msg_ed25519(cert :&Certificate, msg :&[u8]) -> Vec<u8> {
let pk_der = cert.serialize_private_key_der();
let key_pair = Ed25519KeyPair::from_pkcs8_maybe_unchecked(&pk_der).unwrap();
let signature = key_pair.sign(&msg);
signature.as_ref().to_vec()
}
fn sign_msg_rsa(cert :&Certificate, msg :&[u8]) -> Vec<u8> {
let pk_der = cert.serialize_private_key_der();
let key_pair = RsaKeyPair::from_pkcs8(&pk_der).unwrap();
let system_random = SystemRandom::new();
let mut signature = vec![0; key_pair.public_modulus_len()];
key_pair.sign(&RSA_PKCS1_SHA256, &system_random, &msg,
&mut signature).unwrap();
signature
}
fn check_cert<'a, 'b>(cert_der :&[u8], cert :&'a Certificate, alg :&SignatureAlgorithm,
sign_fn :impl FnOnce(&'a Certificate, &'b [u8]) -> Vec<u8>) {
println!("{}", cert.serialize_pem().unwrap());
check_cert_ca(cert_der, cert, cert_der, alg, alg, sign_fn);
}
fn check_cert_ca<'a, 'b>(cert_der :&[u8], cert :&'a Certificate, ca_der :&[u8],
cert_alg :&SignatureAlgorithm, ca_alg :&SignatureAlgorithm,
sign_fn :impl FnOnce(&'a Certificate, &'b [u8]) -> Vec<u8>) {
let trust_anchor = TrustAnchor::try_from_cert_der(&ca_der).unwrap();
let trust_anchor_list = &[trust_anchor];
let trust_anchors = TlsServerTrustAnchors(trust_anchor_list);
let end_entity_cert = EndEntityCert::try_from(cert_der).unwrap();
// Set time to Jan 10, 2004
let time = Time::from_seconds_since_unix_epoch(0x40_00_00_00);
// (1/3) Check whether the cert is valid
end_entity_cert.verify_is_valid_tls_server_cert(
&[&cert_alg, &ca_alg],
&trust_anchors,
&[],
time,
).expect("valid TLS server cert");
// (2/3) Check that the cert is valid for the given DNS name
let dns_name = DnsNameRef::try_from_ascii_str("crabs.crabs").unwrap();
end_entity_cert.verify_is_valid_for_dns_name(
dns_name,
).expect("valid for DNS name");
// (3/3) Check that a message signed by the cert is valid.
let msg = b"Hello, World! This message is signed.";
let signature = sign_fn(&cert, msg);
end_entity_cert.verify_signature(
&cert_alg,
msg,
&signature,
).expect("signature is valid");
}
#[test]
fn test_webpki() {
let params = util::default_params();
let cert = Certificate::from_params(params).unwrap();
// Now verify the certificate.
let cert_der = cert.serialize_der().unwrap();
let sign_fn = |cert, msg| sign_msg_ecdsa(cert, msg,
&signature::ECDSA_P256_SHA256_ASN1_SIGNING);
check_cert(&cert_der, &cert, &webpki::ECDSA_P256_SHA256, sign_fn);
}
#[test]
fn test_webpki_256() {
let mut params = util::default_params();
params.alg = &rcgen::PKCS_ECDSA_P256_SHA256;
let cert = Certificate::from_params(params).unwrap();
// Now verify the certificate.
let cert_der = cert.serialize_der().unwrap();
let sign_fn = |cert, msg| sign_msg_ecdsa(cert, msg,
&signature::ECDSA_P256_SHA256_ASN1_SIGNING);
check_cert(&cert_der, &cert, &webpki::ECDSA_P256_SHA256, sign_fn);
}
#[test]
fn test_webpki_384() {
let mut params = util::default_params();
params.alg = &rcgen::PKCS_ECDSA_P384_SHA384;
let cert = Certificate::from_params(params).unwrap();
// Now verify the certificate.
let cert_der = cert.serialize_der().unwrap();
let sign_fn = |cert, msg| sign_msg_ecdsa(cert, msg,
&signature::ECDSA_P384_SHA384_ASN1_SIGNING);
check_cert(&cert_der, &cert, &webpki::ECDSA_P384_SHA384, sign_fn);
}
#[test]
fn test_webpki_25519() {
let mut params = util::default_params();
params.alg = &rcgen::PKCS_ED25519;
let cert = Certificate::from_params(params).unwrap();
// Now verify the certificate.
let cert_der = cert.serialize_der().unwrap();
check_cert(&cert_der, &cert, &webpki::ED25519, &sign_msg_ed25519);
}
#[test]
fn test_webpki_25519_v1_given() {
let mut params = util::default_params();
params.alg = &rcgen::PKCS_ED25519;
let kp = rcgen::KeyPair::from_pem(util::ED25519_TEST_KEY_PAIR_PEM_V1).unwrap();
params.key_pair = Some(kp);
let cert = Certificate::from_params(params).unwrap();
// Now verify the certificate.
let cert_der = cert.serialize_der().unwrap();
check_cert(&cert_der, &cert, &webpki::ED25519, &sign_msg_ed25519);
}
#[test]
fn test_webpki_25519_v2_given() {
let mut params = util::default_params();
params.alg = &rcgen::PKCS_ED25519;
let kp = rcgen::KeyPair::from_pem(util::ED25519_TEST_KEY_PAIR_PEM_V2).unwrap();
params.key_pair = Some(kp);
let cert = Certificate::from_params(params).unwrap();
// Now verify the certificate.
let cert_der = cert.serialize_der().unwrap();
check_cert(&cert_der, &cert, &webpki::ED25519, &sign_msg_ed25519);
}
#[test]
fn test_webpki_rsa_given() {
let mut params = util::default_params();
params.alg = &rcgen::PKCS_RSA_SHA256;
let kp = rcgen::KeyPair::from_pem(util::RSA_TEST_KEY_PAIR_PEM).unwrap();
params.key_pair = Some(kp);
let cert = Certificate::from_params(params).unwrap();
// Now verify the certificate.
let cert_der = cert.serialize_der().unwrap();
check_cert(&cert_der, &cert, &webpki::RSA_PKCS1_2048_8192_SHA256,
&sign_msg_rsa);
}
#[test]
fn test_webpki_separate_ca() {
let mut params = util::default_params();
params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
let ca_cert = Certificate::from_params(params).unwrap();
let ca_der = ca_cert.serialize_der().unwrap();
let mut params = CertificateParams::new(vec!["crabs.crabs".to_string()]);
params.distinguished_name.push(DnType::OrganizationName, "Crab widgits SE");
params.distinguished_name.push(DnType::CommonName, "Dev domain");
let cert = Certificate::from_params(params).unwrap();
let cert_der = cert.serialize_der_with_signer(&ca_cert).unwrap();
let sign_fn = |cert, msg| sign_msg_ecdsa(cert, msg,
&signature::ECDSA_P256_SHA256_ASN1_SIGNING);
check_cert_ca(&cert_der, &cert, &ca_der,
&webpki::ECDSA_P256_SHA256, &webpki::ECDSA_P256_SHA256, sign_fn);
}
#[test]
fn test_webpki_separate_ca_with_other_signing_alg() {
let mut params = util::default_params();
params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
params.alg = &rcgen::PKCS_ECDSA_P256_SHA256;
let ca_cert = Certificate::from_params(params).unwrap();
let ca_der = ca_cert.serialize_der().unwrap();
let mut params = CertificateParams::new(vec!["crabs.crabs".to_string()]);
params.alg = &rcgen::PKCS_ED25519;
params.distinguished_name.push(DnType::OrganizationName, "Crab widgits SE");
params.distinguished_name.push(DnType::CommonName, "Dev domain");
let cert = Certificate::from_params(params).unwrap();
let cert_der = cert.serialize_der_with_signer(&ca_cert).unwrap();
check_cert_ca(&cert_der, &cert, &ca_der,
&webpki::ED25519, &webpki::ECDSA_P256_SHA256, sign_msg_ed25519);
}
/*
// TODO https://github.com/briansmith/webpki/issues/134
// TODO https://github.com/briansmith/webpki/issues/135
#[test]
fn test_webpki_separate_ca_name_constraints() {
let mut params = util::default_params();
params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
params.name_constraints = Some(NameConstraints {
// TODO also add a test with non-empty permitted_subtrees that
// doesn't contain a DirectoryName entry. This isn't possible
// currently due to a limitation of webpki.
permitted_subtrees : vec![GeneralSubtree::DnsName("dev".to_string()), GeneralSubtree::DirectoryName(rcgen::DistinguishedName::new())],
//permitted_subtrees : vec![GeneralSubtree::DnsName("dev".to_string())],
//permitted_subtrees : Vec::new(),
//excluded_subtrees : vec![GeneralSubtree::DnsName("v".to_string())],
excluded_subtrees : Vec::new(),
});
let ca_cert = Certificate::from_params(params).unwrap();
println!("{}", ca_cert.serialize_pem().unwrap());
let ca_der = ca_cert.serialize_der().unwrap();
let mut params = CertificateParams::new(vec!["crabs.dev".to_string()]);
params.distinguished_name = rcgen::DistinguishedName::new();
//params.distinguished_name.push(DnType::OrganizationName, "Crab widgits SE");
//params.distinguished_name.push(DnType::CommonName, "Dev domain");
let cert = Certificate::from_params(params).unwrap();
let cert_der = cert.serialize_der_with_signer(&ca_cert).unwrap();
println!("{}", cert.serialize_pem_with_signer(&ca_cert).unwrap());
let sign_fn = |cert, msg| sign_msg_ecdsa(cert, msg,
&signature::ECDSA_P256_SHA256_ASN1_SIGNING);
check_cert_ca(&cert_der, &cert, &ca_der,
&webpki::ECDSA_P256_SHA256, sign_fn);
}
*/
#[cfg(feature = "x509-parser")]
#[test]
fn test_webpki_imported_ca() {
use std::convert::TryInto;
let mut params = util::default_params();
params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
let ca_cert = Certificate::from_params(params).unwrap();
let (ca_cert_der, ca_key_der) = (ca_cert.serialize_der().unwrap(), ca_cert.serialize_private_key_der());
let ca_key_pair = ca_key_der.as_slice().try_into().unwrap();
let imported_ca_cert_params = CertificateParams::from_ca_cert_der(ca_cert_der.as_slice(), ca_key_pair)
.unwrap();
let imported_ca_cert = Certificate::from_params(imported_ca_cert_params).unwrap();
let mut params = CertificateParams::new(vec!["crabs.crabs".to_string()]);
params.distinguished_name.push(DnType::OrganizationName, "Crab widgits SE");
params.distinguished_name.push(DnType::CommonName, "Dev domain");
let cert = Certificate::from_params(params).unwrap();
let cert_der = cert.serialize_der_with_signer(&imported_ca_cert).unwrap();
let sign_fn = |cert, msg| sign_msg_ecdsa(cert, msg,
&signature::ECDSA_P256_SHA256_ASN1_SIGNING);
check_cert_ca(&cert_der, &cert, &ca_cert_der,
&webpki::ECDSA_P256_SHA256, &webpki::ECDSA_P256_SHA256, sign_fn);
}
#[cfg(feature = "x509-parser")]
#[test]
fn test_certificate_from_csr() {
let mut params = CertificateParams::new(vec!["crabs.crabs".to_string()]);
params.distinguished_name.push(DnType::OrganizationName, "Crab widgits SE");
params.distinguished_name.push(DnType::CommonName, "Dev domain");
let cert = Certificate::from_params(params).unwrap();
let csr_der = cert.serialize_request_der().unwrap();
let csr = CertificateSigningRequest::from_der(&csr_der).unwrap();
let mut params = util::default_params();
params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
let ca_cert = Certificate::from_params(params).unwrap();
let ca_cert_der = ca_cert.serialize_der().unwrap();
let cert_der = csr.serialize_der_with_signer(&ca_cert).unwrap();
let sign_fn = |cert, msg| sign_msg_ecdsa(cert, msg,
&signature::ECDSA_P256_SHA256_ASN1_SIGNING);
check_cert_ca(&cert_der, &cert, &ca_cert_der,
&webpki::ECDSA_P256_SHA256, &webpki::ECDSA_P256_SHA256, sign_fn);
}
| 35.336449 | 136 | 0.742484 |
e211d8c4609be575ecaf60cb87f2dd2cf84e1ce6 | 4,137 | use super::env::EnvConfig;
use crate::filesystem::default_config_pathbuf;
use crate::finder::FinderChoice;
use crate::fs;
use crate::terminal::style::Color as TerminalColor;
use anyhow::Result;
use serde::{de, Deserialize};
use std::convert::TryFrom;
use std::io::BufReader;
use std::path::Path;
use std::path::PathBuf;
use std::str::FromStr;
#[derive(Deserialize)]
pub struct Color(#[serde(deserialize_with = "color_deserialize")] TerminalColor);
impl Color {
pub fn get(&self) -> TerminalColor {
self.0
}
}
fn color_deserialize<'de, D>(deserializer: D) -> Result<TerminalColor, D::Error>
where
D: de::Deserializer<'de>,
{
let s: String = Deserialize::deserialize(deserializer)?;
TerminalColor::try_from(s.as_str())
.map_err(|_| de::Error::custom(format!("Failed to deserialize color: {}", s)))
}
#[derive(Deserialize)]
#[serde(default)]
pub struct ColorWidth {
pub color: Color,
pub width_percentage: u16,
pub min_width: u16,
}
#[derive(Deserialize)]
#[serde(default)]
pub struct Style {
pub tag: ColorWidth,
pub comment: ColorWidth,
pub snippet: ColorWidth,
}
#[derive(Deserialize)]
#[serde(default)]
pub struct Finder {
#[serde(deserialize_with = "finder_deserialize")]
pub command: FinderChoice,
pub overrides: Option<String>,
pub overrides_var: Option<String>,
}
fn finder_deserialize<'de, D>(deserializer: D) -> Result<FinderChoice, D::Error>
where
D: de::Deserializer<'de>,
{
let s: String = Deserialize::deserialize(deserializer)?;
FinderChoice::from_str(s.to_lowercase().as_str())
.map_err(|_| de::Error::custom(format!("Failed to deserialize finder: {}", s)))
}
#[derive(Deserialize)]
#[serde(default)]
pub struct Cheats {
pub path: Option<String>,
}
#[derive(Deserialize)]
#[serde(default)]
pub struct Search {
pub tags: Option<String>,
}
#[derive(Deserialize)]
#[serde(default)]
pub struct Shell {
pub command: String,
}
#[derive(Deserialize, Default)]
#[serde(default)]
pub struct YamlConfig {
pub style: Style,
pub finder: Finder,
pub cheats: Cheats,
pub search: Search,
pub shell: Shell,
}
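// A hedged sketch of a YAML document this config can deserialize; every key
// is optional thanks to `#[serde(default)]`, and the concrete values below
// (color name, finder, cheat path) are illustrative assumptions, not an
// exhaustive or authoritative list:
//
// style:
//   tag:
//     color: cyan
//     width_percentage: 26
//     min_width: 20
// finder:
//   command: fzf
// cheats:
//   path: ~/.local/share/navi/cheats
// shell:
//   command: bash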
impl YamlConfig {
fn from_str(text: &str) -> Result<Self> {
        serde_yaml::from_str(text).map_err(|e| e.into())
}
fn from_path(path: &Path) -> Result<Self> {
let file = fs::open(path)?;
let reader = BufReader::new(file);
serde_yaml::from_reader(reader).map_err(|e| e.into())
}
pub fn get(env: &EnvConfig) -> Result<Self> {
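        // Resolution order: inline YAML from the environment first, then an
        // explicit config path, then the default path (when it exists), and
        // finally the built-in defaults.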
if let Some(yaml) = env.config_yaml.as_ref() {
return Self::from_str(yaml);
}
if let Some(path_str) = env.config_path.as_ref() {
let p = PathBuf::from(path_str);
return YamlConfig::from_path(&p);
}
if let Ok(p) = default_config_pathbuf() {
if p.exists() {
return YamlConfig::from_path(&p);
}
}
Ok(YamlConfig::default())
}
}
impl Default for ColorWidth {
fn default() -> Self {
Self {
color: Color(TerminalColor::Blue),
width_percentage: 26,
min_width: 20,
}
}
}
impl Default for Style {
fn default() -> Self {
Self {
tag: ColorWidth {
color: Color(TerminalColor::Cyan),
width_percentage: 26,
min_width: 20,
},
comment: ColorWidth {
color: Color(TerminalColor::Blue),
width_percentage: 42,
min_width: 45,
},
snippet: Default::default(),
}
}
}
impl Default for Finder {
fn default() -> Self {
Self {
command: FinderChoice::Fzf,
overrides: None,
overrides_var: None,
}
}
}
impl Default for Cheats {
fn default() -> Self {
Self { path: None }
}
}
impl Default for Search {
fn default() -> Self {
Self { tags: None }
}
}
impl Default for Shell {
fn default() -> Self {
Self {
command: "bash".to_string(),
}
}
}
| 23.111732 | 87 | 0.589799 |
0eb57dc63b933341e7f7f8307fe2cd681c702210 | 2,058 | use super::tokenize::Loc;
macro_rules! error {
($fmt:expr) => {
eprintln!($fmt);
std::process::exit(1);
};
($fmt:expr, $($arg:tt)*) => {
eprintln!($fmt, $($arg)*);
std::process::exit(1);
};
}
macro_rules! error_at_impl {
($src:expr, $at:expr, $msg:expr) => {
use crate::error::get_error_line;
let (line, corr) = get_error_line(&$src.code, $at);
let path_row = match $src.path {
Some(ref p) => format!("{}:{}: ", p, $at.row + 1),
None => format!("-:{}: ", $at.row + 1),
};
let at = $at.col + corr + path_row.chars().count();
eprintln!("{}{}", path_row, line);
eprint!("{}^ ", " ".repeat(at));
eprintln!("{}", $msg);
std::process::exit(1);
};
}
macro_rules! error_at {
($src:expr, $at:expr, $fmt:expr) => {
error_at_impl!($src, $at, $fmt);
};
($src:expr, $at:expr, $fmt:expr, $($arg:tt)*) => {
let msg = format!($fmt, $($arg)*);
error_at_impl!($src, $at, msg);
};
}
macro_rules! error_tok {
($tok:expr, $fmt:expr) => {
error_at!($tok.common.src, $tok.common.loc, $fmt);
};
($tok:expr, $fmt:expr, $($arg:tt)*) => {
error_at!($tok.common.src, $tok.common.loc, $fmt, $($arg)*);
};
}
pub fn get_error_line(src: &str, loc: Loc) -> (String, usize) {
let mut line = String::new();
let mut cur_row = 0;
let mut cur_col = 0;
let mut correction = 0;
for c in src.chars() {
if cur_row > loc.row {
break;
}
if c == '\n' {
cur_row += 1;
continue;
}
if cur_row == loc.row {
cur_col += 1;
if c == '\t' {
// タブはスペース4つに変換する
line.push_str(" ");
// タブをスペースに変換した分colに加算する
if cur_col < loc.col {
correction += 3;
}
} else {
line.push(c);
};
}
}
(line, correction)
}
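// Illustrative only (not part of the original file): a tab before the error
// column expands to four spaces, so `correction` grows by three to keep the
// caret aligned. Assumes `Loc`'s `row`/`col` fields are constructible here.
#[cfg(test)]
mod get_error_line_tests {
    use super::*;

    #[test]
    fn tab_widens_caret_offset() {
        let loc = Loc { row: 0, col: 2 };
        let (line, corr) = get_error_line("\tx = 1;", loc);
        assert_eq!(line, "    x = 1;");
        assert_eq!(corr, 3);
    }
}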
| 24.211765 | 68 | 0.438776 |
8a640e9ce7581c829b5ab07bc9f7a4c6dc6d7fc0 | 1,137 | /*
* The Jira Cloud platform REST API
*
* Jira Cloud platform REST API documentation
*
* The version of the OpenAPI document: 1001.0.0-SNAPSHOT
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
/// EntityProperty : An entity property, for more information see [Entity properties](https://developer.atlassian.com/cloud/jira/platform/jira-entity-properties/).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EntityProperty {
/// The key of the property. Required on create and update.
#[serde(rename = "key", skip_serializing_if = "Option::is_none")]
pub key: Option<String>,
/// The value of the property. Required on create and update.
#[serde(rename = "value", skip_serializing_if = "Option::is_none")]
pub value: Option<serde_json::Value>,
}
impl EntityProperty {
/// An entity property, for more information see [Entity properties](https://developer.atlassian.com/cloud/jira/platform/jira-entity-properties/).
pub fn new() -> EntityProperty {
EntityProperty {
key: None,
value: None,
}
}
}
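// Illustrative only: building and serializing a property. The key and value
// below are made-up examples; `serde_json` is assumed available, as the type
// of the `value` field suggests.
//
// let mut prop = EntityProperty::new();
// prop.key = Some("support.impact".to_string());
// prop.value = Some(serde_json::json!({ "level": "high" }));
// let body = serde_json::to_string(&prop).unwrap();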
| 35.53125 | 163 | 0.692172 |
dd71ef85c8f7edd4f032fe522c2f3327b3df4ce8 | 7,998 | // Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
use super::message::*;
use super::*;
use std::os::unix::io::RawFd;
pub const MAX_VRING_NUM: usize = 2;
pub const VIRTIO_FEATURES: u64 = 0x40000003;
#[derive(Default)]
pub struct DummySlave {
pub owned: bool,
pub features_acked: bool,
pub acked_features: u64,
pub acked_protocol_features: u64,
pub vring_num: usize,
pub call_fd: [Option<RawFd>; MAX_VRING_NUM],
pub kick_fd: [Option<RawFd>; MAX_VRING_NUM],
pub err_fd: [Option<RawFd>; MAX_VRING_NUM],
pub vring_started: [bool; MAX_VRING_NUM],
pub vring_enabled: [bool; MAX_VRING_NUM],
}
impl DummySlave {
pub fn new() -> Self {
DummySlave {
vring_num: 1,
..Default::default()
}
}
}
impl VhostUserSlave for DummySlave {
fn set_owner(&mut self) -> Result<()> {
if self.owned {
return Err(Error::InvalidOperation);
}
self.owned = true;
Ok(())
}
fn reset_owner(&mut self) -> Result<()> {
self.owned = false;
self.features_acked = false;
self.acked_features = 0;
self.acked_protocol_features = 0;
Ok(())
}
fn get_features(&mut self) -> Result<u64> {
Ok(VIRTIO_FEATURES)
}
fn set_features(&mut self, features: u64) -> Result<()> {
if !self.owned {
return Err(Error::InvalidOperation);
} else if self.features_acked {
return Err(Error::InvalidOperation);
} else if (features & !VIRTIO_FEATURES) != 0 {
return Err(Error::InvalidParam);
}
self.acked_features = features;
self.features_acked = true;
// If VHOST_USER_F_PROTOCOL_FEATURES has not been negotiated,
// the ring is initialized in an enabled state.
// If VHOST_USER_F_PROTOCOL_FEATURES has been negotiated,
// the ring is initialized in a disabled state. Client must not
// pass data to/from the backend until ring is enabled by
// VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has
// been disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0.
let vring_enabled =
self.acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0;
for enabled in &mut self.vring_enabled {
*enabled = vring_enabled;
}
Ok(())
}
fn get_protocol_features(&mut self) -> Result<u64> {
Ok(VhostUserProtocolFeatures::all().bits())
}
fn set_protocol_features(&mut self, features: u64) -> Result<()> {
// Note: slave that reported VHOST_USER_F_PROTOCOL_FEATURES must
// support this message even before VHOST_USER_SET_FEATURES was
// called.
// What happens if the master calls set_features() with
// VHOST_USER_F_PROTOCOL_FEATURES cleared after calling this
// interface?
self.acked_protocol_features = features;
Ok(())
}
fn set_mem_table(&mut self, _ctx: &[VhostUserMemoryRegion], _fds: &[RawFd]) -> Result<()> {
// TODO
Ok(())
}
fn get_queue_num(&mut self) -> Result<u64> {
Ok(MAX_VRING_NUM as u64)
}
fn set_vring_num(&mut self, _index: u32, _num: u32) -> Result<()> {
// TODO
Ok(())
}
fn set_vring_addr(
&mut self,
_index: u32,
_flags: VhostUserVringAddrFlags,
_descriptor: u64,
_used: u64,
_available: u64,
_log: u64,
) -> Result<()> {
// TODO
Ok(())
}
fn set_vring_base(&mut self, _index: u32, _base: u32) -> Result<()> {
// TODO
Ok(())
}
fn get_vring_base(&mut self, index: u32) -> Result<VhostUserVringState> {
Ok(VhostUserVringState::new(index, 0))
}
fn set_vring_kick(&mut self, index: u8, fd: Option<RawFd>) -> Result<()> {
if index as usize >= MAX_VRING_NUM || index as usize > self.vring_num {
return Err(Error::InvalidParam);
}
if self.kick_fd[index as usize].is_some() {
// Close file descriptor set by previous operations.
let _ = nix::unistd::close(self.kick_fd[index as usize].unwrap());
}
self.kick_fd[index as usize] = fd;
// Quotation from vhost-user spec:
// Client must start ring upon receiving a kick (that is, detecting
// that file descriptor is readable) on the descriptor specified by
// VHOST_USER_SET_VRING_KICK, and stop ring upon receiving
// VHOST_USER_GET_VRING_BASE.
//
// So we should add fd to event monitor(select, poll, epoll) here.
self.vring_started[index as usize] = true;
Ok(())
}
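    // A hedged sketch (not used by this dummy slave) of what "add fd to event
    // monitor" could look like with nix's epoll wrappers; the function name
    // and the surrounding event loop are illustrative assumptions:
    //
    // use nix::sys::epoll::{epoll_ctl, EpollEvent, EpollFlags, EpollOp};
    // fn watch_kick(epfd: RawFd, kick: RawFd) -> nix::Result<()> {
    //     let mut ev = EpollEvent::new(EpollFlags::EPOLLIN, kick as u64);
    //     epoll_ctl(epfd, EpollOp::EpollCtlAdd, kick, &mut ev)
    // }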
fn set_vring_call(&mut self, index: u8, fd: Option<RawFd>) -> Result<()> {
if index as usize >= MAX_VRING_NUM || index as usize > self.vring_num {
return Err(Error::InvalidParam);
}
if self.call_fd[index as usize].is_some() {
// Close file descriptor set by previous operations.
let _ = nix::unistd::close(self.call_fd[index as usize].unwrap());
}
self.call_fd[index as usize] = fd;
Ok(())
}
fn set_vring_err(&mut self, index: u8, fd: Option<RawFd>) -> Result<()> {
if index as usize >= MAX_VRING_NUM || index as usize > self.vring_num {
return Err(Error::InvalidParam);
}
if self.err_fd[index as usize].is_some() {
// Close file descriptor set by previous operations.
let _ = nix::unistd::close(self.err_fd[index as usize].unwrap());
}
self.err_fd[index as usize] = fd;
Ok(())
}
/*
Client must start ring upon receiving a kick (that is, detecting that file
descriptor is readable) on the descriptor specified by
VHOST_USER_SET_VRING_KICK, and stop ring upon receiving
VHOST_USER_GET_VRING_BASE.
*/
fn set_vring_enable(&mut self, index: u32, enable: bool) -> Result<()> {
// This request should be handled only when VHOST_USER_F_PROTOCOL_FEATURES
// has been negotiated.
if self.acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0 {
return Err(Error::InvalidOperation);
} else if index as usize >= MAX_VRING_NUM || index as usize > self.vring_num {
return Err(Error::InvalidParam);
}
// Slave must not pass data to/from the backend until ring is
// enabled by VHOST_USER_SET_VRING_ENABLE with parameter 1,
// or after it has been disabled by VHOST_USER_SET_VRING_ENABLE
// with parameter 0.
self.vring_enabled[index as usize] = enable;
Ok(())
}
fn get_config(
&mut self,
offset: u32,
size: u32,
_flags: VhostUserConfigFlags,
) -> Result<Vec<u8>> {
if self.acked_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
return Err(Error::InvalidOperation);
} else if offset < VHOST_USER_CONFIG_OFFSET
|| offset >= VHOST_USER_CONFIG_SIZE
|| size > VHOST_USER_CONFIG_SIZE - VHOST_USER_CONFIG_OFFSET
|| size + offset > VHOST_USER_CONFIG_SIZE
{
return Err(Error::InvalidParam);
}
Ok(vec![0xa5; size as usize])
}
fn set_config(&mut self, offset: u32, buf: &[u8], _flags: VhostUserConfigFlags) -> Result<()> {
let size = buf.len() as u32;
if self.acked_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
return Err(Error::InvalidOperation);
} else if offset < VHOST_USER_CONFIG_OFFSET
|| offset >= VHOST_USER_CONFIG_SIZE
|| size > VHOST_USER_CONFIG_SIZE - VHOST_USER_CONFIG_OFFSET
|| size + offset > VHOST_USER_CONFIG_SIZE
{
return Err(Error::InvalidParam);
}
Ok(())
}
}
| 33.889831 | 99 | 0.6014 |
89789c3d8513538179011a8f850d0ed021f1c7eb | 13,263 | //! This module contains paths to types and functions Clippy needs to know
//! about.
//!
//! Whenever possible, please consider diagnostic items over hardcoded paths.
//! See <https://github.com/rust-lang/rust-clippy/issues/5393> for more information.
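// A hedged sketch of how these consts are typically consumed inside a lint
// (assuming `clippy_utils::match_def_path`, which this codebase provides):
//
// if clippy_utils::match_def_path(cx, def_id, &paths::VEC_NEW) {
//     // the callee is `Vec::new`
// }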
#[cfg(feature = "internal")]
pub const APPLICABILITY: [&str; 2] = ["rustc_lint_defs", "Applicability"];
#[cfg(feature = "internal")]
pub const APPLICABILITY_VALUES: [[&str; 3]; 4] = [
["rustc_lint_defs", "Applicability", "Unspecified"],
["rustc_lint_defs", "Applicability", "HasPlaceholders"],
["rustc_lint_defs", "Applicability", "MaybeIncorrect"],
["rustc_lint_defs", "Applicability", "MachineApplicable"],
];
#[cfg(feature = "internal")]
pub const DIAGNOSTIC_BUILDER: [&str; 3] = ["rustc_errors", "diagnostic_builder", "DiagnosticBuilder"];
pub const ARC_PTR_EQ: [&str; 4] = ["alloc", "sync", "Arc", "ptr_eq"];
pub const ASMUT_TRAIT: [&str; 3] = ["core", "convert", "AsMut"];
pub const ASREF_TRAIT: [&str; 3] = ["core", "convert", "AsRef"];
pub const BTREEMAP_CONTAINS_KEY: [&str; 6] = ["alloc", "collections", "btree", "map", "BTreeMap", "contains_key"];
pub const BTREEMAP_ENTRY: [&str; 6] = ["alloc", "collections", "btree", "map", "entry", "Entry"];
pub const BTREEMAP_INSERT: [&str; 6] = ["alloc", "collections", "btree", "map", "BTreeMap", "insert"];
pub const CLONE_TRAIT_METHOD: [&str; 4] = ["core", "clone", "Clone", "clone"];
pub const COW: [&str; 3] = ["alloc", "borrow", "Cow"];
pub const CSTRING_AS_C_STR: [&str; 5] = ["alloc", "ffi", "c_str", "CString", "as_c_str"];
pub const DEFAULT_TRAIT_METHOD: [&str; 4] = ["core", "default", "Default", "default"];
pub const DEREF_MUT_TRAIT_METHOD: [&str; 5] = ["core", "ops", "deref", "DerefMut", "deref_mut"];
/// Preferably use the diagnostic item `sym::deref_method` where possible
pub const DEREF_TRAIT_METHOD: [&str; 5] = ["core", "ops", "deref", "Deref", "deref"];
pub const DIR_BUILDER: [&str; 3] = ["std", "fs", "DirBuilder"];
pub const DISPLAY_TRAIT: [&str; 3] = ["core", "fmt", "Display"];
#[cfg(feature = "internal")]
pub const EARLY_CONTEXT: [&str; 2] = ["rustc_lint", "EarlyContext"];
#[cfg(feature = "internal")]
pub const EARLY_LINT_PASS: [&str; 3] = ["rustc_lint", "passes", "EarlyLintPass"];
pub const EXIT: [&str; 3] = ["std", "process", "exit"];
pub const F32_EPSILON: [&str; 4] = ["core", "f32", "<impl f32>", "EPSILON"];
pub const F64_EPSILON: [&str; 4] = ["core", "f64", "<impl f64>", "EPSILON"];
pub const FILE: [&str; 3] = ["std", "fs", "File"];
pub const FILE_TYPE: [&str; 3] = ["std", "fs", "FileType"];
pub const FROM_FROM: [&str; 4] = ["core", "convert", "From", "from"];
pub const FROM_ITERATOR_METHOD: [&str; 6] = ["core", "iter", "traits", "collect", "FromIterator", "from_iter"];
pub const FROM_STR_METHOD: [&str; 5] = ["core", "str", "traits", "FromStr", "from_str"];
pub const FUTURE_FROM_GENERATOR: [&str; 3] = ["core", "future", "from_generator"];
#[expect(clippy::invalid_paths)] // internal lints do not know about all external crates
pub const FUTURES_IO_ASYNCREADEXT: [&str; 3] = ["futures_util", "io", "AsyncReadExt"];
#[expect(clippy::invalid_paths)] // internal lints do not know about all external crates
pub const FUTURES_IO_ASYNCWRITEEXT: [&str; 3] = ["futures_util", "io", "AsyncWriteExt"];
pub const HASHMAP_CONTAINS_KEY: [&str; 6] = ["std", "collections", "hash", "map", "HashMap", "contains_key"];
pub const HASHMAP_ENTRY: [&str; 5] = ["std", "collections", "hash", "map", "Entry"];
pub const HASHMAP_INSERT: [&str; 6] = ["std", "collections", "hash", "map", "HashMap", "insert"];
#[cfg(feature = "internal")]
pub const IDENT: [&str; 3] = ["rustc_span", "symbol", "Ident"];
#[cfg(feature = "internal")]
pub const IDENT_AS_STR: [&str; 4] = ["rustc_span", "symbol", "Ident", "as_str"];
pub const INDEX: [&str; 3] = ["core", "ops", "Index"];
pub const INDEX_MUT: [&str; 3] = ["core", "ops", "IndexMut"];
pub const INSERT_STR: [&str; 4] = ["alloc", "string", "String", "insert_str"];
pub const IO_READ: [&str; 3] = ["std", "io", "Read"];
pub const IO_WRITE: [&str; 3] = ["std", "io", "Write"];
pub const IPADDR_V4: [&str; 5] = ["std", "net", "ip", "IpAddr", "V4"];
pub const IPADDR_V6: [&str; 5] = ["std", "net", "ip", "IpAddr", "V6"];
pub const ITER_COUNT: [&str; 6] = ["core", "iter", "traits", "iterator", "Iterator", "count"];
pub const ITER_REPEAT: [&str; 5] = ["core", "iter", "sources", "repeat", "repeat"];
#[expect(clippy::invalid_paths)] // internal lints do not know about all external crates
pub const ITERTOOLS_NEXT_TUPLE: [&str; 3] = ["itertools", "Itertools", "next_tuple"];
#[cfg(feature = "internal")]
pub const KW_MODULE: [&str; 3] = ["rustc_span", "symbol", "kw"];
#[cfg(feature = "internal")]
pub const LATE_CONTEXT: [&str; 2] = ["rustc_lint", "LateContext"];
#[cfg(feature = "internal")]
pub const LATE_LINT_PASS: [&str; 3] = ["rustc_lint", "passes", "LateLintPass"];
#[cfg(feature = "internal")]
pub const LINT: [&str; 2] = ["rustc_lint_defs", "Lint"];
pub const MEM_SWAP: [&str; 3] = ["core", "mem", "swap"];
pub const MUTEX_GUARD: [&str; 4] = ["std", "sync", "mutex", "MutexGuard"];
pub const OPEN_OPTIONS: [&str; 3] = ["std", "fs", "OpenOptions"];
/// Preferably use the diagnostic item `sym::Option` where possible
pub const OPTION: [&str; 3] = ["core", "option", "Option"];
pub const OPTION_NONE: [&str; 4] = ["core", "option", "Option", "None"];
pub const OPTION_SOME: [&str; 4] = ["core", "option", "Option", "Some"];
pub const ORD: [&str; 3] = ["core", "cmp", "Ord"];
pub const OS_STRING_AS_OS_STR: [&str; 5] = ["std", "ffi", "os_str", "OsString", "as_os_str"];
pub const OS_STR_TO_OS_STRING: [&str; 5] = ["std", "ffi", "os_str", "OsStr", "to_os_string"];
pub const PARKING_LOT_RAWMUTEX: [&str; 3] = ["parking_lot", "raw_mutex", "RawMutex"];
pub const PARKING_LOT_RAWRWLOCK: [&str; 3] = ["parking_lot", "raw_rwlock", "RawRwLock"];
pub const PARKING_LOT_MUTEX_GUARD: [&str; 3] = ["lock_api", "mutex", "MutexGuard"];
pub const PARKING_LOT_RWLOCK_READ_GUARD: [&str; 3] = ["lock_api", "rwlock", "RwLockReadGuard"];
pub const PARKING_LOT_RWLOCK_WRITE_GUARD: [&str; 3] = ["lock_api", "rwlock", "RwLockWriteGuard"];
pub const PATH_BUF_AS_PATH: [&str; 4] = ["std", "path", "PathBuf", "as_path"];
pub const PATH_TO_PATH_BUF: [&str; 4] = ["std", "path", "Path", "to_path_buf"];
pub const PERMISSIONS: [&str; 3] = ["std", "fs", "Permissions"];
#[cfg_attr(not(unix), allow(clippy::invalid_paths))]
pub const PERMISSIONS_FROM_MODE: [&str; 6] = ["std", "os", "unix", "fs", "PermissionsExt", "from_mode"];
pub const POLL: [&str; 4] = ["core", "task", "poll", "Poll"];
pub const POLL_PENDING: [&str; 5] = ["core", "task", "poll", "Poll", "Pending"];
pub const POLL_READY: [&str; 5] = ["core", "task", "poll", "Poll", "Ready"];
pub const PTR_COPY: [&str; 3] = ["core", "intrinsics", "copy"];
pub const PTR_COPY_NONOVERLAPPING: [&str; 3] = ["core", "intrinsics", "copy_nonoverlapping"];
pub const PTR_EQ: [&str; 3] = ["core", "ptr", "eq"];
pub const PTR_SLICE_FROM_RAW_PARTS: [&str; 3] = ["core", "ptr", "slice_from_raw_parts"];
pub const PTR_SLICE_FROM_RAW_PARTS_MUT: [&str; 3] = ["core", "ptr", "slice_from_raw_parts_mut"];
pub const PTR_SWAP_NONOVERLAPPING: [&str; 3] = ["core", "ptr", "swap_nonoverlapping"];
pub const PTR_READ: [&str; 3] = ["core", "ptr", "read"];
pub const PTR_READ_UNALIGNED: [&str; 3] = ["core", "ptr", "read_unaligned"];
pub const PTR_READ_VOLATILE: [&str; 3] = ["core", "ptr", "read_volatile"];
pub const PTR_REPLACE: [&str; 3] = ["core", "ptr", "replace"];
pub const PTR_SWAP: [&str; 3] = ["core", "ptr", "swap"];
pub const PTR_UNALIGNED_VOLATILE_LOAD: [&str; 3] = ["core", "intrinsics", "unaligned_volatile_load"];
pub const PTR_UNALIGNED_VOLATILE_STORE: [&str; 3] = ["core", "intrinsics", "unaligned_volatile_store"];
pub const PTR_WRITE: [&str; 3] = ["core", "ptr", "write"];
pub const PTR_WRITE_BYTES: [&str; 3] = ["core", "intrinsics", "write_bytes"];
pub const PTR_WRITE_UNALIGNED: [&str; 3] = ["core", "ptr", "write_unaligned"];
pub const PTR_WRITE_VOLATILE: [&str; 3] = ["core", "ptr", "write_volatile"];
pub const PUSH_STR: [&str; 4] = ["alloc", "string", "String", "push_str"];
pub const RANGE_ARGUMENT_TRAIT: [&str; 3] = ["core", "ops", "RangeBounds"];
pub const RC_PTR_EQ: [&str; 4] = ["alloc", "rc", "Rc", "ptr_eq"];
pub const REFCELL_REF: [&str; 3] = ["core", "cell", "Ref"];
pub const REFCELL_REFMUT: [&str; 3] = ["core", "cell", "RefMut"];
#[expect(clippy::invalid_paths)] // internal lints do not know about all external crates
pub const REGEX_BUILDER_NEW: [&str; 5] = ["regex", "re_builder", "unicode", "RegexBuilder", "new"];
#[expect(clippy::invalid_paths)] // internal lints do not know about all external crates
pub const REGEX_BYTES_BUILDER_NEW: [&str; 5] = ["regex", "re_builder", "bytes", "RegexBuilder", "new"];
#[expect(clippy::invalid_paths)] // internal lints do not know about all external crates
pub const REGEX_BYTES_NEW: [&str; 4] = ["regex", "re_bytes", "Regex", "new"];
#[expect(clippy::invalid_paths)] // internal lints do not know about all external crates
pub const REGEX_BYTES_SET_NEW: [&str; 5] = ["regex", "re_set", "bytes", "RegexSet", "new"];
#[expect(clippy::invalid_paths)] // internal lints do not know about all external crates
pub const REGEX_NEW: [&str; 4] = ["regex", "re_unicode", "Regex", "new"];
#[expect(clippy::invalid_paths)] // internal lints do not know about all external crates
pub const REGEX_SET_NEW: [&str; 5] = ["regex", "re_set", "unicode", "RegexSet", "new"];
/// Preferably use the diagnostic item `sym::Result` where possible
pub const RESULT: [&str; 3] = ["core", "result", "Result"];
pub const RESULT_ERR: [&str; 4] = ["core", "result", "Result", "Err"];
pub const RESULT_OK: [&str; 4] = ["core", "result", "Result", "Ok"];
#[cfg(feature = "internal")]
pub const RUSTC_VERSION: [&str; 2] = ["rustc_semver", "RustcVersion"];
pub const RWLOCK_READ_GUARD: [&str; 4] = ["std", "sync", "rwlock", "RwLockReadGuard"];
pub const RWLOCK_WRITE_GUARD: [&str; 4] = ["std", "sync", "rwlock", "RwLockWriteGuard"];
pub const SERDE_DESERIALIZE: [&str; 3] = ["serde", "de", "Deserialize"];
pub const SERDE_DE_VISITOR: [&str; 3] = ["serde", "de", "Visitor"];
pub const SLICE_FROM_RAW_PARTS: [&str; 4] = ["core", "slice", "raw", "from_raw_parts"];
pub const SLICE_FROM_RAW_PARTS_MUT: [&str; 4] = ["core", "slice", "raw", "from_raw_parts_mut"];
pub const SLICE_GET: [&str; 4] = ["core", "slice", "<impl [T]>", "get"];
pub const SLICE_INTO_VEC: [&str; 4] = ["alloc", "slice", "<impl [T]>", "into_vec"];
pub const SLICE_ITER: [&str; 4] = ["core", "slice", "iter", "Iter"];
pub const STDERR: [&str; 4] = ["std", "io", "stdio", "stderr"];
pub const STDOUT: [&str; 4] = ["std", "io", "stdio", "stdout"];
pub const CONVERT_IDENTITY: [&str; 3] = ["core", "convert", "identity"];
pub const STD_FS_CREATE_DIR: [&str; 3] = ["std", "fs", "create_dir"];
pub const STRING_AS_MUT_STR: [&str; 4] = ["alloc", "string", "String", "as_mut_str"];
pub const STRING_AS_STR: [&str; 4] = ["alloc", "string", "String", "as_str"];
pub const STRING_NEW: [&str; 4] = ["alloc", "string", "String", "new"];
pub const STR_BYTES: [&str; 4] = ["core", "str", "<impl str>", "bytes"];
pub const STR_ENDS_WITH: [&str; 4] = ["core", "str", "<impl str>", "ends_with"];
pub const STR_FROM_UTF8: [&str; 4] = ["core", "str", "converts", "from_utf8"];
pub const STR_LEN: [&str; 4] = ["core", "str", "<impl str>", "len"];
pub const STR_STARTS_WITH: [&str; 4] = ["core", "str", "<impl str>", "starts_with"];
#[cfg(feature = "internal")]
pub const SYMBOL: [&str; 3] = ["rustc_span", "symbol", "Symbol"];
#[cfg(feature = "internal")]
pub const SYMBOL_AS_STR: [&str; 4] = ["rustc_span", "symbol", "Symbol", "as_str"];
#[cfg(feature = "internal")]
pub const SYMBOL_INTERN: [&str; 4] = ["rustc_span", "symbol", "Symbol", "intern"];
#[cfg(feature = "internal")]
pub const SYMBOL_TO_IDENT_STRING: [&str; 4] = ["rustc_span", "symbol", "Symbol", "to_ident_string"];
#[cfg(feature = "internal")]
pub const SYM_MODULE: [&str; 3] = ["rustc_span", "symbol", "sym"];
#[cfg(feature = "internal")]
pub const SYNTAX_CONTEXT: [&str; 3] = ["rustc_span", "hygiene", "SyntaxContext"];
pub const TO_OWNED_METHOD: [&str; 4] = ["alloc", "borrow", "ToOwned", "to_owned"];
pub const TO_STRING_METHOD: [&str; 4] = ["alloc", "string", "ToString", "to_string"];
#[expect(clippy::invalid_paths)] // internal lints do not know about all external crates
pub const TOKIO_IO_ASYNCREADEXT: [&str; 5] = ["tokio", "io", "util", "async_read_ext", "AsyncReadExt"];
#[expect(clippy::invalid_paths)] // internal lints do not know about all external crates
pub const TOKIO_IO_ASYNCWRITEEXT: [&str; 5] = ["tokio", "io", "util", "async_write_ext", "AsyncWriteExt"];
pub const TRY_FROM: [&str; 4] = ["core", "convert", "TryFrom", "try_from"];
pub const VEC_AS_MUT_SLICE: [&str; 4] = ["alloc", "vec", "Vec", "as_mut_slice"];
pub const VEC_AS_SLICE: [&str; 4] = ["alloc", "vec", "Vec", "as_slice"];
pub const VEC_FROM_ELEM: [&str; 3] = ["alloc", "vec", "from_elem"];
pub const VEC_NEW: [&str; 4] = ["alloc", "vec", "Vec", "new"];
pub const VEC_RESIZE: [&str; 4] = ["alloc", "vec", "Vec", "resize"];
pub const WEAK_ARC: [&str; 3] = ["alloc", "sync", "Weak"];
pub const WEAK_RC: [&str; 3] = ["alloc", "rc", "Weak"];
pub const PTR_NON_NULL: [&str; 4] = ["core", "ptr", "non_null", "NonNull"];
| 70.925134 | 114 | 0.642389 |
f94b3c41cdf80599c9b15e8b6eec63df33710c47 | 15,030 | // Unless explicitly stated otherwise all files in this repository are licensed
// under the MIT/Apache-2.0 License, at your convenience
//
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2020 Datadog, Inc.
//
use crate::{
reactor::Reactor,
sys::{self, Source, SourceType},
};
use futures_lite::ready;
use nix::sys::socket::MsgFlags;
use std::{
cell::Cell,
io,
net::Shutdown,
os::unix::io::{AsRawFd, FromRawFd, RawFd},
rc::{Rc, Weak},
task::{Context, Poll, Waker},
time::{Duration, Instant},
};
type Result<T> = crate::Result<T, ()>;
/// Root trait for socket stream receive buffer
pub trait RxBuf {
fn read(&mut self, buf: &mut [u8]) -> usize;
fn peek(&self, buf: &mut [u8]) -> usize;
fn is_empty(&self) -> bool;
fn as_bytes(&self) -> &[u8];
fn consume(&mut self, amt: usize);
fn buffer_size(&self) -> usize;
fn handle_result(&mut self, result: usize);
fn unfilled(&mut self) -> &mut [u8];
}
#[derive(Debug, Default)]
pub struct NonBuffered;
impl RxBuf for NonBuffered {
fn read(&mut self, _buf: &mut [u8]) -> usize {
0
}
fn peek(&self, _buf: &mut [u8]) -> usize {
0
}
fn is_empty(&self) -> bool {
true
}
fn as_bytes(&self) -> &[u8] {
&[]
}
fn consume(&mut self, _amt: usize) {}
fn buffer_size(&self) -> usize {
0
}
fn handle_result(&mut self, _result: usize) {}
fn unfilled(&mut self) -> &mut [u8] {
&mut []
}
}
/// Trait for receive buffer implementations
pub trait Buffered: RxBuf {}
/// Non-shared fixed sized receive buffer allocated
/// when buffered stream is created
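/// Live bytes sit in `buf[head..tail]`: `read`/`consume` advance `head`,
/// while `handle_result` advances `tail` after `unfilled` has been written to.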
#[derive(Debug)]
pub struct Preallocated {
buf: Vec<u8>,
head: usize,
tail: usize,
cap: usize,
}
impl Preallocated {
const DEFAULT_BUFFER_SIZE: usize = 8192;
/// Creates a fixed sized receive buffer
pub fn new(size: usize) -> Self {
Self {
buf: vec![0; size],
tail: 0,
head: 0,
cap: size,
}
}
}
impl Default for Preallocated {
fn default() -> Self {
Self::new(Self::DEFAULT_BUFFER_SIZE)
}
}
impl Preallocated {
fn len(&self) -> usize {
self.tail - self.head
}
}
impl Buffered for Preallocated {}
impl RxBuf for Preallocated {
fn read(&mut self, buf: &mut [u8]) -> usize {
let sz = std::cmp::min(self.len(), buf.len());
if sz > 0 {
buf[..sz].copy_from_slice(&self.buf[self.head..self.head + sz]);
self.head += sz;
}
sz
}
fn peek(&self, buf: &mut [u8]) -> usize {
let sz = std::cmp::min(self.len(), buf.len());
if sz > 0 {
buf[..sz].copy_from_slice(&self.buf[self.head..self.head + sz]);
}
sz
}
fn is_empty(&self) -> bool {
self.head >= self.tail
}
fn as_bytes(&self) -> &[u8] {
&self.buf[self.head..self.tail]
}
fn consume(&mut self, amt: usize) {
self.head += std::cmp::min(self.len(), amt);
}
fn buffer_size(&self) -> usize {
self.cap
}
fn handle_result(&mut self, result: usize) {
self.tail += result;
}
fn unfilled(&mut self) -> &mut [u8] {
if self.len() == 0 {
self.head = 0;
self.tail = 0;
}
&mut self.buf[self.tail..]
}
}
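// Per-direction timeout bookkeeping: `timeout` holds the configured duration,
// `timer` the deadline currently armed in the reactor, if any. The timer is
// armed lazily on first poll and cancelled once the operation completes.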
#[derive(Debug)]
struct Timeout {
id: u64,
timeout: Cell<Option<Duration>>,
timer: Cell<Option<Instant>>,
}
impl Timeout {
fn new(id: u64) -> Self {
Self {
id,
timeout: Cell::new(None),
timer: Cell::new(None),
}
}
fn get(&self) -> Option<Duration> {
self.timeout.get()
}
fn set(&self, dur: Option<Duration>) -> Result<()> {
if let Some(dur) = dur.as_ref() {
if dur.as_nanos() == 0 {
return Err(io::Error::from_raw_os_error(libc::EINVAL).into());
}
}
self.timeout.set(dur);
Ok(())
}
fn maybe_set_timer(&self, reactor: &Reactor, waker: &Waker) {
if let Some(timeout) = self.timeout.get() {
if self.timer.get().is_none() {
let deadline = Instant::now() + timeout;
reactor.insert_timer(self.id, deadline, waker.clone());
self.timer.set(Some(deadline));
}
}
}
fn cancel_timer(&self, reactor: &Reactor) {
if self.timer.take().is_some() {
reactor.remove_timer(self.id);
}
}
fn check(&self, reactor: &Reactor) -> io::Result<()> {
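        // If the deadline is no longer registered, the reactor has already
        // fired it: the pending operation has timed out.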
if let Some(deadline) = self.timer.get() {
if !reactor.timer_exists(&(deadline, self.id)) {
reactor.remove_timer(self.id);
self.timer.take();
return Err(io::Error::new(
io::ErrorKind::TimedOut,
"Operation timed out",
));
}
}
Ok(())
}
}
#[derive(Debug)]
pub(crate) struct NonBufferedStream<S> {
reactor: Weak<Reactor>,
stream: S,
source_tx: Option<Source>,
source_rx: Option<Source>,
write_timeout: Timeout,
read_timeout: Timeout,
}
impl<S: AsRawFd> NonBufferedStream<S> {
fn init(&mut self) {
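        // Prime a readiness poll at construction time so the first
        // `poll_read` can tell immediately whether the fd is readable.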
let reactor = self.reactor.upgrade().unwrap();
let stream_fd = self.stream.as_raw_fd();
self.source_rx = Some(reactor.poll_read_ready(stream_fd));
}
pub(crate) fn try_peek(&self, buf: &mut [u8]) -> Option<io::Result<usize>> {
super::yolo_peek(self.stream.as_raw_fd(), buf)
}
pub(crate) async fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
let source = self.reactor.upgrade().unwrap().recv(
self.stream.as_raw_fd(),
buf.len(),
MsgFlags::MSG_PEEK,
);
let sz = source.collect_rw().await?;
match source.extract_source_type() {
SourceType::SockRecv(mut src) => {
buf[0..sz].copy_from_slice(&src.take().unwrap().as_bytes()[0..sz]);
}
_ => unreachable!(),
}
Ok(sz)
}
pub(crate) fn poll_read(
&mut self,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
let reactor = self.reactor.upgrade().unwrap();
let reactor = reactor.as_ref();
let no_pending_poll = self
.source_rx
.as_ref()
.map(|src| src.result().is_some())
.unwrap_or(true);
if no_pending_poll {
if let Some(result) = super::yolo_recv(self.stream.as_raw_fd(), buf) {
self.source_rx.take();
self.read_timeout.cancel_timer(reactor);
let result = poll_err!(result);
                // Start an early poll if the buffer is not fully filled, so
                // that the next time `poll_read` is called it is immediately
                // known whether the underlying stream is ready for reading.
if result > 0 && result < buf.len() {
self.source_rx = Some(reactor.poll_read_ready(self.stream.as_raw_fd()));
// The `rush_dispatch`s here and after could be removed to
// improve performance if #458 is handled appropriately.
// reactor.rush_dispatch(self.source_rx.as_ref().unwrap());
}
return Poll::Ready(Ok(result));
}
}
poll_err!(self.read_timeout.check(reactor));
if no_pending_poll {
self.source_rx = Some(reactor.poll_read_ready(self.stream.as_raw_fd()));
// reactor.rush_dispatch(self.source_rx.as_ref().unwrap());
}
let source = self.source_rx.as_ref().unwrap();
source.add_waiter_single(cx.waker());
self.read_timeout.maybe_set_timer(reactor, cx.waker());
Poll::Pending
}
pub(crate) fn poll_write(
&mut self,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
// On the write path, we always start with calling `yolo_send`, because
        // it is very likely to succeed. It could be a waste if it already timed
// out since the last `poll_write`, but it would not cost much more to
// give it one last chance in this case.
if let Some(result) = super::yolo_send(self.stream.as_raw_fd(), buf) {
let reactor = self.reactor.upgrade().unwrap();
self.write_timeout.cancel_timer(reactor.as_ref());
self.source_tx.take();
return Poll::Ready(result);
}
let reactor = self.reactor.upgrade().unwrap();
let reactor = reactor.as_ref();
poll_err!(self.write_timeout.check(reactor));
let no_pending_poll = self
.source_tx
.as_ref()
.map(|src| src.result().is_some())
.unwrap_or(true);
if no_pending_poll {
self.source_tx = Some(reactor.poll_write_ready(self.stream.as_raw_fd()));
}
let source = self.source_tx.as_ref().unwrap();
source.add_waiter_single(cx.waker());
self.write_timeout.maybe_set_timer(reactor, cx.waker());
Poll::Pending
}
pub(crate) fn poll_close(&mut self, _: &mut Context<'_>) -> Poll<io::Result<()>> {
self.source_tx.take();
Poll::Ready(sys::shutdown(self.stream.as_raw_fd(), Shutdown::Write))
}
    /// io_uring has support for shutdown now, but it is not in any released
    /// kernel. Even with my "let's use latest" policy it would be crazy to
    /// mandate a kernel that doesn't even exist. So in preparation for that,
    /// we emulate it synchronously for now, already behind this async wrapper.
pub(crate) fn poll_shutdown(
&self,
_cx: &mut Context<'_>,
how: Shutdown,
) -> Poll<io::Result<()>> {
Poll::Ready(sys::shutdown(self.stream.as_raw_fd(), how))
}
}
#[derive(Debug)]
pub(crate) struct GlommioStream<S, B> {
stream: NonBufferedStream<S>,
rx_buf: B,
rx_done: Cell<bool>,
}
impl<S> From<socket2::Socket> for GlommioStream<S, NonBuffered>
where
S: AsRawFd + From<socket2::Socket> + Unpin,
{
fn from(socket: socket2::Socket) -> Self {
let reactor = crate::executor().reactor();
let mut stream = NonBufferedStream {
reactor: Rc::downgrade(&reactor),
stream: socket.into(),
source_tx: None,
source_rx: None,
write_timeout: Timeout::new(reactor.register_timer()),
read_timeout: Timeout::new(reactor.register_timer()),
};
stream.init();
GlommioStream {
stream,
rx_buf: NonBuffered,
rx_done: Cell::new(false),
}
}
}
impl<S: AsRawFd> AsRawFd for GlommioStream<S, NonBuffered> {
fn as_raw_fd(&self) -> RawFd {
self.stream.stream.as_raw_fd()
}
}
impl<S> FromRawFd for GlommioStream<S, NonBuffered>
where
S: AsRawFd + FromRawFd + From<socket2::Socket> + Unpin,
{
unsafe fn from_raw_fd(fd: RawFd) -> Self {
let socket = socket2::Socket::from_raw_fd(fd);
GlommioStream::from(socket)
}
}
impl<S> GlommioStream<S, NonBuffered> {
pub(crate) fn buffered_with<B: Buffered>(self, rx_buf: B) -> GlommioStream<S, B> {
GlommioStream {
stream: self.stream,
rx_buf,
rx_done: self.rx_done,
}
}
}
impl<S: AsRawFd, B: RxBuf> GlommioStream<S, B> {
/// Receives data on the socket from the remote address to which it is
/// connected, without removing that data from the queue.
///
/// On success, returns the number of bytes peeked.
/// Successive calls return the same data. This is accomplished by passing
/// `MSG_PEEK` as a flag to the underlying `recv` system call.
pub(crate) async fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
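        // Serve whatever the internal buffer already holds, then top up with
        // a non-blocking peek; only fall back to the async path when neither
        // yielded anything and the stream is not known to be done.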
let mut pos = self.rx_buf.peek(buf);
if pos < buf.len() && !self.rx_done.get() {
if let Some(result) = self.stream.try_peek(&mut buf[pos..]) {
match result {
Err(e) => return Err(e),
Ok(len) => {
pos += len;
if len == 0 {
self.rx_done.set(true);
}
}
}
}
}
if pos > 0 || self.rx_done.get() {
return Ok(pos);
}
self.stream.peek(buf).await
}
pub(crate) fn poll_read(
&mut self,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
if self.rx_buf.is_empty() {
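            // A read at least as large as the internal buffer bypasses it
            // entirely; staging through the buffer would only add a copy.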
if buf.len() >= self.rx_buf.buffer_size() {
return self.stream.poll_read(cx, buf);
}
if !self.rx_done.get() {
poll_err!(ready!(self.poll_replenish_buffer(cx)));
}
}
Poll::Ready(Ok(self.rx_buf.read(buf)))
}
fn poll_replenish_buffer(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
let result = poll_err!(ready!(self.stream.poll_read(cx, self.rx_buf.unfilled())));
self.rx_buf.handle_result(result);
if result == 0 {
self.rx_done.set(true);
}
Poll::Ready(Ok(result))
}
pub(crate) fn poll_write(
&mut self,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
self.stream.poll_write(cx, buf)
}
pub(crate) fn poll_flush(&self, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
pub(crate) fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
self.stream.poll_close(cx)
}
pub(crate) fn poll_shutdown(
&self,
cx: &mut Context<'_>,
how: Shutdown,
) -> Poll<io::Result<()>> {
self.stream.poll_shutdown(cx, how)
}
pub(crate) fn set_write_timeout(&self, dur: Option<Duration>) -> Result<()> {
self.stream.write_timeout.set(dur)
}
pub(crate) fn set_read_timeout(&self, dur: Option<Duration>) -> Result<()> {
self.stream.read_timeout.set(dur)
}
pub(crate) fn write_timeout(&self) -> Option<Duration> {
self.stream.write_timeout.get()
}
pub(crate) fn read_timeout(&self) -> Option<Duration> {
self.stream.read_timeout.get()
}
pub(crate) fn stream(&self) -> &S {
&self.stream.stream
}
}
impl<S: AsRawFd, B: Buffered> GlommioStream<S, B> {
pub(crate) fn poll_fill_buf(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
if self.rx_buf.is_empty() {
poll_err!(ready!(self.poll_replenish_buffer(cx)));
}
Poll::Ready(Ok(self.rx_buf.as_bytes()))
}
pub(crate) fn consume(&mut self, amt: usize) {
self.rx_buf.consume(amt);
}
}
| 28.848369 | 113 | 0.546707 |
7583180718b2a8e942339a324e9ffe4c88119c02 | 3,672 | #![allow(clippy::module_inception)]
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::large_enum_variant)]
#![allow(clippy::wrong_self_convention)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::blacklisted_name)]
#![allow(clippy::vec_init_then_push)]
#![allow(rustdoc::bare_urls)]
#![warn(missing_docs)]
//! <note>
//! <p>This is <b>AWS WAF Classic Regional</b> documentation. For
//! more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS
//! WAF Classic</a> in the developer guide.</p>
//! <p>
//! <b>For the latest version of AWS
//! WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
//! </note>
//! <p>This is the <i>AWS WAF Regional Classic API Reference</i> for using AWS WAF Classic with the AWS resources, Elastic Load Balancing (ELB) Application Load Balancers and API Gateway APIs. The AWS WAF Classic actions and data types listed in the reference are available for protecting Elastic Load Balancing (ELB) Application Load Balancers and API Gateway APIs. You can use these actions and data types by means of the endpoints listed in <a href="https://docs.aws.amazon.com/general/latest/gr/rande.html#waf_region">AWS Regions and Endpoints</a>. This guide is for developers who need detailed information about the AWS WAF Classic API actions, data types, and errors. For detailed information about AWS WAF Classic features and an overview of how to use the AWS WAF Classic API, see the
//! <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS
//! WAF Classic</a> in the developer guide.</p>
//!
//! # Crate Organization
//!
//! The entry point for most customers will be [`Client`]. [`Client`] exposes one method for each API offered
//! by the service.
//!
//! Some APIs require complex or nested arguments. These exist in [`model`](crate::model).
//!
//! Lastly, errors that can be returned by the service are contained within [`error`]. [`Error`] defines a meta
//! error encompassing all possible errors that can be returned by the service.
//!
//! The other modules within this crate are not required for normal usage.
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use error_meta::Error;
pub use config::Config;
mod aws_endpoint;
/// Client and fluent builders for calling the service.
pub mod client;
/// Configuration for the service.
pub mod config;
/// Errors that can occur when calling the service.
pub mod error;
mod error_meta;
/// Input structures for operations.
pub mod input;
mod json_deser;
mod json_errors;
mod json_ser;
/// Middleware for the service.
pub mod middleware;
/// Data structures used by operation inputs/outputs.
pub mod model;
mod no_credentials;
/// All operations that this crate can perform.
pub mod operation;
mod operation_deser;
mod operation_ser;
/// Output structures for operations.
pub mod output;
/// Crate version number.
pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
pub use aws_smithy_http::byte_stream::ByteStream;
pub use aws_smithy_http::result::SdkError;
pub use aws_smithy_types::Blob;
pub use aws_smithy_types::DateTime;
static API_METADATA: aws_http::user_agent::ApiMetadata =
aws_http::user_agent::ApiMetadata::new("wafregional", PKG_VERSION);
pub use aws_smithy_http::endpoint::Endpoint;
pub use aws_smithy_types::retry::RetryConfig;
pub use aws_types::app_name::AppName;
pub use aws_types::region::Region;
pub use aws_types::Credentials;
pub use client::Client;
// ---- file: 691af29a27b3a6d1d2f148539525d55dc56a942e ----
use std::path::PathBuf;
/// A window-related event.
#[derive(PartialEq, Clone, Debug)]
pub enum Event {
/// A window was moved.
Moved {
/// The new logical x location of the window
x: i32,
/// The new logical y location of the window
y: i32,
},
/// A window was resized.
Resized {
/// The new logical width of the window
width: u32,
/// The new logical height of the window
height: u32,
},
    /// The user has requested that the window be closed.
///
/// Usually, you will want to terminate the execution whenever this event
/// occurs.
CloseRequested,
/// A window was focused.
Focused,
/// A window was unfocused.
Unfocused,
/// A file is being hovered over the window.
///
/// When the user hovers multiple files at once, this event will be emitted
/// for each file separately.
FileHovered(PathBuf),
    /// A file has been dropped into the window.
///
/// When the user drops multiple files at once, this event will be emitted
/// for each file separately.
FileDropped(PathBuf),
/// A file was hovered, but has exited the window.
///
/// There will be a single `FilesHoveredLeft` event triggered even if
/// multiple files were hovered.
FilesHoveredLeft,
}
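
// A minimal sketch (not part of this crate) of how an application might
// consume these events in an update loop; `resize_viewport`, `open_file`,
// and `should_exit` are hypothetical:
//
//     match event {
//         Event::Resized { width, height } => resize_viewport(width, height),
//         Event::CloseRequested => should_exit = true,
//         Event::FileDropped(path) => open_file(&path),
//         _ => {}
//     }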
// ---- file: e96f5b9e8833c5f1204a03de9874c857c198ecf5 ----
use std::cell::RefCell;
use std::fmt;
use std::future::Future;
use std::rc::Rc;
use actix_http::Extensions;
use actix_router::{ResourceDef, Router};
use actix_service::boxed::{self, BoxService, BoxServiceFactory};
use actix_service::{
apply, apply_fn_factory, IntoServiceFactory, Service, ServiceFactory, ServiceFactoryExt,
Transform,
};
use futures_core::future::LocalBoxFuture;
use futures_util::future::join_all;
use crate::config::ServiceConfig;
use crate::data::Data;
use crate::dev::{AppService, HttpServiceFactory};
use crate::error::Error;
use crate::guard::Guard;
use crate::resource::Resource;
use crate::rmap::ResourceMap;
use crate::route::Route;
use crate::service::{
AppServiceFactory, ServiceFactoryWrapper, ServiceRequest, ServiceResponse,
};
type Guards = Vec<Box<dyn Guard>>;
type HttpService = BoxService<ServiceRequest, ServiceResponse, Error>;
type HttpNewService = BoxServiceFactory<(), ServiceRequest, ServiceResponse, Error, ()>;
/// Resources scope.
///
/// A scope is a set of resources with a common root path.
/// Scopes collect multiple paths under a common path prefix.
/// A scope's path can contain variable path segments, like resources.
/// A scope prefix is always a complete path segment, i.e. `/app` would
/// be converted to `/app/` and it would not match the `/app` path.
///
/// You can get variable path segments from `HttpRequest::match_info()`.
/// The `Path` extractor is also able to extract scope-level variable segments.
///
/// ```rust
/// use actix_web::{web, App, HttpResponse};
///
/// fn main() {
/// let app = App::new().service(
/// web::scope("/{project_id}/")
/// .service(web::resource("/path1").to(|| async { HttpResponse::Ok() }))
/// .service(web::resource("/path2").route(web::get().to(|| HttpResponse::Ok())))
/// .service(web::resource("/path3").route(web::head().to(|| HttpResponse::MethodNotAllowed())))
/// );
/// }
/// ```
///
/// In the above example, three routes get registered:
/// * /{project_id}/path1 - responds to all HTTP methods
/// * /{project_id}/path2 - `GET` requests
/// * /{project_id}/path3 - `HEAD` requests
pub struct Scope<T = ScopeEndpoint> {
endpoint: T,
rdef: String,
app_data: Option<Extensions>,
services: Vec<Box<dyn AppServiceFactory>>,
guards: Vec<Box<dyn Guard>>,
default: Option<Rc<HttpNewService>>,
external: Vec<ResourceDef>,
factory_ref: Rc<RefCell<Option<ScopeFactory>>>,
}
impl Scope {
/// Create a new scope
pub fn new(path: &str) -> Scope {
let fref = Rc::new(RefCell::new(None));
Scope {
endpoint: ScopeEndpoint::new(fref.clone()),
rdef: path.to_string(),
app_data: None,
guards: Vec::new(),
services: Vec::new(),
default: None,
external: Vec::new(),
factory_ref: fref,
}
}
}
impl<T> Scope<T>
where
T: ServiceFactory<
ServiceRequest,
Config = (),
Response = ServiceResponse,
Error = Error,
InitError = (),
>,
{
/// Add match guard to a scope.
///
/// ```rust
/// use actix_web::{web, guard, App, HttpRequest, HttpResponse};
///
/// async fn index(data: web::Path<(String, String)>) -> &'static str {
/// "Welcome!"
/// }
///
/// fn main() {
/// let app = App::new().service(
/// web::scope("/app")
/// .guard(guard::Header("content-type", "text/plain"))
/// .route("/test1", web::get().to(index))
/// .route("/test2", web::post().to(|r: HttpRequest| {
/// HttpResponse::MethodNotAllowed()
/// }))
/// );
/// }
/// ```
pub fn guard<G: Guard + 'static>(mut self, guard: G) -> Self {
self.guards.push(Box::new(guard));
self
}
    /// Set or override application data. Application data can be accessed
    /// by using the `Data<T>` extractor, where `T` is the data type.
///
/// ```rust
/// use std::cell::Cell;
/// use actix_web::{web, App, HttpResponse, Responder};
///
/// struct MyData {
/// counter: Cell<usize>,
/// }
///
/// async fn index(data: web::Data<MyData>) -> impl Responder {
/// data.counter.set(data.counter.get() + 1);
/// HttpResponse::Ok()
/// }
///
/// fn main() {
/// let app = App::new().service(
/// web::scope("/app")
/// .data(MyData{ counter: Cell::new(0) })
/// .service(
/// web::resource("/index.html").route(
/// web::get().to(index)))
/// );
/// }
/// ```
pub fn data<U: 'static>(self, data: U) -> Self {
self.app_data(Data::new(data))
}
/// Add scope data.
///
/// Data of different types from parent contexts will still be accessible.
pub fn app_data<U: 'static>(mut self, data: U) -> Self {
self.app_data
.get_or_insert_with(Extensions::new)
.insert(data);
self
}
    /// Run external configuration as part of the scope building
    /// process.
    ///
    /// This function is useful for moving parts of the configuration to a
    /// different module or even a library. For example, some of a resource's
    /// configuration could be moved to a different module.
///
/// ```rust
/// # extern crate actix_web;
/// use actix_web::{web, middleware, App, HttpResponse};
///
/// // this function could be located in different module
/// fn config(cfg: &mut web::ServiceConfig) {
/// cfg.service(web::resource("/test")
/// .route(web::get().to(|| HttpResponse::Ok()))
/// .route(web::head().to(|| HttpResponse::MethodNotAllowed()))
/// );
/// }
///
/// fn main() {
/// let app = App::new()
/// .wrap(middleware::Logger::default())
/// .service(
/// web::scope("/api")
/// .configure(config)
/// )
/// .route("/index.html", web::get().to(|| HttpResponse::Ok()));
/// }
/// ```
pub fn configure<F>(mut self, f: F) -> Self
where
F: FnOnce(&mut ServiceConfig),
{
let mut cfg = ServiceConfig::new();
f(&mut cfg);
self.services.extend(cfg.services);
self.external.extend(cfg.external);
self.app_data
.get_or_insert_with(Extensions::new)
.extend(cfg.app_data);
self
}
/// Register HTTP service.
///
    /// This is similar to `App`'s service registration.
    ///
    /// Actix Web provides several service implementations:
    ///
    /// * *Resource* is an entry in the resource table which corresponds to a requested URL.
    /// * *Scope* is a set of resources with a common root path.
    /// * *StaticFiles* is a service for static file support
///
/// ```rust
/// use actix_web::{web, App, HttpRequest};
///
/// struct AppState;
///
/// async fn index(req: HttpRequest) -> &'static str {
/// "Welcome!"
/// }
///
/// fn main() {
/// let app = App::new().service(
/// web::scope("/app").service(
/// web::scope("/v1")
/// .service(web::resource("/test1").to(index)))
/// );
/// }
/// ```
pub fn service<F>(mut self, factory: F) -> Self
where
F: HttpServiceFactory + 'static,
{
self.services
.push(Box::new(ServiceFactoryWrapper::new(factory)));
self
}
/// Configure route for a specific path.
///
/// This is a simplified version of the `Scope::service()` method.
    /// This method can be called multiple times; in that case,
    /// multiple resources with one route each will be registered for the same resource path.
///
/// ```rust
/// use actix_web::{web, App, HttpResponse};
///
/// async fn index(data: web::Path<(String, String)>) -> &'static str {
/// "Welcome!"
/// }
///
/// fn main() {
/// let app = App::new().service(
/// web::scope("/app")
/// .route("/test1", web::get().to(index))
/// .route("/test2", web::post().to(|| HttpResponse::MethodNotAllowed()))
/// );
/// }
/// ```
pub fn route(self, path: &str, mut route: Route) -> Self {
self.service(
Resource::new(path)
.add_guards(route.take_guards())
.route(route),
)
}
/// Default service to be used if no matching route could be found.
///
    /// If a default resource is not registered, the app's default resource is used.
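    ///
    /// A minimal sketch of registering a scope-level default handler
    /// (the `NotFound` response here is just for illustration):
    ///
    /// ```rust
    /// use actix_web::{web, App, HttpResponse};
    ///
    /// let app = App::new().service(
    ///     web::scope("/app")
    ///         .service(web::resource("/path1").to(HttpResponse::Ok))
    ///         .default_service(web::to(HttpResponse::NotFound))
    /// );
    /// ```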
pub fn default_service<F, U>(mut self, f: F) -> Self
where
F: IntoServiceFactory<U, ServiceRequest>,
U: ServiceFactory<
ServiceRequest,
Config = (),
Response = ServiceResponse,
Error = Error,
> + 'static,
U::InitError: fmt::Debug,
{
// create and configure default resource
self.default = Some(Rc::new(boxed::factory(f.into_factory().map_init_err(
|e| log::error!("Can not construct default service: {:?}", e),
))));
self
}
    /// Registers middleware, in the form of a middleware component (type),
    /// that runs during inbound processing in the request
    /// life-cycle (request -> response), modifying the request as
    /// necessary, across all requests managed by the *Scope*. Scope-level
    /// middleware is more limited in what it can modify, relative to Route or
    /// Application level middleware, in that Scope-level middleware cannot modify
    /// `ServiceResponse`.
///
/// Use middleware when you need to read or modify *every* request in some way.
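    ///
    /// A minimal sketch using the `Logger` middleware from `crate::middleware`
    /// (any type satisfying the required `Transform` bound works here):
    ///
    /// ```rust
    /// use actix_web::{web, middleware, App, HttpResponse};
    ///
    /// let app = App::new().service(
    ///     web::scope("/app")
    ///         .wrap(middleware::Logger::default())
    ///         .route("/index.html", web::get().to(HttpResponse::Ok))
    /// );
    /// ```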
pub fn wrap<M>(
self,
mw: M,
) -> Scope<
impl ServiceFactory<
ServiceRequest,
Config = (),
Response = ServiceResponse,
Error = Error,
InitError = (),
>,
>
where
M: Transform<
T::Service,
ServiceRequest,
Response = ServiceResponse,
Error = Error,
InitError = (),
>,
{
Scope {
endpoint: apply(mw, self.endpoint),
rdef: self.rdef,
app_data: self.app_data,
guards: self.guards,
services: self.services,
default: self.default,
external: self.external,
factory_ref: self.factory_ref,
}
}
/// Registers middleware, in the form of a closure, that runs during inbound
    /// processing in the request life-cycle (request -> response), modifying
    /// the request as necessary, across all requests managed by the *Scope*.
    /// Scope-level middleware is more limited in what it can modify, relative
    /// to Route or Application level middleware, in that Scope-level middleware
    /// cannot modify `ServiceResponse`.
///
/// ```rust
/// use actix_service::Service;
/// use actix_web::{web, App};
/// use actix_web::http::{header::CONTENT_TYPE, HeaderValue};
///
/// async fn index() -> &'static str {
/// "Welcome!"
/// }
///
/// fn main() {
/// let app = App::new().service(
/// web::scope("/app")
/// .wrap_fn(|req, srv| {
/// let fut = srv.call(req);
/// async {
/// let mut res = fut.await?;
/// res.headers_mut().insert(
/// CONTENT_TYPE, HeaderValue::from_static("text/plain"),
/// );
/// Ok(res)
/// }
/// })
/// .route("/index.html", web::get().to(index)));
/// }
/// ```
pub fn wrap_fn<F, R>(
self,
mw: F,
) -> Scope<
impl ServiceFactory<
ServiceRequest,
Config = (),
Response = ServiceResponse,
Error = Error,
InitError = (),
>,
>
where
F: Fn(ServiceRequest, &T::Service) -> R + Clone,
R: Future<Output = Result<ServiceResponse, Error>>,
{
Scope {
endpoint: apply_fn_factory(self.endpoint, mw),
rdef: self.rdef,
app_data: self.app_data,
guards: self.guards,
services: self.services,
default: self.default,
external: self.external,
factory_ref: self.factory_ref,
}
}
}
impl<T> HttpServiceFactory for Scope<T>
where
T: ServiceFactory<
ServiceRequest,
Config = (),
Response = ServiceResponse,
Error = Error,
InitError = (),
> + 'static,
{
fn register(mut self, config: &mut AppService) {
// update default resource if needed
let default = self.default.unwrap_or_else(|| config.default_service());
// register nested services
let mut cfg = config.clone_config();
self.services
.into_iter()
.for_each(|mut srv| srv.register(&mut cfg));
let mut rmap = ResourceMap::new(ResourceDef::root_prefix(&self.rdef));
// external resources
for mut rdef in std::mem::take(&mut self.external) {
rmap.add(&mut rdef, None);
}
// complete scope pipeline creation
*self.factory_ref.borrow_mut() = Some(ScopeFactory {
app_data: self.app_data.take().map(Rc::new),
default,
services: cfg
.into_services()
.1
.into_iter()
.map(|(mut rdef, srv, guards, nested)| {
rmap.add(&mut rdef, nested);
(rdef, srv, RefCell::new(guards))
})
.collect::<Vec<_>>()
.into_boxed_slice()
.into(),
});
// get guards
let guards = if self.guards.is_empty() {
None
} else {
Some(self.guards)
};
// register final service
config.register_service(
ResourceDef::root_prefix(&self.rdef),
guards,
self.endpoint,
Some(Rc::new(rmap)),
)
}
}
pub struct ScopeFactory {
app_data: Option<Rc<Extensions>>,
services: Rc<[(ResourceDef, HttpNewService, RefCell<Option<Guards>>)]>,
default: Rc<HttpNewService>,
}
impl ServiceFactory<ServiceRequest> for ScopeFactory {
type Response = ServiceResponse;
type Error = Error;
type Config = ();
type Service = ScopeService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
// construct default service factory future
let default_fut = self.default.new_service(());
        // construct all service factory futures with their resource defs and guards.
let factory_fut = join_all(self.services.iter().map(|(path, factory, guards)| {
let path = path.clone();
let guards = guards.borrow_mut().take();
let factory_fut = factory.new_service(());
async move {
let service = factory_fut.await?;
Ok((path, guards, service))
}
}));
let app_data = self.app_data.clone();
Box::pin(async move {
let default = default_fut.await?;
// build router from the factory future result.
let router = factory_fut
.await
.into_iter()
.collect::<Result<Vec<_>, _>>()?
.drain(..)
.fold(Router::build(), |mut router, (path, guards, service)| {
router.rdef(path, service).2 = guards;
router
})
.finish();
Ok(ScopeService {
app_data,
router,
default,
})
})
}
}
pub struct ScopeService {
app_data: Option<Rc<Extensions>>,
router: Router<HttpService, Vec<Box<dyn Guard>>>,
default: HttpService,
}
impl Service<ServiceRequest> for ScopeService {
type Response = ServiceResponse;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, mut req: ServiceRequest) -> Self::Future {
let res = self.router.recognize_checked(&mut req, |req, guards| {
if let Some(ref guards) = guards {
for f in guards {
if !f.check(req.head()) {
return false;
}
}
}
true
});
if let Some(ref app_data) = self.app_data {
req.add_data_container(app_data.clone());
}
if let Some((srv, _info)) = res {
srv.call(req)
} else {
self.default.call(req)
}
}
}
#[doc(hidden)]
pub struct ScopeEndpoint {
factory: Rc<RefCell<Option<ScopeFactory>>>,
}
impl ScopeEndpoint {
fn new(factory: Rc<RefCell<Option<ScopeFactory>>>) -> Self {
ScopeEndpoint { factory }
}
}
impl ServiceFactory<ServiceRequest> for ScopeEndpoint {
type Response = ServiceResponse;
type Error = Error;
type Config = ();
type Service = ScopeService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
self.factory.borrow_mut().as_mut().unwrap().new_service(())
}
}
#[cfg(test)]
mod tests {
use actix_service::Service;
use bytes::Bytes;
use futures_util::future::ok;
use crate::dev::{Body, ResponseBody};
use crate::http::{header, HeaderValue, Method, StatusCode};
use crate::middleware::DefaultHeaders;
use crate::service::ServiceRequest;
use crate::test::{call_service, init_service, read_body, TestRequest};
use crate::{guard, web, App, HttpRequest, HttpResponse};
#[actix_rt::test]
async fn test_scope() {
let srv =
init_service(App::new().service(
web::scope("/app").service(web::resource("/path1").to(HttpResponse::Ok)),
))
.await;
let req = TestRequest::with_uri("/app/path1").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_scope_root() {
let srv = init_service(
App::new().service(
web::scope("/app")
.service(web::resource("").to(HttpResponse::Ok))
.service(web::resource("/").to(HttpResponse::Created)),
),
)
.await;
let req = TestRequest::with_uri("/app").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let req = TestRequest::with_uri("/app/").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::CREATED);
}
#[actix_rt::test]
async fn test_scope_root2() {
let srv = init_service(
App::new()
.service(web::scope("/app/").service(web::resource("").to(HttpResponse::Ok))),
)
.await;
let req = TestRequest::with_uri("/app").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri("/app/").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_scope_root3() {
let srv = init_service(
App::new()
.service(web::scope("/app/").service(web::resource("/").to(HttpResponse::Ok))),
)
.await;
let req = TestRequest::with_uri("/app").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri("/app/").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_scope_route() {
let srv = init_service(
App::new().service(
web::scope("app")
.route("/path1", web::get().to(HttpResponse::Ok))
.route("/path1", web::delete().to(HttpResponse::Ok)),
),
)
.await;
let req = TestRequest::with_uri("/app/path1").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let req = TestRequest::with_uri("/app/path1")
.method(Method::DELETE)
.to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let req = TestRequest::with_uri("/app/path1")
.method(Method::POST)
.to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_scope_route_without_leading_slash() {
let srv = init_service(
App::new().service(
web::scope("app").service(
web::resource("path1")
.route(web::get().to(HttpResponse::Ok))
.route(web::delete().to(HttpResponse::Ok)),
),
),
)
.await;
let req = TestRequest::with_uri("/app/path1").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let req = TestRequest::with_uri("/app/path1")
.method(Method::DELETE)
.to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let req = TestRequest::with_uri("/app/path1")
.method(Method::POST)
.to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED);
}
#[actix_rt::test]
async fn test_scope_guard() {
let srv = init_service(
App::new().service(
web::scope("/app")
.guard(guard::Get())
.service(web::resource("/path1").to(HttpResponse::Ok)),
),
)
.await;
let req = TestRequest::with_uri("/app/path1")
.method(Method::POST)
.to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri("/app/path1")
.method(Method::GET)
.to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_scope_variable_segment() {
let srv = init_service(App::new().service(web::scope("/ab-{project}").service(
web::resource("/path1").to(|r: HttpRequest| {
HttpResponse::Ok().body(format!("project: {}", &r.match_info()["project"]))
}),
)))
.await;
let req = TestRequest::with_uri("/ab-project1/path1").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
match resp.response().body() {
ResponseBody::Body(Body::Bytes(ref b)) => {
let bytes = b.clone();
assert_eq!(bytes, Bytes::from_static(b"project: project1"));
}
_ => panic!(),
}
let req = TestRequest::with_uri("/aa-project1/path1").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_nested_scope() {
let srv = init_service(App::new().service(web::scope("/app").service(
web::scope("/t1").service(web::resource("/path1").to(HttpResponse::Created)),
)))
.await;
let req = TestRequest::with_uri("/app/t1/path1").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::CREATED);
}
#[actix_rt::test]
async fn test_nested_scope_no_slash() {
let srv = init_service(App::new().service(web::scope("/app").service(
web::scope("t1").service(web::resource("/path1").to(HttpResponse::Created)),
)))
.await;
let req = TestRequest::with_uri("/app/t1/path1").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::CREATED);
}
#[actix_rt::test]
async fn test_nested_scope_root() {
let srv = init_service(
App::new().service(
web::scope("/app").service(
web::scope("/t1")
.service(web::resource("").to(HttpResponse::Ok))
.service(web::resource("/").to(HttpResponse::Created)),
),
),
)
.await;
let req = TestRequest::with_uri("/app/t1").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let req = TestRequest::with_uri("/app/t1/").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::CREATED);
}
#[actix_rt::test]
async fn test_nested_scope_filter() {
let srv = init_service(
App::new().service(
web::scope("/app").service(
web::scope("/t1")
.guard(guard::Get())
.service(web::resource("/path1").to(HttpResponse::Ok)),
),
),
)
.await;
let req = TestRequest::with_uri("/app/t1/path1")
.method(Method::POST)
.to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri("/app/t1/path1")
.method(Method::GET)
.to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_nested_scope_with_variable_segment() {
let srv = init_service(App::new().service(web::scope("/app").service(
web::scope("/{project_id}").service(web::resource("/path1").to(
|r: HttpRequest| {
HttpResponse::Created()
.body(format!("project: {}", &r.match_info()["project_id"]))
},
)),
)))
.await;
let req = TestRequest::with_uri("/app/project_1/path1").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::CREATED);
match resp.response().body() {
ResponseBody::Body(Body::Bytes(ref b)) => {
let bytes = b.clone();
assert_eq!(bytes, Bytes::from_static(b"project: project_1"));
}
_ => panic!(),
}
}
#[actix_rt::test]
async fn test_nested2_scope_with_variable_segment() {
let srv = init_service(App::new().service(web::scope("/app").service(
web::scope("/{project}").service(web::scope("/{id}").service(
web::resource("/path1").to(|r: HttpRequest| {
HttpResponse::Created().body(format!(
"project: {} - {}",
&r.match_info()["project"],
&r.match_info()["id"],
))
}),
)),
)))
.await;
let req = TestRequest::with_uri("/app/test/1/path1").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::CREATED);
match resp.response().body() {
ResponseBody::Body(Body::Bytes(ref b)) => {
let bytes = b.clone();
assert_eq!(bytes, Bytes::from_static(b"project: test - 1"));
}
_ => panic!(),
}
let req = TestRequest::with_uri("/app/test/1/path2").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_default_resource() {
let srv = init_service(
App::new().service(
web::scope("/app")
.service(web::resource("/path1").to(HttpResponse::Ok))
.default_service(|r: ServiceRequest| {
ok(r.into_response(HttpResponse::BadRequest()))
}),
),
)
.await;
let req = TestRequest::with_uri("/app/path2").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
let req = TestRequest::with_uri("/path2").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_default_resource_propagation() {
let srv = init_service(
App::new()
.service(
web::scope("/app1")
.default_service(web::resource("").to(HttpResponse::BadRequest)),
)
.service(web::scope("/app2"))
.default_service(|r: ServiceRequest| {
ok(r.into_response(HttpResponse::MethodNotAllowed()))
}),
)
.await;
let req = TestRequest::with_uri("/non-exist").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED);
let req = TestRequest::with_uri("/app1/non-exist").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
let req = TestRequest::with_uri("/app2/non-exist").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED);
}
#[actix_rt::test]
async fn test_middleware() {
let srv = init_service(
App::new().service(
web::scope("app")
.wrap(
DefaultHeaders::new()
.header(header::CONTENT_TYPE, HeaderValue::from_static("0001")),
)
.service(web::resource("/test").route(web::get().to(HttpResponse::Ok))),
),
)
.await;
let req = TestRequest::with_uri("/app/test").to_request();
let resp = call_service(&srv, req).await;
assert_eq!(resp.status(), StatusCode::OK);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
HeaderValue::from_static("0001")
);
}
#[actix_rt::test]
async fn test_middleware_fn() {
let srv = init_service(
App::new().service(
web::scope("app")
.wrap_fn(|req, srv| {
let fut = srv.call(req);
async move {
let mut res = fut.await?;
res.headers_mut()
.insert(header::CONTENT_TYPE, HeaderValue::from_static("0001"));
Ok(res)
}
})
.route("/test", web::get().to(HttpResponse::Ok)),
),
)
.await;
let req = TestRequest::with_uri("/app/test").to_request();
let resp = call_service(&srv, req).await;
assert_eq!(resp.status(), StatusCode::OK);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
HeaderValue::from_static("0001")
);
}
#[actix_rt::test]
async fn test_override_data() {
let srv = init_service(App::new().data(1usize).service(
web::scope("app").data(10usize).route(
"/t",
web::get().to(|data: web::Data<usize>| {
assert_eq!(**data, 10);
HttpResponse::Ok()
}),
),
))
.await;
let req = TestRequest::with_uri("/app/t").to_request();
let resp = call_service(&srv, req).await;
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_override_data_default_service() {
let srv = init_service(App::new().data(1usize).service(
web::scope("app").data(10usize).default_service(web::to(
|data: web::Data<usize>| {
assert_eq!(**data, 10);
HttpResponse::Ok()
},
)),
))
.await;
let req = TestRequest::with_uri("/app/t").to_request();
let resp = call_service(&srv, req).await;
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_override_app_data() {
let srv = init_service(App::new().app_data(web::Data::new(1usize)).service(
web::scope("app").app_data(web::Data::new(10usize)).route(
"/t",
web::get().to(|data: web::Data<usize>| {
assert_eq!(**data, 10);
HttpResponse::Ok()
}),
),
))
.await;
let req = TestRequest::with_uri("/app/t").to_request();
let resp = call_service(&srv, req).await;
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_scope_config() {
let srv = init_service(App::new().service(web::scope("/app").configure(|s| {
s.route("/path1", web::get().to(HttpResponse::Ok));
})))
.await;
let req = TestRequest::with_uri("/app/path1").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_scope_config_2() {
let srv = init_service(App::new().service(web::scope("/app").configure(|s| {
s.service(web::scope("/v1").configure(|s| {
s.route("/", web::get().to(HttpResponse::Ok));
}));
})))
.await;
let req = TestRequest::with_uri("/app/v1/").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_url_for_external() {
let srv = init_service(App::new().service(web::scope("/app").configure(|s| {
s.service(web::scope("/v1").configure(|s| {
s.external_resource("youtube", "https://youtube.com/watch/{video_id}");
s.route(
"/",
web::get().to(|req: HttpRequest| {
HttpResponse::Ok()
.body(req.url_for("youtube", &["xxxxxx"]).unwrap().to_string())
}),
);
}));
})))
.await;
let req = TestRequest::with_uri("/app/v1/").to_request();
let resp = srv.call(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let body = read_body(resp).await;
assert_eq!(body, &b"https://youtube.com/watch/xxxxxx"[..]);
}
#[actix_rt::test]
async fn test_url_for_nested() {
let srv = init_service(App::new().service(web::scope("/a").service(
web::scope("/b").service(web::resource("/c/{stuff}").name("c").route(
web::get().to(|req: HttpRequest| {
HttpResponse::Ok()
.body(format!("{}", req.url_for("c", &["12345"]).unwrap()))
}),
)),
)))
.await;
let req = TestRequest::with_uri("/a/b/c/test").to_request();
let resp = call_service(&srv, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let body = read_body(resp).await;
assert_eq!(
body,
Bytes::from_static(b"http://localhost:8080/a/b/c/12345")
);
}
}
// ---- file: 48568f38d629c1e40db8834d3184bc30873fdc76 ----
#![feature(proc_macro_hygiene, decl_macro)]
// Temporarily silence warnings caused by Diesel (https://github.com/diesel-rs/diesel/issues/1785)
#![allow(proc_macro_derive_resolution_fallback)]
#[macro_use]
extern crate rocket;
#[macro_use]
extern crate rocket_contrib;
#[macro_use]
extern crate diesel;
// Utility local dependencies
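// `database` exposes the `ClubDbConn` fairing attached in `main` below.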
mod database;
mod schema;
// Table-specific local dependencies
mod attendance;
mod event;
mod member;
// Check to see if the server is working
#[get("/")]
fn index() -> &'static str {
"Hello, world!"
}
// Launch the REST server with the database connection
fn main() {
rocket::ignite()
.attach(database::ClubDbConn::fairing())
        // Note: Be sure to mount all the routes from different modules
.mount("/", routes![index])
.launch();
}
// ---- file: eb72c9fa78fafae7775f65d3f3dd948ccd7506b4 ----
// Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md>
//! FFI bindings to msxml6.
#![no_std]
#![experimental]
extern crate winapi;
use winapi::*;
extern "system" {
}
// ---- file: 8a7e79e91e2acfa20582a43fb8da025033d49e00 ----
use fnv::FnvHashMap;
use prelude::*;
use std::collections::{HashMap, VecDeque};
#[derive(Serialize, Deserialize)]
struct EgressTx {
node: NodeIndex,
local: LocalNodeIndex,
dest: ReplicaAddr,
}
#[derive(Serialize, Deserialize)]
pub struct Egress {
txs: Vec<EgressTx>,
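    // for each replay tag, the ingress node that replay messages must be forwarded to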
tags: HashMap<Tag, NodeIndex>,
}
impl Clone for Egress {
fn clone(&self) -> Self {
assert!(self.txs.is_empty());
Self {
txs: Vec::new(),
tags: self.tags.clone(),
}
}
}
impl Default for Egress {
fn default() -> Self {
Self {
tags: Default::default(),
txs: Default::default(),
}
}
}
impl Egress {
pub fn add_tx(&mut self, dst_g: NodeIndex, dst_l: LocalNodeIndex, addr: ReplicaAddr) {
self.txs.push(EgressTx {
node: dst_g,
local: dst_l,
dest: addr,
});
}
pub fn add_tag(&mut self, tag: Tag, dst: NodeIndex) {
self.tags.insert(tag, dst);
}
pub fn process(
&mut self,
m: &mut Option<Box<Packet>>,
shard: usize,
output: &mut FnvHashMap<ReplicaAddr, VecDeque<Box<Packet>>>,
) {
let &mut Self {
ref mut txs,
ref tags,
} = self;
// send any queued updates to all external children
        assert!(!txs.is_empty());
let txn = txs.len() - 1;
// we need to find the ingress node following this egress according to the path
// with replay.tag, and then forward this message only on the channel corresponding
// to that ingress node.
let replay_to = m.as_ref().unwrap().tag().map(|tag| {
tags.get(&tag)
.map(|n| *n)
.expect("egress node told about replay message, but not on replay path")
});
for (txi, ref mut tx) in txs.iter_mut().enumerate() {
let mut take = txi == txn;
if let Some(replay_to) = replay_to.as_ref() {
if *replay_to == tx.node {
take = true;
} else {
continue;
}
}
// Avoid cloning if this is last send
let mut m = if take {
m.take().unwrap()
} else {
// we know this is a data (not a replay)
// because, a replay will force a take
m.as_ref().map(|m| box m.clone_data()).unwrap()
};
// src is usually ignored and overwritten by ingress
// *except* if the ingress is marked as a shard merger
// in which case it wants to know about the shard
m.link_mut().src = unsafe { LocalNodeIndex::make(shard as u32) };
m.link_mut().dst = tx.local;
output.entry(tx.dest).or_default().push_back(m);
if take {
break;
}
}
}
}
// ---- file: f93748378872d4f8599fa2d504a205c7e5dca3fb ----
use crate::ast::{Ident, SpannedTyped, VirErr};
use crate::def::Spanned;
use crate::sst::{BndX, Dest, Exp, ExpX, Stm, StmX, Trig, Trigs, UniqueIdent};
use crate::util::{vec_map, vec_map_result};
use crate::visitor::expr_visitor_control_flow;
pub(crate) use crate::visitor::VisitorControlFlow;
use air::ast::{Binder, BinderX};
use air::scope_map::ScopeMap;
use std::collections::HashMap;
use std::sync::Arc;
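/// Tracks in-scope bound variables; the `bool` records whether the binder can
/// appear in triggers (quantifier/`choose` binders) as opposed to `let`/lambda
/// binders.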
pub type VisitorScopeMap = ScopeMap<Ident, bool>;
pub(crate) fn exp_visitor_check<E, MF>(
expr: &Exp,
map: &mut VisitorScopeMap,
mf: &mut MF,
) -> Result<(), E>
where
MF: FnMut(&Exp, &mut VisitorScopeMap) -> Result<(), E>,
{
match exp_visitor_dfs(expr, map, &mut |expr, map| match mf(expr, map) {
Ok(()) => VisitorControlFlow::Recurse,
Err(e) => VisitorControlFlow::Stop(e),
}) {
VisitorControlFlow::Recurse => Ok(()),
VisitorControlFlow::Return => unreachable!(),
VisitorControlFlow::Stop(e) => Err(e),
}
}
pub(crate) fn exp_visitor_dfs<T, F>(
exp: &Exp,
map: &mut VisitorScopeMap,
f: &mut F,
) -> VisitorControlFlow<T>
where
F: FnMut(&Exp, &mut VisitorScopeMap) -> VisitorControlFlow<T>,
{
match f(exp, map) {
VisitorControlFlow::Stop(val) => VisitorControlFlow::Stop(val),
VisitorControlFlow::Return => VisitorControlFlow::Recurse,
VisitorControlFlow::Recurse => {
match &exp.x {
ExpX::Const(_)
| ExpX::Var(..)
| ExpX::VarAt(..)
| ExpX::Old(..)
| ExpX::VarLoc(..) => (),
ExpX::Loc(e0) => {
expr_visitor_control_flow!(exp_visitor_dfs(e0, map, f));
}
ExpX::Call(_x, _typs, es) => {
for e in es.iter() {
expr_visitor_control_flow!(exp_visitor_dfs(e, map, f));
}
}
ExpX::CallLambda(_typ, e0, es) => {
expr_visitor_control_flow!(exp_visitor_dfs(e0, map, f));
for e in es.iter() {
expr_visitor_control_flow!(exp_visitor_dfs(e, map, f));
}
}
ExpX::Ctor(_path, _ident, binders) => {
for binder in binders.iter() {
expr_visitor_control_flow!(exp_visitor_dfs(&binder.a, map, f));
}
}
ExpX::Unary(_op, e1) => {
expr_visitor_control_flow!(exp_visitor_dfs(e1, map, f));
}
ExpX::UnaryOpr(_op, e1) => {
expr_visitor_control_flow!(exp_visitor_dfs(e1, map, f));
}
ExpX::Binary(_op, e1, e2) => {
expr_visitor_control_flow!(exp_visitor_dfs(e1, map, f));
expr_visitor_control_flow!(exp_visitor_dfs(e2, map, f));
}
ExpX::If(e1, e2, e3) => {
expr_visitor_control_flow!(exp_visitor_dfs(e1, map, f));
expr_visitor_control_flow!(exp_visitor_dfs(e2, map, f));
expr_visitor_control_flow!(exp_visitor_dfs(e3, map, f));
}
ExpX::WithTriggers(triggers, body) => {
for trigger in triggers.iter() {
for term in trigger.iter() {
expr_visitor_control_flow!(exp_visitor_dfs(term, map, f));
}
}
expr_visitor_control_flow!(exp_visitor_dfs(body, map, f));
}
ExpX::Bind(bnd, e1) => {
let mut bvars: Vec<(Ident, bool)> = Vec::new();
let mut trigs: Trigs = Arc::new(vec![]);
match &bnd.x {
BndX::Let(bs) => {
for b in bs.iter() {
expr_visitor_control_flow!(exp_visitor_dfs(&b.a, map, f));
bvars.push((b.name.clone(), false));
}
}
BndX::Quant(_quant, binders, ts) => {
for b in binders.iter() {
bvars.push((b.name.clone(), true));
}
trigs = ts.clone();
}
BndX::Lambda(params) => {
for b in params.iter() {
bvars.push((b.name.clone(), false));
}
}
BndX::Choose(params, ts, _) => {
for b in params.iter() {
bvars.push((b.name.clone(), true));
}
trigs = ts.clone();
}
}
map.push_scope(true);
for (x, is_triggered) in bvars {
let _ = map.insert(x, is_triggered);
}
for t in trigs.iter() {
for exp in t.iter() {
expr_visitor_control_flow!(exp_visitor_dfs(exp, map, f));
}
}
if let BndX::Choose(_, _, cond) = &bnd.x {
expr_visitor_control_flow!(exp_visitor_dfs(cond, map, f));
}
expr_visitor_control_flow!(exp_visitor_dfs(e1, map, f));
map.pop_scope();
}
}
VisitorControlFlow::Recurse
}
}
}
pub(crate) fn stm_visitor_dfs<T, F>(stm: &Stm, f: &mut F) -> VisitorControlFlow<T>
where
F: FnMut(&Stm) -> VisitorControlFlow<T>,
{
match f(stm) {
VisitorControlFlow::Stop(val) => VisitorControlFlow::Stop(val),
VisitorControlFlow::Return => VisitorControlFlow::Recurse,
VisitorControlFlow::Recurse => {
match &stm.x {
StmX::Call(..)
| StmX::Assert(_, _)
| StmX::Assume(_)
| StmX::Assign { .. }
| StmX::AssertBV { .. }
| StmX::Fuel(..) => (),
StmX::DeadEnd(s) => {
expr_visitor_control_flow!(stm_visitor_dfs(s, f));
}
StmX::If(_cond, lhs, rhs) => {
expr_visitor_control_flow!(stm_visitor_dfs(lhs, f));
if let Some(rhs) = rhs {
expr_visitor_control_flow!(stm_visitor_dfs(rhs, f));
}
}
StmX::AssertQuery { body, mode: _, typ_inv_vars: _ } => {
expr_visitor_control_flow!(stm_visitor_dfs(body, f));
}
StmX::While {
cond_stms,
cond_exp: _,
body,
invs: _,
typ_inv_vars: _,
modified_vars: _,
} => {
for s in cond_stms.iter() {
expr_visitor_control_flow!(stm_visitor_dfs(s, f));
}
expr_visitor_control_flow!(stm_visitor_dfs(body, f));
}
StmX::OpenInvariant(_inv, _ident, _ty, body, _atomicity) => {
expr_visitor_control_flow!(stm_visitor_dfs(body, f));
}
StmX::Block(ss) => {
for s in ss.iter() {
expr_visitor_control_flow!(stm_visitor_dfs(s, f));
}
}
}
VisitorControlFlow::Recurse
}
}
}
#[allow(dead_code)]
pub(crate) fn stm_exp_visitor_dfs<T, F>(stm: &Stm, f: &mut F) -> VisitorControlFlow<T>
where
F: FnMut(&Exp, &mut VisitorScopeMap) -> VisitorControlFlow<T>,
{
stm_visitor_dfs(stm, &mut |stm| {
match &stm.x {
StmX::Call(_path, _mode, _typs, exps, _dest) => {
for exp in exps.iter() {
expr_visitor_control_flow!(exp_visitor_dfs(exp, &mut ScopeMap::new(), f));
}
}
StmX::Assert(_span2, exp) => {
expr_visitor_control_flow!(exp_visitor_dfs(exp, &mut ScopeMap::new(), f))
}
StmX::AssertBV(exp) => {
expr_visitor_control_flow!(exp_visitor_dfs(exp, &mut ScopeMap::new(), f))
}
StmX::AssertQuery { body: _, typ_inv_vars: _, mode: _ } => (),
StmX::Assume(exp) => {
expr_visitor_control_flow!(exp_visitor_dfs(exp, &mut ScopeMap::new(), f))
}
StmX::Assign { lhs: Dest { dest, .. }, rhs } => {
expr_visitor_control_flow!(exp_visitor_dfs(dest, &mut ScopeMap::new(), f));
expr_visitor_control_flow!(exp_visitor_dfs(rhs, &mut ScopeMap::new(), f))
}
StmX::Fuel(..) | StmX::DeadEnd(..) => (),
StmX::If(exp, _s1, _s2) => {
expr_visitor_control_flow!(exp_visitor_dfs(exp, &mut ScopeMap::new(), f))
}
StmX::While {
cond_stms: _,
cond_exp,
body: _,
invs,
typ_inv_vars: _,
modified_vars: _,
} => {
expr_visitor_control_flow!(exp_visitor_dfs(cond_exp, &mut ScopeMap::new(), f));
for inv in invs.iter() {
expr_visitor_control_flow!(exp_visitor_dfs(inv, &mut ScopeMap::new(), f));
}
}
StmX::OpenInvariant(inv, _ident, _ty, _body, _atomicity) => {
expr_visitor_control_flow!(exp_visitor_dfs(inv, &mut ScopeMap::new(), f))
}
StmX::Block(_) => (),
}
VisitorControlFlow::Recurse
})
}
pub(crate) fn map_exp_visitor_bind<F>(
exp: &Exp,
map: &mut VisitorScopeMap,
f: &mut F,
) -> Result<Exp, VirErr>
where
F: FnMut(&Exp, &mut VisitorScopeMap) -> Result<Exp, VirErr>,
{
let exp_new = |e: ExpX| SpannedTyped::new(&exp.span, &exp.typ, e);
match &exp.x {
ExpX::Const(_) => f(exp, map),
ExpX::Var(..) => f(exp, map),
ExpX::VarAt(..) => f(exp, map),
ExpX::VarLoc(..) => f(exp, map),
ExpX::Loc(e1) => {
let expr1 = map_exp_visitor_bind(e1, map, f)?;
let exp = exp_new(ExpX::Loc(expr1));
f(&exp, map)
}
ExpX::Old(..) => f(exp, map),
ExpX::Call(x, typs, es) => {
let mut exps: Vec<Exp> = Vec::new();
for e in es.iter() {
exps.push(map_exp_visitor_bind(e, map, f)?);
}
let exp = exp_new(ExpX::Call(x.clone(), typs.clone(), Arc::new(exps)));
f(&exp, map)
}
ExpX::CallLambda(typ, e0, es) => {
let e0 = map_exp_visitor_bind(e0, map, f)?;
let mut exps: Vec<Exp> = Vec::new();
for e in es.iter() {
exps.push(map_exp_visitor_bind(e, map, f)?);
}
let exp = exp_new(ExpX::CallLambda(typ.clone(), e0, Arc::new(exps)));
f(&exp, map)
}
ExpX::Ctor(path, ident, binders) => {
let mapped_binders = binders
.iter()
.map(|b| b.map_result(|a| map_exp_visitor_bind(a, map, f)))
.collect::<Result<Vec<_>, _>>()?;
let exp = exp_new(ExpX::Ctor(path.clone(), ident.clone(), Arc::new(mapped_binders)));
f(&exp, map)
}
ExpX::Unary(op, e1) => {
let expr1 = map_exp_visitor_bind(e1, map, f)?;
let exp = exp_new(ExpX::Unary(*op, expr1));
f(&exp, map)
}
ExpX::UnaryOpr(op, e1) => {
let expr1 = map_exp_visitor_bind(e1, map, f)?;
let exp = exp_new(ExpX::UnaryOpr(op.clone(), expr1));
f(&exp, map)
}
ExpX::Binary(op, e1, e2) => {
let expr1 = map_exp_visitor_bind(e1, map, f)?;
let expr2 = map_exp_visitor_bind(e2, map, f)?;
let exp = exp_new(ExpX::Binary(*op, expr1, expr2));
f(&exp, map)
}
ExpX::If(e1, e2, e3) => {
let expr1 = map_exp_visitor_bind(e1, map, f)?;
let expr2 = map_exp_visitor_bind(e2, map, f)?;
let expr3 = map_exp_visitor_bind(e3, map, f)?;
let exp = exp_new(ExpX::If(expr1, expr2, expr3));
f(&exp, map)
}
ExpX::WithTriggers(triggers, body) => {
let mut trigs: Vec<Trig> = Vec::new();
for trigger in triggers.iter() {
let ts = vec_map_result(&**trigger, |e| map_exp_visitor_bind(e, map, f))?;
trigs.push(Arc::new(ts));
}
let body = map_exp_visitor_bind(body, map, f)?;
let exp = exp_new(ExpX::WithTriggers(Arc::new(trigs), body));
f(&exp, map)
}
ExpX::Bind(bnd, e1) => {
let bndx = match &bnd.x {
BndX::Let(bs) => {
let mut binders: Vec<Binder<Exp>> = Vec::new();
for b in bs.iter() {
let a = map_exp_visitor_bind(&b.a, map, f)?;
binders.push(Arc::new(BinderX { name: b.name.clone(), a }));
}
map.push_scope(true);
for b in binders.iter() {
let _ = map.insert(b.name.clone(), false);
}
BndX::Let(Arc::new(binders))
}
BndX::Quant(quant, binders, ts) => {
map.push_scope(true);
for b in binders.iter() {
let _ = map.insert(b.name.clone(), true);
}
let mut triggers: Vec<Trig> = Vec::new();
for t in ts.iter() {
let mut exprs: Vec<Exp> = Vec::new();
for exp in t.iter() {
exprs.push(map_exp_visitor_bind(exp, map, f)?);
}
triggers.push(Arc::new(exprs));
}
BndX::Quant(*quant, binders.clone(), Arc::new(triggers))
}
BndX::Lambda(binders) => {
map.push_scope(true);
for b in binders.iter() {
let _ = map.insert(b.name.clone(), false);
}
bnd.x.clone()
}
BndX::Choose(binders, ts, cond) => {
map.push_scope(true);
for b in binders.iter() {
let _ = map.insert(b.name.clone(), true);
}
let mut triggers: Vec<Trig> = Vec::new();
for t in ts.iter() {
let mut exprs: Vec<Exp> = Vec::new();
for exp in t.iter() {
exprs.push(map_exp_visitor_bind(exp, map, f)?);
}
triggers.push(Arc::new(exprs));
}
let cond = map_exp_visitor_bind(cond, map, f)?;
BndX::Choose(binders.clone(), Arc::new(triggers), cond)
}
};
let bnd = Spanned::new(bnd.span.clone(), bndx);
let e1 = map_exp_visitor_bind(e1, map, f)?;
map.pop_scope();
let expx = ExpX::Bind(bnd, e1);
let exp = exp_new(expx);
f(&exp, map)
}
}
}
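/// Apply `f` to every subexpression of `exp` bottom-up, rebuilding the
/// expression; binder scopes are tracked in a fresh `VisitorScopeMap`.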
pub(crate) fn map_exp_visitor<F>(exp: &Exp, f: &mut F) -> Exp
where
F: FnMut(&Exp) -> Exp,
{
let mut map: VisitorScopeMap = ScopeMap::new();
map_exp_visitor_bind(exp, &mut map, &mut |e, _| Ok(f(e))).unwrap()
}
pub(crate) fn exp_rename_vars(exp: &Exp, map: &HashMap<UniqueIdent, UniqueIdent>) -> Exp {
map_exp_visitor(exp, &mut |exp| match &exp.x {
ExpX::Var(x) if map.contains_key(x) => {
SpannedTyped::new(&exp.span, &exp.typ, ExpX::Var(map[x].clone()))
}
_ => exp.clone(),
})
}
pub(crate) fn map_stm_visitor<F>(stm: &Stm, f: &mut F) -> Result<Stm, VirErr>
where
F: FnMut(&Stm) -> Result<Stm, VirErr>,
{
match &stm.x {
StmX::Call(..) => f(stm),
StmX::Assert(_, _) => f(stm),
StmX::Assume(_) => f(stm),
StmX::Assign { .. } => f(stm),
StmX::AssertBV { .. } => f(stm),
StmX::Fuel(..) => f(stm),
StmX::DeadEnd(s) => {
let s = map_stm_visitor(s, f)?;
let stm = Spanned::new(stm.span.clone(), StmX::DeadEnd(s));
f(&stm)
}
StmX::If(cond, lhs, rhs) => {
let lhs = map_stm_visitor(lhs, f)?;
let rhs = rhs.as_ref().map(|rhs| map_stm_visitor(rhs, f)).transpose()?;
let stm = Spanned::new(stm.span.clone(), StmX::If(cond.clone(), lhs, rhs));
f(&stm)
}
StmX::While { cond_stms, cond_exp, body, invs, typ_inv_vars, modified_vars } => {
let mut cs: Vec<Stm> = Vec::new();
for s in cond_stms.iter() {
cs.push(map_stm_visitor(s, f)?);
}
let body = map_stm_visitor(body, f)?;
let stm = Spanned::new(
stm.span.clone(),
StmX::While {
cond_stms: Arc::new(cs),
cond_exp: cond_exp.clone(),
body,
invs: invs.clone(),
typ_inv_vars: typ_inv_vars.clone(),
modified_vars: modified_vars.clone(),
},
);
f(&stm)
}
StmX::AssertQuery { mode, typ_inv_vars, body } => {
let body = map_stm_visitor(body, f)?;
let stm = Spanned::new(
stm.span.clone(),
StmX::AssertQuery { mode: *mode, typ_inv_vars: typ_inv_vars.clone(), body },
);
f(&stm)
}
StmX::OpenInvariant(inv, ident, ty, body, atomicity) => {
let body = map_stm_visitor(body, f)?;
let stm = Spanned::new(
stm.span.clone(),
StmX::OpenInvariant(inv.clone(), ident.clone(), ty.clone(), body, *atomicity),
);
f(&stm)
}
StmX::Block(ss) => {
let mut stms: Vec<Stm> = Vec::new();
for s in ss.iter() {
stms.push(map_stm_visitor(s, f)?);
}
let stm = Spanned::new(stm.span.clone(), StmX::Block(Arc::new(stms)));
f(&stm)
}
}
}
pub(crate) fn map_stm_exp_visitor<F>(stm: &Stm, f: &F) -> Result<Stm, VirErr>
where
F: Fn(&Exp) -> Exp,
{
map_stm_visitor(stm, &mut |stm| {
let span = stm.span.clone();
let stm = match &stm.x {
StmX::Call(path, mode, typs, exps, dest) => {
let exps = Arc::new(vec_map(exps, f));
Spanned::new(
span,
StmX::Call(path.clone(), *mode, typs.clone(), exps, (*dest).clone()),
)
}
StmX::Assert(span2, exp) => Spanned::new(span, StmX::Assert(span2.clone(), f(exp))),
StmX::AssertBV(exp) => Spanned::new(span, StmX::AssertBV(f(exp))),
StmX::Assume(exp) => Spanned::new(span, StmX::Assume(f(exp))),
StmX::Assign { lhs: Dest { dest, is_init }, rhs } => {
let dest = f(dest);
let rhs = f(rhs);
Spanned::new(span, StmX::Assign { lhs: Dest { dest, is_init: *is_init }, rhs })
}
StmX::AssertQuery { .. } => stm.clone(),
StmX::Fuel(..) => stm.clone(),
StmX::DeadEnd(..) => stm.clone(),
StmX::If(exp, s1, s2) => {
let exp = f(exp);
Spanned::new(span, StmX::If(exp, s1.clone(), s2.clone()))
}
StmX::While { cond_stms, cond_exp, body, invs, typ_inv_vars, modified_vars } => {
let cond_exp = f(cond_exp);
let invs = Arc::new(vec_map(invs, f));
Spanned::new(
span,
StmX::While {
cond_stms: cond_stms.clone(),
cond_exp,
body: body.clone(),
invs,
typ_inv_vars: typ_inv_vars.clone(),
modified_vars: modified_vars.clone(),
},
)
}
StmX::OpenInvariant(inv, ident, ty, body, atomicity) => {
let inv = f(inv);
Spanned::new(
span,
StmX::OpenInvariant(inv, ident.clone(), ty.clone(), body.clone(), *atomicity),
)
}
StmX::Block(_) => stm.clone(),
};
Ok(stm)
})
}
// ---- file: 14842c72e3e35493e5fba7797644b0e74c27f2dd ----
use gtk::prelude::*;
use gtk::{gdk, gio};
fn main() {
let application = gtk::Application::new(Some("com.github.css"), gio::ApplicationFlags::empty());
application.connect_startup(|app| {
// The CSS "magic" happens here.
let provider = gtk::CssProvider::new();
// Load the CSS file
let style = include_bytes!("style.css");
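        // A sketch of rules `style.css` might contain (hypothetical contents),
        // targeting the widget names assigned in `build_ui` below:
        //
        //     #label1:hover { background-color: red; }
        //     #entry1 { color: blue; }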
provider.load_from_data(style).expect("Failed to load CSS");
        // We give the CssProvider to the default screen so the CSS rules we added
// can be applied to our window.
gtk::StyleContext::add_provider_for_screen(
&gdk::Screen::default().expect("Error initializing gtk css provider."),
&provider,
gtk::STYLE_PROVIDER_PRIORITY_APPLICATION,
);
// We build the application UI.
build_ui(app);
});
application.run();
}
fn build_ui(application: >k::Application) {
let window = gtk::ApplicationWindow::new(application);
window.set_title("CSS");
window.set_position(gtk::WindowPosition::Center);
    // The main container.
let vbox = gtk::Box::new(gtk::Orientation::Vertical, 0);
let label = gtk::Button::with_label("hover me!");
// We need to name it in order to be able to use its name as a CSS label to
// apply CSS on it.
gtk::WidgetExt::set_widget_name(&label, "label1");
let entry = gtk::Entry::new();
// We need to name it in order to apply CSS on it.
gtk::WidgetExt::set_widget_name(&entry, "entry1");
entry.set_text("Some text");
let combo = gtk::ComboBoxText::new();
combo.append_text("option 1");
combo.append_text("option 2");
combo.append_text("option 3");
combo.set_active(Some(0));
vbox.add(&label);
vbox.add(&entry);
vbox.add(&combo);
// Then we add the container inside our window.
window.add(&vbox);
application.connect_activate(move |_| {
window.show_all();
});
}
// ---- file: 1c409a14b728016303b8dc5d8d066d05f5e658a6 ----
// Rather convoluted setup where we infer a relationship between two
// free regions in the closure signature (`'a` and `'b`) on the basis
// of a relationship between two bound regions (`'x` and `'y`).
//
// The idea is that, thanks to invoking `demand_y`, `'x: 'y` must
// hold, where `'x` and `'y` are bound regions. The closure can't
// prove that directly, and because `'x` and `'y` are bound it cannot
// ask the caller to prove it either. But it has bounds on `'x` and
// `'y` in terms of `'a` and `'b`, and it can propagate a relationship
// between `'a` and `'b` to the caller.
//
// Note: the use of `Cell` here is to introduce invariance. One less
// thing to worry about.
// compile-flags:-Zborrowck=mir -Zverbose
#![feature(rustc_attrs)]
use std::cell::Cell;
// Callee knows that:
//
// 'x: 'a
// 'b: 'y
//
// so if we are going to ensure that `'x: 'y`, then `'a: 'b` must
// hold.
fn establish_relationships<'a, 'b, F>(_cell_a: &Cell<&'a u32>, _cell_b: &Cell<&'b u32>, _closure: F)
where
F: for<'x, 'y> FnMut(
&Cell<&'a &'x u32>, // shows that 'x: 'a
&Cell<&'y &'b u32>, // shows that 'b: 'y
&Cell<&'x u32>,
&Cell<&'y u32>,
),
{
}
fn demand_y<'x, 'y>(_cell_x: &Cell<&'x u32>, _cell_y: &Cell<&'y u32>, _y: &'y u32) {}
#[rustc_regions]
fn supply<'a, 'b>(cell_a: Cell<&'a u32>, cell_b: Cell<&'b u32>) {
establish_relationships(&cell_a, &cell_b, |_outlives1, _outlives2, x, y| {
// Only works if 'x: 'y:
demand_y(x, y, x.get())
//~^ ERROR lifetime may not live long enough
});
}
fn main() {}
// ---- file: 9b3ec702c75da600c3ddb31a1fa2aa5e8320febe ----
// Check that repeated type variables are correctly handled
#![allow(unused)]
#![feature(type_ascription)]
type PairUncoupled<'a, 'b, T> = (&'a T, &'b T);
type PairCoupledTypes<T> = (T, T);
type PairCoupledRegions<'a, T> = (&'a T, &'a T);
fn uncoupled_wilds_rhs<'a>(_x: &'a u32, s: &'static u32) -> &'static u32 {
let ((y, _z),) = ((s, _x),): (PairUncoupled<_>,);
y // OK
}
fn coupled_wilds_rhs<'a>(_x: &'a u32, s: &'static u32) -> &'static u32 {
let ((y, _z),) = ((s, _x),): (PairCoupledTypes<_>,);
y //~ ERROR lifetime may not live long enough
}
fn coupled_regions_rhs<'a>(_x: &'a u32, s: &'static u32) -> &'static u32 {
let ((y, _z),) = ((s, _x),): (PairCoupledRegions<_>,);
y //~ ERROR lifetime may not live long enough
}
fn cast_uncoupled_wilds_rhs<'a>(_x: &'a u32, s: &'static u32) -> &'static u32 {
let ((y, _z),) = ((s, _x),) as (PairUncoupled<_>,);
y // OK
}
fn cast_coupled_wilds_rhs<'a>(_x: &'a u32, s: &'static u32) -> &'static u32 {
let ((y, _z),) = ((s, _x),) as (PairCoupledTypes<_>,);
y //~ ERROR lifetime may not live long enough
}
fn cast_coupled_regions_rhs<'a>(_x: &'a u32, s: &'static u32) -> &'static u32 {
let ((y, _z),) = ((s, _x),) as (PairCoupledRegions<_>,);
y //~ ERROR lifetime may not live long enough
}
fn main() {}
// ---- file: 64c20a462229342f7f678799cea13ad9b5b0c711 ----
//! Many kinds of items or constructs can have generic parameters: functions,
//! structs, impls, traits, etc. This module provides a common HIR for these
//! generic parameters. See also the `Generics` type and the `generics_of` query
//! in rustc.
use std::sync::Arc;
use ra_syntax::ast::{self, NameOwner, TypeParamsOwner};
use crate::{db::HirDatabase, Name, AsName, Function, Struct, Enum, Trait, Type};
/// Data about a generic parameter (to a function, struct, impl, ...).
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct GenericParam {
pub(crate) idx: u32,
pub(crate) name: Name,
}
/// Data about the generic parameters of a function, struct, impl, etc.
#[derive(Clone, PartialEq, Eq, Debug, Default)]
pub struct GenericParams {
pub(crate) params: Vec<GenericParam>,
}
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum GenericDef {
Function(Function),
Struct(Struct),
Enum(Enum),
Trait(Trait),
Type(Type),
}
impl_froms!(GenericDef: Function, Struct, Enum, Trait, Type);
impl GenericParams {
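    /// Collect the generic parameters declared on `def` from its source AST.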
pub(crate) fn generic_params_query(
db: &impl HirDatabase,
def: GenericDef,
) -> Arc<GenericParams> {
let mut generics = GenericParams::default();
match def {
GenericDef::Function(it) => generics.fill(&*it.source(db).1),
GenericDef::Struct(it) => generics.fill(&*it.source(db).1),
GenericDef::Enum(it) => generics.fill(&*it.source(db).1),
GenericDef::Trait(it) => generics.fill(&*it.source(db).1),
GenericDef::Type(it) => generics.fill(&*it.source(db).1),
}
Arc::new(generics)
}
fn fill(&mut self, node: &impl TypeParamsOwner) {
if let Some(params) = node.type_param_list() {
self.fill_params(params)
}
}
fn fill_params(&mut self, params: &ast::TypeParamList) {
for (idx, type_param) in params.type_params().enumerate() {
let name = type_param
.name()
.map(AsName::as_name)
.unwrap_or_else(Name::missing);
let param = GenericParam {
idx: idx as u32,
name,
};
self.params.push(param);
}
}
pub(crate) fn find_by_name(&self, name: &Name) -> Option<&GenericParam> {
self.params.iter().find(|p| &p.name == name)
}
}
// ---- file: de1c61a6f633c6a49b4a02ff715cbdf71e090972 ----
#![allow(clippy::float_cmp)]
use crate::utils::{clip, higher, sext, unsext};
use if_chain::if_chain;
use rustc::hir::def::{DefKind, Res};
use rustc::hir::*;
use rustc::lint::LateContext;
use rustc::ty::subst::{Subst, SubstsRef};
use rustc::ty::{self, Instance, Ty, TyCtxt};
use rustc::{bug, span_bug};
use rustc_data_structures::sync::Lrc;
use std::cmp::Ordering::{self, Equal};
use std::cmp::PartialOrd;
use std::convert::TryInto;
use std::hash::{Hash, Hasher};
use syntax::ast::{FloatTy, LitKind};
use syntax_pos::symbol::Symbol;
/// A `LitKind`-like enum to fold constant `Expr`s into.
#[derive(Debug, Clone)]
pub enum Constant {
/// A `String` (e.g., "abc").
Str(String),
/// A binary string (e.g., `b"abc"`).
Binary(Lrc<Vec<u8>>),
/// A single `char` (e.g., `'a'`).
Char(char),
/// An integer's bit representation.
Int(u128),
/// An `f32`.
F32(f32),
/// An `f64`.
F64(f64),
/// `true` or `false`.
Bool(bool),
/// An array of constants.
Vec(Vec<Constant>),
/// Also an array, but with only one constant, repeated N times.
Repeat(Box<Constant>, u64),
/// A tuple of constants.
Tuple(Vec<Constant>),
/// A raw pointer.
RawPtr(u128),
/// A literal with syntax error.
Err(Symbol),
}
impl PartialEq for Constant {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(&Self::Str(ref ls), &Self::Str(ref rs)) => ls == rs,
(&Self::Binary(ref l), &Self::Binary(ref r)) => l == r,
(&Self::Char(l), &Self::Char(r)) => l == r,
(&Self::Int(l), &Self::Int(r)) => l == r,
(&Self::F64(l), &Self::F64(r)) => {
// We want `Fw32 == FwAny` and `FwAny == Fw64`, and by transitivity we must have
// `Fw32 == Fw64`, so don’t compare them.
// `to_bits` is required to catch non-matching 0.0, -0.0, and NaNs.
l.to_bits() == r.to_bits()
},
(&Self::F32(l), &Self::F32(r)) => {
// We want `Fw32 == FwAny` and `FwAny == Fw64`, and by transitivity we must have
// `Fw32 == Fw64`, so don’t compare them.
// `to_bits` is required to catch non-matching 0.0, -0.0, and NaNs.
f64::from(l).to_bits() == f64::from(r).to_bits()
},
(&Self::Bool(l), &Self::Bool(r)) => l == r,
(&Self::Vec(ref l), &Self::Vec(ref r)) | (&Self::Tuple(ref l), &Self::Tuple(ref r)) => l == r,
(&Self::Repeat(ref lv, ref ls), &Self::Repeat(ref rv, ref rs)) => ls == rs && lv == rv,
// TODO: are there inter-type equalities?
_ => false,
}
}
}
impl Hash for Constant {
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
std::mem::discriminant(self).hash(state);
match *self {
Self::Str(ref s) => {
s.hash(state);
},
Self::Binary(ref b) => {
b.hash(state);
},
Self::Char(c) => {
c.hash(state);
},
Self::Int(i) => {
i.hash(state);
},
Self::F32(f) => {
f64::from(f).to_bits().hash(state);
},
Self::F64(f) => {
f.to_bits().hash(state);
},
Self::Bool(b) => {
b.hash(state);
},
Self::Vec(ref v) | Self::Tuple(ref v) => {
v.hash(state);
},
Self::Repeat(ref c, l) => {
c.hash(state);
l.hash(state);
},
Self::RawPtr(u) => {
u.hash(state);
},
Self::Err(ref s) => {
s.hash(state);
},
}
}
}
impl Constant {
pub fn partial_cmp(tcx: TyCtxt<'_>, cmp_type: Ty<'_>, left: &Self, right: &Self) -> Option<Ordering> {
match (left, right) {
(&Self::Str(ref ls), &Self::Str(ref rs)) => Some(ls.cmp(rs)),
(&Self::Char(ref l), &Self::Char(ref r)) => Some(l.cmp(r)),
(&Self::Int(l), &Self::Int(r)) => {
if let ty::Int(int_ty) = cmp_type.kind {
Some(sext(tcx, l, int_ty).cmp(&sext(tcx, r, int_ty)))
} else {
Some(l.cmp(&r))
}
},
(&Self::F64(l), &Self::F64(r)) => l.partial_cmp(&r),
(&Self::F32(l), &Self::F32(r)) => l.partial_cmp(&r),
(&Self::Bool(ref l), &Self::Bool(ref r)) => Some(l.cmp(r)),
(&Self::Tuple(ref l), &Self::Tuple(ref r)) | (&Self::Vec(ref l), &Self::Vec(ref r)) => l
.iter()
.zip(r.iter())
.map(|(li, ri)| Self::partial_cmp(tcx, cmp_type, li, ri))
.find(|r| r.map_or(true, |o| o != Ordering::Equal))
.unwrap_or_else(|| Some(l.len().cmp(&r.len()))),
(&Self::Repeat(ref lv, ref ls), &Self::Repeat(ref rv, ref rs)) => {
match Self::partial_cmp(tcx, cmp_type, lv, rv) {
Some(Equal) => Some(ls.cmp(rs)),
x => x,
}
},
// TODO: are there any useful inter-type orderings?
_ => None,
}
}
}
/// Parses a `LitKind` to a `Constant`.
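/// A minimal sketch of the mapping (the literal values are illustrative):
///
/// ```rust,ignore
/// // A byte literal folds to its integer bit representation:
/// assert_eq!(lit_to_constant(&LitKind::Byte(42), None), Constant::Int(42));
/// // A bool literal folds to a boolean constant:
/// assert_eq!(lit_to_constant(&LitKind::Bool(true), None), Constant::Bool(true));
/// ```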
pub fn lit_to_constant(lit: &LitKind, ty: Option<Ty<'_>>) -> Constant {
use syntax::ast::*;
match *lit {
LitKind::Str(ref is, _) => Constant::Str(is.to_string()),
LitKind::Byte(b) => Constant::Int(u128::from(b)),
LitKind::ByteStr(ref s) => Constant::Binary(Lrc::clone(s)),
LitKind::Char(c) => Constant::Char(c),
LitKind::Int(n, _) => Constant::Int(n),
LitKind::Float(ref is, LitFloatType::Suffixed(fty)) => match fty {
FloatTy::F32 => Constant::F32(is.as_str().parse().unwrap()),
FloatTy::F64 => Constant::F64(is.as_str().parse().unwrap()),
},
LitKind::Float(ref is, LitFloatType::Unsuffixed) => match ty.expect("type of float is known").kind {
ty::Float(FloatTy::F32) => Constant::F32(is.as_str().parse().unwrap()),
ty::Float(FloatTy::F64) => Constant::F64(is.as_str().parse().unwrap()),
_ => bug!(),
},
LitKind::Bool(b) => Constant::Bool(b),
LitKind::Err(s) => Constant::Err(s),
}
}
pub fn constant<'c, 'cc>(
lcx: &LateContext<'c, 'cc>,
tables: &'c ty::TypeckTables<'cc>,
e: &Expr,
) -> Option<(Constant, bool)> {
let mut cx = ConstEvalLateContext {
lcx,
tables,
param_env: lcx.param_env,
needed_resolution: false,
substs: lcx.tcx.intern_substs(&[]),
};
cx.expr(e).map(|cst| (cst, cx.needed_resolution))
}
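/// Like `constant`, but returns `None` when evaluating the expression required
/// resolving other named constants (i.e. the value is not purely local).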
pub fn constant_simple<'c, 'cc>(
lcx: &LateContext<'c, 'cc>,
tables: &'c ty::TypeckTables<'cc>,
e: &Expr,
) -> Option<Constant> {
constant(lcx, tables, e).and_then(|(cst, res)| if res { None } else { Some(cst) })
}
/// Creates a `ConstEvalLateContext` from the given `LateContext` and `TypeckTables`.
pub fn constant_context<'c, 'cc>(
lcx: &'c LateContext<'c, 'cc>,
tables: &'c ty::TypeckTables<'cc>,
) -> ConstEvalLateContext<'c, 'cc> {
ConstEvalLateContext {
lcx,
tables,
param_env: lcx.param_env,
needed_resolution: false,
substs: lcx.tcx.intern_substs(&[]),
}
}
pub struct ConstEvalLateContext<'a, 'tcx> {
lcx: &'a LateContext<'a, 'tcx>,
tables: &'a ty::TypeckTables<'tcx>,
param_env: ty::ParamEnv<'tcx>,
needed_resolution: bool,
substs: SubstsRef<'tcx>,
}
impl<'c, 'cc> ConstEvalLateContext<'c, 'cc> {
/// Simple constant folding: Insert an expression, get a constant or none.
pub fn expr(&mut self, e: &Expr) -> Option<Constant> {
if let Some((ref cond, ref then, otherwise)) = higher::if_block(&e) {
return self.ifthenelse(cond, then, otherwise);
}
match e.kind {
ExprKind::Path(ref qpath) => self.fetch_path(qpath, e.hir_id),
ExprKind::Block(ref block, _) => self.block(block),
ExprKind::Lit(ref lit) => Some(lit_to_constant(&lit.node, self.tables.expr_ty_opt(e))),
ExprKind::Array(ref vec) => self.multi(vec).map(Constant::Vec),
ExprKind::Tup(ref tup) => self.multi(tup).map(Constant::Tuple),
ExprKind::Repeat(ref value, _) => {
let n = match self.tables.expr_ty(e).kind {
ty::Array(_, n) => n.eval_usize(self.lcx.tcx, self.lcx.param_env),
_ => span_bug!(e.span, "typeck error"),
};
self.expr(value).map(|v| Constant::Repeat(Box::new(v), n))
},
ExprKind::Unary(op, ref operand) => self.expr(operand).and_then(|o| match op {
UnNot => self.constant_not(&o, self.tables.expr_ty(e)),
UnNeg => self.constant_negate(&o, self.tables.expr_ty(e)),
UnDeref => Some(o),
}),
ExprKind::Binary(op, ref left, ref right) => self.binop(op, left, right),
ExprKind::Call(ref callee, ref args) => {
// We only handle a few const functions for now.
if_chain! {
if args.is_empty();
if let ExprKind::Path(qpath) = &callee.kind;
let res = self.tables.qpath_res(qpath, callee.hir_id);
if let Some(def_id) = res.opt_def_id();
let get_def_path = self.lcx.get_def_path(def_id);
let def_path = get_def_path
.iter()
.copied()
.map(Symbol::as_str)
.collect::<Vec<_>>();
if def_path.len() == 4;
if def_path[0] == "core";
if def_path[1] == "num";
if def_path[3] == "max_value";
then {
let value = match &*def_path[2] {
"<impl i8>" => i8::max_value() as u128,
"<impl i16>" => i16::max_value() as u128,
"<impl i32>" => i32::max_value() as u128,
"<impl i64>" => i64::max_value() as u128,
"<impl i128>" => i128::max_value() as u128,
_ => return None,
};
Some(Constant::Int(value))
}
else {
None
}
}
},
// TODO: add other expressions.
_ => None,
}
}
#[allow(clippy::cast_possible_wrap)]
fn constant_not(&self, o: &Constant, ty: Ty<'_>) -> Option<Constant> {
use self::Constant::*;
match *o {
Bool(b) => Some(Bool(!b)),
Int(value) => {
let value = !value;
match ty.kind {
ty::Int(ity) => Some(Int(unsext(self.lcx.tcx, value as i128, ity))),
ty::Uint(ity) => Some(Int(clip(self.lcx.tcx, value, ity))),
_ => None,
}
},
_ => None,
}
}
fn constant_negate(&self, o: &Constant, ty: Ty<'_>) -> Option<Constant> {
use self::Constant::*;
match *o {
Int(value) => {
let ity = match ty.kind {
ty::Int(ity) => ity,
_ => return None,
};
// sign extend
let value = sext(self.lcx.tcx, value, ity);
let value = value.checked_neg()?;
// clear unused bits
Some(Int(unsext(self.lcx.tcx, value, ity)))
},
F32(f) => Some(F32(-f)),
F64(f) => Some(F64(-f)),
_ => None,
}
}
/// Create `Some(Vec![..])` of all constants, unless there is any
/// non-constant part.
fn multi(&mut self, vec: &[Expr]) -> Option<Vec<Constant>> {
vec.iter().map(|elem| self.expr(elem)).collect::<Option<_>>()
}
/// Lookup a possibly constant expression from a `ExprKind::Path`.
fn fetch_path(&mut self, qpath: &QPath, id: HirId) -> Option<Constant> {
use rustc::mir::interpret::GlobalId;
let res = self.tables.qpath_res(qpath, id);
match res {
Res::Def(DefKind::Const, def_id) | Res::Def(DefKind::AssocConst, def_id) => {
let substs = self.tables.node_substs(id);
let substs = if self.substs.is_empty() {
substs
} else {
substs.subst(self.lcx.tcx, self.substs)
};
let instance = Instance::resolve(self.lcx.tcx, self.param_env, def_id, substs)?;
let gid = GlobalId {
instance,
promoted: None,
};
let result = self.lcx.tcx.const_eval(self.param_env.and(gid)).ok()?;
let result = miri_to_const(&result);
if result.is_some() {
self.needed_resolution = true;
}
result
},
// FIXME: cover all usable cases.
_ => None,
}
}
/// A block can only yield a constant if it only has one constant expression.
fn block(&mut self, block: &Block) -> Option<Constant> {
if block.stmts.is_empty() {
block.expr.as_ref().and_then(|b| self.expr(b))
} else {
None
}
}
fn ifthenelse(&mut self, cond: &Expr, then: &Expr, otherwise: Option<&Expr>) -> Option<Constant> {
if let Some(Constant::Bool(b)) = self.expr(cond) {
if b {
self.expr(&*then)
} else {
otherwise.as_ref().and_then(|expr| self.expr(expr))
}
} else {
None
}
}
fn binop(&mut self, op: BinOp, left: &Expr, right: &Expr) -> Option<Constant> {
let l = self.expr(left)?;
let r = self.expr(right);
match (l, r) {
(Constant::Int(l), Some(Constant::Int(r))) => match self.tables.expr_ty(left).kind {
ty::Int(ity) => {
let l = sext(self.lcx.tcx, l, ity);
let r = sext(self.lcx.tcx, r, ity);
let zext = |n: i128| Constant::Int(unsext(self.lcx.tcx, n, ity));
match op.node {
BinOpKind::Add => l.checked_add(r).map(zext),
BinOpKind::Sub => l.checked_sub(r).map(zext),
BinOpKind::Mul => l.checked_mul(r).map(zext),
BinOpKind::Div if r != 0 => l.checked_div(r).map(zext),
BinOpKind::Rem if r != 0 => l.checked_rem(r).map(zext),
BinOpKind::Shr => l.checked_shr(r.try_into().expect("invalid shift")).map(zext),
BinOpKind::Shl => l.checked_shl(r.try_into().expect("invalid shift")).map(zext),
BinOpKind::BitXor => Some(zext(l ^ r)),
BinOpKind::BitOr => Some(zext(l | r)),
BinOpKind::BitAnd => Some(zext(l & r)),
BinOpKind::Eq => Some(Constant::Bool(l == r)),
BinOpKind::Ne => Some(Constant::Bool(l != r)),
BinOpKind::Lt => Some(Constant::Bool(l < r)),
BinOpKind::Le => Some(Constant::Bool(l <= r)),
BinOpKind::Ge => Some(Constant::Bool(l >= r)),
BinOpKind::Gt => Some(Constant::Bool(l > r)),
_ => None,
}
},
ty::Uint(_) => match op.node {
BinOpKind::Add => l.checked_add(r).map(Constant::Int),
BinOpKind::Sub => l.checked_sub(r).map(Constant::Int),
BinOpKind::Mul => l.checked_mul(r).map(Constant::Int),
BinOpKind::Div => l.checked_div(r).map(Constant::Int),
BinOpKind::Rem => l.checked_rem(r).map(Constant::Int),
BinOpKind::Shr => l.checked_shr(r.try_into().expect("shift too large")).map(Constant::Int),
BinOpKind::Shl => l.checked_shl(r.try_into().expect("shift too large")).map(Constant::Int),
BinOpKind::BitXor => Some(Constant::Int(l ^ r)),
BinOpKind::BitOr => Some(Constant::Int(l | r)),
BinOpKind::BitAnd => Some(Constant::Int(l & r)),
BinOpKind::Eq => Some(Constant::Bool(l == r)),
BinOpKind::Ne => Some(Constant::Bool(l != r)),
BinOpKind::Lt => Some(Constant::Bool(l < r)),
BinOpKind::Le => Some(Constant::Bool(l <= r)),
BinOpKind::Ge => Some(Constant::Bool(l >= r)),
BinOpKind::Gt => Some(Constant::Bool(l > r)),
_ => None,
},
_ => None,
},
(Constant::F32(l), Some(Constant::F32(r))) => match op.node {
BinOpKind::Add => Some(Constant::F32(l + r)),
BinOpKind::Sub => Some(Constant::F32(l - r)),
BinOpKind::Mul => Some(Constant::F32(l * r)),
BinOpKind::Div => Some(Constant::F32(l / r)),
BinOpKind::Rem => Some(Constant::F32(l % r)),
BinOpKind::Eq => Some(Constant::Bool(l == r)),
BinOpKind::Ne => Some(Constant::Bool(l != r)),
BinOpKind::Lt => Some(Constant::Bool(l < r)),
BinOpKind::Le => Some(Constant::Bool(l <= r)),
BinOpKind::Ge => Some(Constant::Bool(l >= r)),
BinOpKind::Gt => Some(Constant::Bool(l > r)),
_ => None,
},
(Constant::F64(l), Some(Constant::F64(r))) => match op.node {
BinOpKind::Add => Some(Constant::F64(l + r)),
BinOpKind::Sub => Some(Constant::F64(l - r)),
BinOpKind::Mul => Some(Constant::F64(l * r)),
BinOpKind::Div => Some(Constant::F64(l / r)),
BinOpKind::Rem => Some(Constant::F64(l % r)),
BinOpKind::Eq => Some(Constant::Bool(l == r)),
BinOpKind::Ne => Some(Constant::Bool(l != r)),
BinOpKind::Lt => Some(Constant::Bool(l < r)),
BinOpKind::Le => Some(Constant::Bool(l <= r)),
BinOpKind::Ge => Some(Constant::Bool(l >= r)),
BinOpKind::Gt => Some(Constant::Bool(l > r)),
_ => None,
},
(l, r) => match (op.node, l, r) {
(BinOpKind::And, Constant::Bool(false), _) => Some(Constant::Bool(false)),
(BinOpKind::Or, Constant::Bool(true), _) => Some(Constant::Bool(true)),
(BinOpKind::And, Constant::Bool(true), Some(r)) | (BinOpKind::Or, Constant::Bool(false), Some(r)) => {
Some(r)
},
(BinOpKind::BitXor, Constant::Bool(l), Some(Constant::Bool(r))) => Some(Constant::Bool(l ^ r)),
(BinOpKind::BitAnd, Constant::Bool(l), Some(Constant::Bool(r))) => Some(Constant::Bool(l & r)),
(BinOpKind::BitOr, Constant::Bool(l), Some(Constant::Bool(r))) => Some(Constant::Bool(l | r)),
_ => None,
},
}
}
}
pub fn miri_to_const(result: &ty::Const<'_>) -> Option<Constant> {
use rustc::mir::interpret::{ConstValue, Scalar};
match result.val {
ty::ConstKind::Value(ConstValue::Scalar(Scalar::Raw { data: d, .. })) => match result.ty.kind {
ty::Bool => Some(Constant::Bool(d == 1)),
ty::Uint(_) | ty::Int(_) => Some(Constant::Int(d)),
ty::Float(FloatTy::F32) => Some(Constant::F32(f32::from_bits(
d.try_into().expect("invalid f32 bit representation"),
))),
ty::Float(FloatTy::F64) => Some(Constant::F64(f64::from_bits(
d.try_into().expect("invalid f64 bit representation"),
))),
ty::RawPtr(type_and_mut) => {
if let ty::Uint(_) = type_and_mut.ty.kind {
return Some(Constant::RawPtr(d));
}
None
},
// FIXME: implement other conversions.
_ => None,
},
ty::ConstKind::Value(ConstValue::Slice { data, start, end }) => match result.ty.kind {
ty::Ref(_, tam, _) => match tam.kind {
ty::Str => String::from_utf8(
data.inspect_with_undef_and_ptr_outside_interpreter(start..end)
.to_owned(),
)
.ok()
.map(Constant::Str),
_ => None,
},
_ => None,
},
// FIXME: implement other conversions.
_ => None,
}
}
| 41.428571 | 118 | 0.468824 |
3a3728c1982bb5b106f31b7338b3663c1462ab60 | 15,105 | pub use crate::resources::common::{Entity, ProjectTeam, Role};
/// The BucketAccessControl resource represents the Access Control Lists (ACLs) for buckets within
/// Google Cloud Storage. ACLs let you specify who has access to your data and to what extent.
///
/// ```text,ignore
/// Important: This method fails with a 400 Bad Request response for buckets with uniform
/// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to
/// control access instead.
/// ```
///
/// There are three roles that can be assigned to an entity:
///
/// * READERs can get the bucket, though no acl property will be returned, and list the bucket's
/// objects.
/// * WRITERs are READERs, and they can insert objects into the bucket and delete the bucket's
/// objects.
/// * OWNERs are WRITERs, and they can get the acl property of a bucket, update a bucket, and call
/// all BucketAccessControl methods on the bucket.
#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BucketAccessControl {
/// The kind of item this is. For bucket access control entries, this is always
/// `storage#bucketAccessControl`.
pub kind: String,
/// The ID of the access-control entry.
pub id: String,
/// The link to this access-control entry.
pub self_link: String,
/// The name of the bucket.
pub bucket: String,
/// The entity holding the permission, in one of the following forms:
///
/// * `user-userId`
/// * `user-email`
/// * `group-groupId`
/// * `group-email`
/// * `domain-domain`
/// * `project-team-projectId`
/// * `allUsers`
/// * `allAuthenticatedUsers`
///
/// Examples:
///
/// * The user `liz@example.com` would be `user-liz@example.com`.
/// * The group `example@googlegroups.com` would be `group-example@googlegroups.com`.
/// * To refer to all members of the G Suite for Business domain example.com, the entity would
/// be domain-example.com.
pub entity: Entity,
/// The access permission for the entity.
pub role: Role,
/// The email address associated with the entity, if any.
pub email: Option<String>,
/// The ID for the entity, if any.
pub entity_id: Option<String>,
/// The domain associated with the entity, if any.
pub domain: Option<String>,
/// The project team associated with the entity, if any.
pub project_team: Option<ProjectTeam>,
/// HTTP 1.1 Entity tag for the access-control entry.
pub etag: String,
}
/// Model that can be used to create a new BucketAccessControl object.
#[derive(Debug, PartialEq, serde::Serialize)]
#[serde(rename_all = "camelCase")]
pub struct NewBucketAccessControl {
/// The entity holding the permission, in one of the following forms:
///
/// * `user-userId`
/// * `user-email`
/// * `group-groupId`
/// * `group-email`
/// * `domain-domain`
/// * `project-team-projectId`
/// * `allUsers`
/// * `allAuthenticatedUsers`
///
/// Examples:
///
/// * The user `liz@example.com` would be `user-liz@example.com`.
/// * The group `example@googlegroups.com` would be `group-example@googlegroups.com`.
/// * To refer to all members of the G Suite for Business domain example.com, the entity would
/// be domain-example.com.
pub entity: Entity,
/// The access permission for the entity.
pub role: Role,
}
impl BucketAccessControl {
/// Create a new `BucketAccessControl` using the provided `NewBucketAccessControl`, related to
/// the `Bucket` provided by the `bucket_name` argument.
///
/// ### Important
/// Important: This method fails with a 400 Bad Request response for buckets with uniform
/// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to
/// control access instead.
/// ### Example
/// ```rust,no_run
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use cloud_storage::bucket_access_control::{BucketAccessControl, NewBucketAccessControl};
/// use cloud_storage::bucket_access_control::{Role, Entity};
///
/// let new_bucket_access_control = NewBucketAccessControl {
/// entity: Entity::AllUsers,
/// role: Role::Reader,
/// };
/// BucketAccessControl::create("mybucket", &new_bucket_access_control).await?;
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "global-client")]
pub async fn create(
bucket: &str,
new_bucket_access_control: &NewBucketAccessControl,
) -> crate::Result<Self> {
crate::CLOUD_CLIENT
.bucket_access_control()
.create(bucket, new_bucket_access_control)
.await
}
/// The synchronous equivalent of `BucketAccessControl::create`.
///
/// ### Features
/// This function requires that the feature flag `sync` is enabled in `Cargo.toml`.
#[cfg(all(feature = "global-client", feature = "sync"))]
pub fn create_sync(
bucket: &str,
new_bucket_access_control: &NewBucketAccessControl,
) -> crate::Result<Self> {
crate::runtime()?.block_on(Self::create(bucket, new_bucket_access_control))
}
/// Returns all `BucketAccessControl`s related to this bucket.
///
/// ### Important
/// Important: This method fails with a 400 Bad Request response for buckets with uniform
/// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to
/// control access instead.
/// ### Example
/// ```rust,no_run
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use cloud_storage::bucket_access_control::BucketAccessControl;
///
/// let acls = BucketAccessControl::list("mybucket").await?;
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "global-client")]
pub async fn list(bucket: &str) -> crate::Result<Vec<Self>> {
crate::CLOUD_CLIENT
.bucket_access_control()
.list(bucket)
.await
}
/// The synchronous equivalent of `BucketAccessControl::list`.
///
/// ### Features
/// This function requires that the feature flag `sync` is enabled in `Cargo.toml`.
#[cfg(all(feature = "global-client", feature = "sync"))]
pub fn list_sync(bucket: &str) -> crate::Result<Vec<Self>> {
crate::runtime()?.block_on(Self::list(bucket))
}
/// Returns the ACL entry for the specified entity on the specified bucket.
///
/// ### Important
/// Important: This method fails with a 400 Bad Request response for buckets with uniform
/// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to
/// control access instead.
/// ### Example
/// ```rust,no_run
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity};
///
/// let controls = BucketAccessControl::read("mybucket", &Entity::AllUsers).await?;
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "global-client")]
pub async fn read(bucket: &str, entity: &Entity) -> crate::Result<Self> {
crate::CLOUD_CLIENT
.bucket_access_control()
.read(bucket, entity)
.await
}
/// The synchronous equivalent of `BucketAccessControl::read`.
///
/// ### Features
/// This function requires that the feature flag `sync` is enabled in `Cargo.toml`.
#[cfg(all(feature = "global-client", feature = "sync"))]
pub fn read_sync(bucket: &str, entity: &Entity) -> crate::Result<Self> {
crate::runtime()?.block_on(Self::read(bucket, entity))
}
/// Update this `BucketAccessControl`.
///
/// ### Important
/// Important: This method fails with a 400 Bad Request response for buckets with uniform
/// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to
/// control access instead.
/// ### Example
/// ```rust,no_run
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity};
///
/// let mut acl = BucketAccessControl::read("mybucket", &Entity::AllUsers).await?;
/// acl.entity = Entity::AllAuthenticatedUsers;
/// acl.update().await?;
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "global-client")]
pub async fn update(&self) -> crate::Result<Self> {
crate::CLOUD_CLIENT
.bucket_access_control()
.update(self)
.await
}
/// The synchronous equivalent of `BucketAccessControl::update`.
///
/// ### Features
/// This function requires that the feature flag `sync` is enabled in `Cargo.toml`.
#[cfg(all(feature = "global-client", feature = "sync"))]
pub fn update_sync(&self) -> crate::Result<Self> {
crate::runtime()?.block_on(self.update())
}
/// Permanently deletes the ACL entry for the specified entity on the specified bucket.
///
/// ### Important
/// Important: This method fails with a 400 Bad Request response for buckets with uniform
/// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to
/// control access instead.
/// ### Example
/// ```rust,no_run
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity};
///
/// let controls = BucketAccessControl::read("mybucket", &Entity::AllUsers).await?;
/// controls.delete().await?;
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "global-client")]
pub async fn delete(self) -> crate::Result<()> {
crate::CLOUD_CLIENT
.bucket_access_control()
.delete(self)
.await
}
/// The synchronous equivalent of `BucketAccessControl::delete`.
///
/// ### Features
/// This function requires that the feature flag `sync` is enabled in `Cargo.toml`.
#[cfg(all(feature = "global-client", feature = "sync"))]
pub fn delete_sync(self) -> crate::Result<()> {
crate::runtime()?.block_on(self.delete())
}
}
#[cfg(all(test, feature = "global-client"))]
mod tests {
use super::*;
#[tokio::test]
async fn create() -> Result<(), Box<dyn std::error::Error>> {
let bucket = crate::read_test_bucket().await;
let new_bucket_access_control = NewBucketAccessControl {
entity: Entity::AllUsers,
role: Role::Reader,
};
BucketAccessControl::create(&bucket.name, &new_bucket_access_control).await?;
Ok(())
}
#[tokio::test]
async fn list() -> Result<(), Box<dyn std::error::Error>> {
let bucket = crate::read_test_bucket().await;
BucketAccessControl::list(&bucket.name).await?;
Ok(())
}
#[tokio::test]
async fn read() -> Result<(), Box<dyn std::error::Error>> {
let bucket = crate::read_test_bucket().await;
BucketAccessControl::read(&bucket.name, &Entity::AllUsers).await?;
Ok(())
}
#[tokio::test]
async fn update() -> Result<(), Box<dyn std::error::Error>> {
// use a separate bucket to prevent synchronization issues
let bucket = crate::create_test_bucket("test-update-bucket-access-controls").await;
let new_bucket_access_control = NewBucketAccessControl {
entity: Entity::AllUsers,
role: Role::Reader,
};
BucketAccessControl::create(&bucket.name, &new_bucket_access_control).await?;
let mut acl = BucketAccessControl::read(&bucket.name, &Entity::AllUsers).await?;
acl.entity = Entity::AllAuthenticatedUsers;
acl.update().await?;
bucket.delete().await?;
Ok(())
}
#[tokio::test]
async fn delete() -> Result<(), Box<dyn std::error::Error>> {
// use a separate bucket to prevent synchronization issues
let bucket = crate::create_test_bucket("test-delete-bucket-access-controls").await;
let new_bucket_access_control = NewBucketAccessControl {
entity: Entity::AllUsers,
role: Role::Reader,
};
BucketAccessControl::create(&bucket.name, &new_bucket_access_control).await?;
let acl = BucketAccessControl::read(&bucket.name, &Entity::AllUsers).await?;
acl.delete().await?;
bucket.delete().await?;
Ok(())
}
#[cfg(all(feature = "global-client", feature = "sync"))]
mod sync {
use super::*;
#[test]
fn create() -> Result<(), Box<dyn std::error::Error>> {
let bucket = crate::read_test_bucket_sync();
let new_bucket_access_control = NewBucketAccessControl {
entity: Entity::AllUsers,
role: Role::Reader,
};
BucketAccessControl::create_sync(&bucket.name, &new_bucket_access_control)?;
Ok(())
}
#[test]
fn list() -> Result<(), Box<dyn std::error::Error>> {
let bucket = crate::read_test_bucket_sync();
BucketAccessControl::list_sync(&bucket.name)?;
Ok(())
}
#[test]
fn read() -> Result<(), Box<dyn std::error::Error>> {
let bucket = crate::read_test_bucket_sync();
BucketAccessControl::read_sync(&bucket.name, &Entity::AllUsers)?;
Ok(())
}
#[test]
fn update() -> Result<(), Box<dyn std::error::Error>> {
// use a separate bucket to prevent synchronization issues
let bucket = crate::create_test_bucket_sync("test-update-bucket-access-controls");
let new_bucket_access_control = NewBucketAccessControl {
entity: Entity::AllUsers,
role: Role::Reader,
};
BucketAccessControl::create_sync(&bucket.name, &new_bucket_access_control)?;
let mut acl = BucketAccessControl::read_sync(&bucket.name, &Entity::AllUsers)?;
acl.entity = Entity::AllAuthenticatedUsers;
acl.update_sync()?;
bucket.delete_sync()?;
Ok(())
}
#[test]
fn delete() -> Result<(), Box<dyn std::error::Error>> {
// use a separate bucket to prevent synchronization issues
let bucket = crate::create_test_bucket_sync("test-delete-bucket-access-controls");
let new_bucket_access_control = NewBucketAccessControl {
entity: Entity::AllUsers,
role: Role::Reader,
};
BucketAccessControl::create_sync(&bucket.name, &new_bucket_access_control)?;
let acl = BucketAccessControl::read_sync(&bucket.name, &Entity::AllUsers)?;
acl.delete_sync()?;
bucket.delete_sync()?;
Ok(())
}
}
}
| 38.435115 | 98 | 0.608606 |
8f6739af4b607718994e6413343c4658abcdba7c | 2,330 | #![feature(test)]
extern crate test;
extern crate mrkl;
#[cfg(feature = "digest")]
mod digest {
extern crate digest_hash;
extern crate sha2;
mod prelude {
pub use test::black_box;
pub use test::Bencher;
pub use super::digest_hash::digest::generic_array::GenericArray;
pub use super::sha2::Digest;
pub use super::sha2::Sha256;
pub use mrkl::digest::ByteDigestHasher;
pub type Hasher = ByteDigestHasher<Sha256>;
}
mod sequential {
use super::prelude::*;
use mrkl::tree::Builder;
use std::iter;
#[bench]
fn pure_digest_perf_100x4k(b: &mut Bencher) {
let block: &[u8] = &[0u8; 4 * 1024];
b.iter(|| {
let mut hash = GenericArray::default();
for _ in 0..100 {
hash = Sha256::digest(block);
hash = black_box(hash);
}
for _ in 0..100 - 1 {
let mut digest = Sha256::new();
digest.input(&[0u8][..]);
digest.input(&hash);
digest.input(&[0u8][..]);
digest.input(&hash);
hash = digest.result();
hash = black_box(hash);
}
})
}
#[bench]
fn complete_tree_100x4k(b: &mut Bencher) {
let block: &[u8] = &[0u8; 4 * 1024];
let seq: Vec<_> = iter::repeat(block).take(100).collect();
b.iter(|| {
let builder = Builder::<Hasher, _>::new();
let tree = builder.complete_tree_from(&seq).unwrap();
black_box(tree);
})
}
}
#[cfg(feature = "parallel")]
mod parallel {
extern crate rayon;
use super::prelude::*;
use mrkl::tree::parallel::Builder;
use self::rayon::iter;
#[bench]
fn complete_tree_100x4k(b: &mut Bencher) {
let block: &[u8] = &[0u8; 4 * 1024];
b.iter(|| {
let iter = iter::repeatn(block, 100);
let builder = Builder::<Hasher, _>::new();
let tree = builder.complete_tree_from(iter).unwrap();
black_box(tree);
})
}
}
}
| 26.781609 | 72 | 0.464807 |
4b239cf6568f71b9d25ee08dba4dbb5ebce3bd71 | 1,290 | use std::str::FromStr;
use serde::{Deserialize, Serialize};
use wirefilter::Scheme;
use crate::output::OutputType;
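// The filter scheme: the set of typed fields that filter expressions may reference.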
lazy_static::lazy_static! {
pub(crate) static ref SCHEME: Scheme = Scheme! {
elb_status_code: Int,
user_agent: Bytes,
target_group_arn: Bytes,
};
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Pipeline {
pub filter: String,
pub output: OutputType,
}
impl Pipeline {
pub fn get_filter(&self) -> wirefilter::Filter {
let ast = SCHEME
.parse(self.filter.as_str())
.unwrap_or_else(|_| panic!("Failed to parse the input filter: {:?}", self.filter));
ast.compile()
}
}
#[derive(Debug)]
pub struct Pipelines(Vec<Pipeline>);
impl Pipelines {
pub fn new(pipelines: Vec<Pipeline>) -> Self {
Self(pipelines)
}
pub fn inner(&self) -> &Vec<Pipeline> {
&self.0
}
}
impl FromStr for Pipelines {
type Err = anyhow::Error;
fn from_str(json: &str) -> Result<Self, Self::Err> {
Ok(Self(serde_json::from_str(json)?))
}
}
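/// Compile every pipeline's filter once up front so each can be matched many
/// times. A minimal usage sketch, assuming a JSON array of pipelines (the
/// filter string and output variant are hypothetical):
///
/// ```rust,ignore
/// let pipelines: Pipelines = r#"[{"filter": "elb_status_code == 500", "output": "Stdout"}]"#.parse()?;
/// let compiled = compile_pipelines(&pipelines);
/// ```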
pub fn compile_pipelines(pipelines: &Pipelines) -> Vec<(&Pipeline, wirefilter::Filter)> {
pipelines
.inner()
.iter()
.map(|pipeline| (pipeline, pipeline.get_filter()))
.collect()
}
| 22.241379 | 95 | 0.610078 |
ebd1ed8b265743744fe76c79ea2aebb0e68f4ed3 | 7,736 | // Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Synchronization on the GPU.
//!
//! Just like for CPU code, you have to ensure that buffers and images are not accessed mutably by
//! multiple GPU queues simultaneously and that they are not accessed mutably by the CPU and by the
//! GPU simultaneously.
//!
//! This safety is enforced at runtime by vulkano but it is not magic and you will require some
//! knowledge if you want to avoid errors.
//!
//! # Futures
//!
//! Whenever you ask the GPU to start an operation by using a function of the vulkano library (for
//! example executing a command buffer), this function will return a *future*. A future is an
//! object that implements [the `GpuFuture` trait](trait.GpuFuture.html) and that represents the
//! point in time when this operation is over.
//!
//! No function in vulkano immediately sends an operation to the GPU (with the exception of some
//! unsafe low-level functions). Instead they return a future that is in the pending state. Before
//! the GPU actually starts doing anything, you have to *flush* the future by calling the `flush()`
//! method or one of its derivatives.
//!
//! Futures serve several roles:
//!
//! - Futures can be used to build dependencies between operations and makes it possible to ask
//! that an operation starts only after a previous operation is finished.
//! - Submitting an operation to the GPU is a costly operation. By chaining multiple operations
//! with futures you will submit them all at once instead of one by one, thereby reducing this
//! cost.
//! - Futures keep alive the resources and objects used by the GPU so that they don't get destroyed
//! while they are still in use.
//!
//! The last point means that you should keep futures alive in your program for as long as their
//! corresponding operation is potentially still being executed by the GPU. Dropping a future
//! earlier will block the current thread (after flushing, if necessary) until the GPU has finished
//! the operation, which is usually not what you want.
//!
//! If you write a function that submits an operation to the GPU in your program, you are
//! encouraged to let this function return the corresponding future and let the caller handle it.
//! This way the caller will be able to chain multiple futures together and decide when it wants to
//! keep the future alive or drop it.
//!
//! # Executing an operation after a future
//!
//! Respecting the order of operations on the GPU is important, as it is what *proves* to vulkano that
//! what you are doing is indeed safe. For example if you submit two operations that modify the
//! same buffer, then you need to execute one after the other instead of submitting them
//! independantly. Failing to do so would mean that these two operations could potentially execute
//! simultaneously on the GPU, which would be unsafe.
//!
//! This is done by calling one of the methods of the `GpuFuture` trait. For example calling
//! `prev_future.then_execute(command_buffer)` takes ownership of `prev_future` and will make sure
//! to only start executing `command_buffer` after the moment corresponding to `prev_future`
//! happens. The object returned by the `then_execute` function is itself a future that corresponds
//! to the moment when the execution of `command_buffer` ends.
//!
//! ## Between two different GPU queues
//!
//! When you want to perform an operation after another operation on two different queues, you
//! **must** put a *semaphore* between them. Failure to do so would result in a runtime error.
//! Adding a semaphore is as simple as replacing `prev_future.then_execute(...)` with
//! `prev_future.then_signal_semaphore().then_execute(...)`.
//!
//! > **Note**: A common use-case is using a transfer queue (i.e. a queue that is only capable of
//! > performing transfer operations) to write data to a buffer, then read that data from the
//! > rendering queue.
//!
//! What happens when you do so is that the first queue will execute the first set of operations
//! (represented by `prev_future` in the example), then put a semaphore in the signalled state.
//! Meanwhile the second queue blocks (if necessary) until that same semaphore gets signalled, and
//! then only will execute the second set of operations.
//!
//! Since you want to avoid blocking the second queue as much as possible, you probably want to
//! flush the operation to the first queue as soon as possible. This can easily be done by calling
//! `then_signal_semaphore_and_flush()` instead of `then_signal_semaphore()`.
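//!
//! A minimal sketch of such a chain, assuming a `device`, two queues, and two
//! already-built command buffers (all names here are placeholders, and error
//! handling is elided):
//!
//! ```ignore
//! let future = vulkano::sync::now(device.clone())
//!     .then_execute(transfer_queue.clone(), upload_command_buffer).unwrap()
//!     .then_signal_semaphore_and_flush().unwrap()
//!     .then_execute(render_queue.clone(), draw_command_buffer).unwrap()
//!     .then_signal_fence_and_flush().unwrap();
//! ```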
//!
//! ## Between several different GPU queues
//!
//! The `then_signal_semaphore()` method is appropriate when you perform an operation in one queue,
//! and want to see the result in another queue. However in some situations you want to start
//! multiple operations on several different queues.
//!
//! TODO: this is not yet implemented
//!
//! # Fences
//!
//! A `Fence` is an object that is used to signal the CPU when an operation on the GPU is finished.
//!
//! Signalling a fence is done by calling `then_signal_fence()` on a future. Just like semaphores,
//! you are encouraged to use `then_signal_fence_and_flush()` instead.
//!
//! Signalling a fence is kind of a "terminator" to a chain of futures.
//!
//! TODO: lots of problems with how to use fences
//! TODO: talk about fence + semaphore simultaneously
//! TODO: talk about using fences to clean up
use device::Queue;
use std::sync::Arc;
pub use self::event::Event;
pub use self::fence::Fence;
pub use self::fence::FenceWaitError;
pub use self::future::AccessCheckError;
pub use self::future::AccessError;
pub use self::future::FenceSignalFuture;
pub use self::future::FlushError;
pub use self::future::GpuFuture;
pub use self::future::JoinFuture;
pub use self::future::NowFuture;
pub use self::future::SemaphoreSignalFuture;
pub use self::future::now;
pub use self::pipeline::AccessFlagBits;
pub use self::pipeline::PipelineStages;
pub use self::semaphore::Semaphore;
mod event;
mod fence;
mod future;
mod pipeline;
mod semaphore;
/// Declares in which queue(s) a resource can be used.
///
/// When you create a buffer or an image, you have to tell the Vulkan library in which queue
/// families it will be used. The vulkano library requires you to specify the queue family
/// the resource will be used in, even for exclusive mode.
#[derive(Debug, Clone, PartialEq, Eq)]
// TODO: remove
pub enum SharingMode {
/// The resource is used in only one queue family.
Exclusive(u32),
/// The resource is used in multiple queue families. Can be slower than `Exclusive`.
Concurrent(Vec<u32>), // TODO: Vec is too expensive here
}
impl<'a> From<&'a Arc<Queue>> for SharingMode {
#[inline]
fn from(queue: &'a Arc<Queue>) -> SharingMode {
SharingMode::Exclusive(queue.family().id())
}
}
impl<'a> From<&'a [&'a Arc<Queue>]> for SharingMode {
#[inline]
fn from(queues: &'a [&'a Arc<Queue>]) -> SharingMode {
SharingMode::Concurrent(queues.iter().map(|queue| queue.family().id()).collect())
}
}
/// Declares in which queue(s) a resource can be used.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Sharing<I>
where I: Iterator<Item = u32>
{
/// The resource is used in only one queue family.
Exclusive,
/// The resource is used in multiple queue families. Can be slower than `Exclusive`.
Concurrent(I),
}
| 45.775148 | 99 | 0.730352 |
26eb9ff2884e8b47951523200c6a93412a3621cc | 3,537 | use cgmath::{Point3, EuclideanSpace, InnerSpace, Vector3};
use collision::{Aabb3};
use common::surroundings_loader;
use entity;
use mob;
use server;
fn center(bounds: &Aabb3<f32>) -> Point3<f32> {
(bounds.min + bounds.max.to_vec()) * 0.5
}
// TODO: Locking is hard to reason about. Make it saner.
// The goal should be to prevent coder error from causing deadlock.
pub fn init_mobs(
server: &server::T,
) {
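// The mob AI is a small state machine encoded as behavior callbacks:
// `mob_behavior` arms once a player comes within 2 units, `wait_for_distance`
// waits for the player to move more than 8 units away, `follow_player` then
// chases until close again, and `wait_to_reset` returns to the start state.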
fn mob_behavior(world: &server::T, mob: &mut mob::Mob) {
fn to_player(world: &server::T, mob: &mob::Mob) -> Option<Vector3<f32>> {
let mob_posn = center(world.physics.lock().unwrap().get_bounds(mob.physics_id).unwrap());
let players: Vec<entity::id::Misc> = world.players.lock().unwrap().values().map(|player| player.physics_id).collect();
let mut players = players.into_iter();
players.next().map(|id| {
let mut min_v = center(world.physics.lock().unwrap().get_bounds(id).unwrap()) - mob_posn;
let mut min_d = min_v.magnitude2();
for id in players {
let v = center(world.physics.lock().unwrap().get_bounds(id).unwrap()) - mob_posn;
let d = v.magnitude2();
if d < min_d {
min_v = v;
min_d = d;
}
}
min_v
})
}
{
match to_player(world, mob) {
None => { mob.behavior = mob_behavior },
Some(to_player) => {
if to_player.magnitude() < 2.0 {
mob.behavior = wait_for_distance;
}
},
}
}
fn wait_for_distance(world: &server::T, mob: &mut mob::Mob) {
match to_player(world, mob) {
None => { mob.behavior = mob_behavior },
Some(to_player) => {
if to_player.magnitude() > 8.0 {
mob.behavior = follow_player;
}
},
}
}
fn follow_player(world: &server::T, mob: &mut mob::Mob) {
match to_player(world, mob) {
None => { mob.behavior = mob_behavior },
Some(to_player) => {
if to_player.magnitude2() < 4.0 {
mob.behavior = wait_to_reset;
mob.speed = Vector3::new(0.0, 0.0, 0.0);
} else {
mob.speed = to_player * (0.5);
}
},
}
}
fn wait_to_reset(world: &server::T, mob: &mut mob::Mob) {
match to_player(world, mob) {
None => { mob.behavior = mob_behavior },
Some(to_player) => {
if to_player.magnitude() >= 2.0 {
mob.behavior = mob_behavior;
}
},
}
}
}
add_mob(
server,
// TODO: shift upward until outside terrain
Point3::new(0.0, 64.0, -1.0),
mob_behavior,
);
}
fn add_mob(
server: &server::T,
low_corner: Point3<f32>,
behavior: mob::Behavior,
) {
let bounds = Aabb3::new(low_corner, low_corner + (&Vector3::new(1.0, 2.0, 1.0 as f32)));
let entity_id = server.mob_allocator.lock().unwrap().allocate();
let physics_id = server.misc_allocator.lock().unwrap().allocate();
let mob =
mob::Mob {
position : (bounds.min + bounds.max.to_vec()) * 0.5,
speed : Vector3::new(0.0, 0.0, 0.0),
behavior : behavior,
entity_id : entity_id,
physics_id : physics_id,
owner_id : server.owner_allocator.lock().unwrap().allocate(),
surroundings_loader : surroundings_loader::new(8, Vec::new()),
};
server.physics.lock().unwrap().insert_misc(physics_id, &bounds);
server.mobs.lock().unwrap().insert(entity_id, mob);
}
| 28.991803 | 124 | 0.558948 |
3a93cdc3935e8acf83496d744b26f524f487ce4f | 1,635 | use crate::r#type::Type;
/// Defines a struct field.
#[derive(Debug, Clone)]
pub struct Field {
/// visibility
pub vis: Option<String>,
/// Field name
pub name: String,
/// Field type
pub ty: Type,
/// Field documentation
pub documentation: Vec<String>,
/// Field annotation
pub annotation: Vec<String>,
}
impl Field {
/// Return a field definition with the provided name and type
pub fn new<S, T>(name: S, ty: T) -> Self
where
S: Into<String>,
T: Into<Type>,
{
Field {
vis: None,
name: name.into(),
ty: ty.into(),
documentation: vec![],
annotation: vec![],
}
}
/// Set field's visibility.
pub fn vis(&mut self, vis: impl Into<String>) {
self.vis = Some(vis.into());
}
/// Set field's documentation.
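///
/// A minimal sketch (the field name and type are hypothetical):
///
/// ```rust,ignore
/// let mut field = Field::new("count", "usize");
/// field.doc(vec!["Number of retries performed."]);
/// ```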
pub fn doc<II, I, S>(
&mut self, documentation: II,
) -> &mut Self
where
II: IntoIterator<IntoIter=I, Item=S>,
I: Iterator<Item=S>,
S: Into<String>,
{
self.documentation = documentation.into_iter()
.map(|doc| doc.into())
.collect();
self
}
/// Set field's annotation.
pub fn annotation<II, I, S>(
&mut self,
annotation: II,
) -> &mut Self
where
II: IntoIterator<IntoIter=I, Item=S>,
I: Iterator<Item=S>,
S: Into<String>,
{
self.annotation = annotation.into_iter()
.map(|ann| ann.into())
.collect();
self
}
}
| 21.513158 | 65 | 0.496024 |
d5f535b68d79524e98d6e221d9ec9819e9ad1a5d | 3,419 | // errorsn.rs
// This is a bigger error exercise than the previous ones!
// You can do it! :)
//
// Edit the `read_and_validate` function ONLY. Don't create any Errors
// that do not already exist.
//
// So many things could go wrong!
//
// - Reading from stdin could produce an io::Error
// - Parsing the input could produce a num::ParseIntError
// - Validating the input could produce a CreationError (defined below)
//
// How can we lump these errors into one general error? That is, what
// type goes where the question marks are, and how do we return
// that type from the body of read_and_validate?
//
// Execute `rustlings hint errorsn` for hints :)
/* Hint: While you won't need to *modify* anything besides `read_and_validate()`, you do need to *read* the PositiveNonzeroInteger `new()`
method. Additionally, read the declaration for each std::io method called in `read_and_validate()`. */
use std::error;
use std::fmt;
use std::io;
// PositiveNonzeroInteger is a struct defined below the tests.
fn read_and_validate(b: &mut dyn io::BufRead) -> Result<PositiveNonzeroInteger, Box<dyn error::Error>> {
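// Each `?` below converts its concrete error type (io::Error, ParseIntError,
// CreationError) into `Box<dyn error::Error>` via the blanket `From` impl.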
let mut line = String::new();
b.read_line(&mut line)?;
let num: i64 = line.trim().parse()?;
let answer = PositiveNonzeroInteger::new(num)?;
Ok(answer)
}
//
// Nothing below this needs to be modified
//
// This is a test helper function that turns a &str into a BufReader.
fn test_with_str(s: &str) -> Result<PositiveNonzeroInteger, Box<dyn error::Error>> {
let mut b = io::BufReader::new(s.as_bytes());
read_and_validate(&mut b)
}
#[test]
fn test_success() {
let x = test_with_str("42\n");
assert_eq!(PositiveNonzeroInteger(42), x.unwrap());
}
#[test]
fn test_not_num() {
let x = test_with_str("eleven billion\n");
assert!(x.is_err());
}
#[test]
fn test_non_positive() {
let x = test_with_str("-40\n");
assert!(x.is_err());
}
#[test]
fn test_ioerror() {
struct Broken;
impl io::Read for Broken {
fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
Err(io::Error::new(io::ErrorKind::BrokenPipe, "uh-oh!"))
}
}
let mut b = io::BufReader::new(Broken);
assert!(read_and_validate(&mut b).is_err());
assert_eq!("uh-oh!", read_and_validate(&mut b).unwrap_err().to_string());
}
#[derive(PartialEq, Debug)]
struct PositiveNonzeroInteger(u64);
impl PositiveNonzeroInteger {
fn new(value: i64) -> Result<PositiveNonzeroInteger, CreationError> {
if value == 0 {
Err(CreationError::Zero)
} else if value < 0 {
Err(CreationError::Negative)
} else {
Ok(PositiveNonzeroInteger(value as u64))
}
}
}
#[test]
fn test_positive_nonzero_integer_creation() {
assert!(PositiveNonzeroInteger::new(10).is_ok());
assert_eq!(
Err(CreationError::Negative),
PositiveNonzeroInteger::new(-10)
);
assert_eq!(Err(CreationError::Zero), PositiveNonzeroInteger::new(0));
}
#[derive(PartialEq, Debug)]
enum CreationError {
Negative,
Zero,
}
impl fmt::Display for CreationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let description = match *self {
CreationError::Negative => "Number is negative",
CreationError::Zero => "Number is zero",
};
f.write_str(description)
}
}
impl error::Error for CreationError {}
| 28.491667 | 138 | 0.6537 |
d5ed6dc130814d1fad09e6963db30990c5890791 | 15,485 | //! Universal asynchronous receiver/transmitter with EasyDMA (UARTE)
//!
//! Author
//! -------------------
//!
//! * Author: Niklas Adolfsson <[email protected]>
//! * Date: March 10 2018
use core;
use core::cell::Cell;
use core::cmp::min;
use kernel;
use kernel::common::regs::{ReadOnly, ReadWrite, WriteOnly};
use nrf5x::pinmux;
const UARTE_BASE: u32 = 0x40002000;
const UARTE_MAX_BUFFER_SIZE: u32 = 0xff;
static mut BYTE: u8 = 0;
#[repr(C)]
struct UarteRegisters {
task_startrx: WriteOnly<u32, Task::Register>,
task_stoprx: WriteOnly<u32, Task::Register>,
task_starttx: WriteOnly<u32, Task::Register>,
task_stoptx: WriteOnly<u32, Task::Register>,
_reserved1: [u32; 7],
task_flush_rx: WriteOnly<u32, Task::Register>,
_reserved2: [u32; 52],
event_cts: ReadWrite<u32, Event::Register>,
event_ncts: ReadWrite<u32, Event::Register>,
_reserved3: [u32; 2],
event_endrx: ReadWrite<u32, Event::Register>,
_reserved4: [u32; 3],
event_endtx: ReadWrite<u32, Event::Register>,
event_error: ReadWrite<u32, Event::Register>,
_reserved6: [u32; 7],
event_rxto: ReadWrite<u32, Event::Register>,
_reserved7: [u32; 1],
event_rxstarted: ReadWrite<u32, Event::Register>,
event_txstarted: ReadWrite<u32, Event::Register>,
_reserved8: [u32; 1],
event_txstopped: ReadWrite<u32, Event::Register>,
_reserved9: [u32; 41],
shorts: ReadWrite<u32, Shorts::Register>,
_reserved10: [u32; 64],
intenset: ReadWrite<u32, Interrupt::Register>,
intenclr: ReadWrite<u32, Interrupt::Register>,
_reserved11: [u32; 93],
errorsrc: ReadWrite<u32, ErrorSrc::Register>,
_reserved12: [u32; 31],
enable: ReadWrite<u32, Uart::Register>,
_reserved13: [u32; 1],
pselrts: ReadWrite<u32, Psel::Register>,
pseltxd: ReadWrite<u32, Psel::Register>,
pselcts: ReadWrite<u32, Psel::Register>,
pselrxd: ReadWrite<u32, Psel::Register>,
_reserved14: [u32; 3],
baudrate: ReadWrite<u32, Baudrate::Register>,
_reserved15: [u32; 3],
rxd_ptr: ReadWrite<u32, Pointer::Register>,
rxd_maxcnt: ReadWrite<u32, Counter::Register>,
rxd_amount: ReadOnly<u32, Counter::Register>,
_reserved16: [u32; 1],
txd_ptr: ReadWrite<u32, Pointer::Register>,
txd_maxcnt: ReadWrite<u32, Counter::Register>,
txd_amount: ReadOnly<u32, Counter::Register>,
_reserved17: [u32; 7],
config: ReadWrite<u32, Config::Register>,
}
register_bitfields! [u32,
/// Start task
Task [
ENABLE OFFSET(0) NUMBITS(1)
],
/// Read event
Event [
READY OFFSET(0) NUMBITS(1)
],
/// Shortcuts
Shorts [
// Shortcut between ENDRX and STARTRX
ENDRX_STARTRX OFFSET(5) NUMBITS(1),
// Shortcut between ENDRX and STOPRX
ENDRX_STOPRX OFFSET(6) NUMBITS(1)
],
/// UART Interrupts
Interrupt [
CTS OFFSET(0) NUMBITS(1),
NCTS OFFSET(1) NUMBITS(1),
ENDRX OFFSET(4) NUMBITS(1),
ENDTX OFFSET(8) NUMBITS(1),
ERROR OFFSET(9) NUMBITS(1),
RXTO OFFSET(17) NUMBITS(1),
RXSTARTED OFFSET(19) NUMBITS(1),
TXSTARTED OFFSET(20) NUMBITS(1),
TXSTOPPED OFFSET(22) NUMBITS(1)
],
/// UART Errors
ErrorSrc [
OVERRUN OFFSET(0) NUMBITS(1),
PARITY OFFSET(1) NUMBITS(1),
FRAMING OFFSET(2) NUMBITS(1),
BREAK OFFSET(3) NUMBITS(1)
],
/// Enable UART
Uart [
ENABLE OFFSET(0) NUMBITS(4) [
ON = 8,
OFF = 0
]
],
/// Pin select
Psel [
// Pin number
PIN OFFSET(0) NUMBITS(5),
// Connect/Disconnect
CONNECT OFFSET(31) NUMBITS(1)
],
/// Baudrate
Baudrate [
BAUDRAUTE OFFSET(0) NUMBITS(32)
],
/// DMA pointer
Pointer [
POINTER OFFSET(0) NUMBITS(32)
],
/// Counter value
Counter [
COUNTER OFFSET(0) NUMBITS(8)
],
/// Configuration of parity and flow control
Config [
HWFC OFFSET(0) NUMBITS(1),
PARITY OFFSET(1) NUMBITS(3)
]
];
/// UARTE
// It should never be instantiated outside this module, but because a static mutable
// reference to it is exported outside this module, it must be `pub`.
pub struct Uarte {
regs: *const UarteRegisters,
client: Cell<Option<&'static kernel::hil::uart::Client>>,
tx_buffer: kernel::common::cells::TakeCell<'static, [u8]>,
tx_remaining_bytes: Cell<usize>,
rx_buffer: kernel::common::cells::TakeCell<'static, [u8]>,
rx_remaining_bytes: Cell<usize>,
rx_abort_in_progress: Cell<bool>,
offset: Cell<usize>,
}
#[derive(Copy, Clone)]
pub struct UARTParams {
pub baud_rate: u32,
}
/// UARTE0 handle
// This should only be accessed by the reset_handler on startup
pub static mut UARTE0: Uarte = Uarte::new();
impl Uarte {
/// Constructor
pub const fn new() -> Uarte {
Uarte {
regs: UARTE_BASE as *const UarteRegisters,
client: Cell::new(None),
tx_buffer: kernel::common::cells::TakeCell::empty(),
tx_remaining_bytes: Cell::new(0),
rx_buffer: kernel::common::cells::TakeCell::empty(),
rx_remaining_bytes: Cell::new(0),
rx_abort_in_progress: Cell::new(false),
offset: Cell::new(0),
}
}
/// Configure which pins the UART should use for txd, rxd, cts and rts
pub fn configure(
&self,
txd: pinmux::Pinmux,
rxd: pinmux::Pinmux,
cts: pinmux::Pinmux,
rts: pinmux::Pinmux,
) {
let regs = unsafe { &*self.regs };
regs.pseltxd.write(Psel::PIN.val(txd.into()));
regs.pselrxd.write(Psel::PIN.val(rxd.into()));
regs.pselcts.write(Psel::PIN.val(cts.into()));
regs.pselrts.write(Psel::PIN.val(rts.into()));
}
fn set_baud_rate(&self, baud_rate: u32) {
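// The magic numbers below are assumed to be the nRF52 BAUDRATE register
// constants from the product specification (roughly `baud / 16 MHz * 2^32`,
// rounded).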
let regs = unsafe { &*self.regs };
match baud_rate {
1200 => regs.baudrate.set(0x0004F000),
2400 => regs.baudrate.set(0x0009D000),
4800 => regs.baudrate.set(0x0013B000),
9600 => regs.baudrate.set(0x00275000),
14400 => regs.baudrate.set(0x003AF000),
19200 => regs.baudrate.set(0x004EA000),
28800 => regs.baudrate.set(0x0075C000),
38400 => regs.baudrate.set(0x009D0000),
57600 => regs.baudrate.set(0x00EB0000),
76800 => regs.baudrate.set(0x013A9000),
115200 => regs.baudrate.set(0x01D60000),
230400 => regs.baudrate.set(0x03B00000),
250000 => regs.baudrate.set(0x04000000),
460800 => regs.baudrate.set(0x07400000),
921600 => regs.baudrate.set(0x0F000000),
1000000 => regs.baudrate.set(0x10000000),
_ => regs.baudrate.set(0x01D60000), // default to 115200
}
}
// Enable the UART peripheral. This needs to be disabled for low-power applications.
fn enable_uart(&self) {
let regs = unsafe { &*self.regs };
regs.enable.write(Uart::ENABLE::ON);
}
#[allow(dead_code)]
fn disable_uart(&self) {
let regs = unsafe { &*self.regs };
regs.enable.write(Uart::ENABLE::OFF);
}
fn enable_rx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenset.write(Interrupt::ENDRX::SET);
}
fn enable_tx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenset.write(Interrupt::ENDTX::SET);
}
fn disable_rx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenclr.write(Interrupt::ENDRX::SET);
}
fn disable_tx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenclr.write(Interrupt::ENDTX::SET);
}
/// UART interrupt handler that listens for both tx_end and rx_end events
#[inline(never)]
pub fn handle_interrupt(&mut self) {
let regs = unsafe { &*self.regs };
if self.tx_ready() {
self.disable_tx_interrupts();
let regs = unsafe { &*self.regs };
regs.event_endtx.write(Event::READY::CLEAR);
let tx_bytes = regs.txd_amount.get() as usize;
let rem = match self.tx_remaining_bytes.get().checked_sub(tx_bytes) {
None => {
debug!(
"Error more bytes transmitted than requested\n \
remaining: {} \t transmitted: {}",
self.tx_remaining_bytes.get(),
tx_bytes
);
return;
}
Some(r) => r,
};
// All bytes have been transmitted
if rem == 0 {
// Signal client write done
self.client.get().map(|client| {
self.tx_buffer.take().map(|tx_buffer| {
client.transmit_complete(
tx_buffer,
kernel::hil::uart::Error::CommandComplete,
);
});
});
} else {
// Not all bytes have been transmitted then update offset and continue transmitting
self.offset.set(self.offset.get() + tx_bytes);
self.tx_remaining_bytes.set(rem);
self.set_tx_dma_pointer_to_buffer();
regs.txd_maxcnt
.write(Counter::COUNTER.val(min(rem as u32, UARTE_MAX_BUFFER_SIZE)));
regs.task_starttx.write(Task::ENABLE::SET);
self.enable_tx_interrupts();
}
}
if self.rx_ready() {
self.disable_rx_interrupts();
// Clear the ENDRX event
regs.event_endrx.write(Event::READY::CLEAR);
// Get the number of bytes in the buffer that was received this time
let rx_bytes = regs.rxd_amount.get() as usize;
// Check if this ENDRX is due to an abort. If so, we want to
// do the receive callback immediately.
if self.rx_abort_in_progress.get() {
self.rx_abort_in_progress.set(false);
self.client.get().map(|client| {
self.rx_buffer.take().map(|rx_buffer| {
client.receive_complete(
rx_buffer,
self.offset.get() + rx_bytes,
kernel::hil::uart::Error::CommandComplete,
);
});
});
} else {
// In the normal case, we need to either call the callback
// or do another read to get more bytes.
// Update how many bytes we still need to receive and
// where we are storing in the buffer.
self.rx_remaining_bytes
.set(self.rx_remaining_bytes.get().saturating_sub(rx_bytes));
self.offset.set(self.offset.get() + rx_bytes);
let rem = self.rx_remaining_bytes.get();
if rem == 0 {
// Signal client that the read is done
self.client.get().map(|client| {
self.rx_buffer.take().map(|rx_buffer| {
client.receive_complete(
rx_buffer,
self.offset.get(),
kernel::hil::uart::Error::CommandComplete,
);
});
});
} else {
// Setup how much we can read. We already made sure that
// this will fit in the buffer.
let to_read = core::cmp::min(rem, 255);
regs.rxd_maxcnt.write(Counter::COUNTER.val(to_read as u32));
// Actually do the receive.
self.set_rx_dma_pointer_to_buffer();
regs.task_startrx.write(Task::ENABLE::SET);
self.enable_rx_interrupts();
}
}
}
}
/// Transmit one byte at a time; the client is responsible for polling.
/// This is used by the panic handler.
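///
/// A minimal polling sketch, assuming interrupts are already disabled (as in
/// the panic handler):
///
/// ```rust,ignore
/// unsafe { UARTE0.send_byte(b'!') };
/// while !unsafe { UARTE0.tx_ready() } {}
/// ```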
pub unsafe fn send_byte(&self, byte: u8) {
let regs = &*self.regs;
self.tx_remaining_bytes.set(1);
regs.event_endtx.write(Event::READY::CLEAR);
// precaution: copy value into variable with static lifetime
BYTE = byte;
regs.txd_ptr.set((&BYTE as *const u8) as u32);
regs.txd_maxcnt.write(Counter::COUNTER.val(1));
regs.task_starttx.write(Task::ENABLE::SET);
}
/// Check if the UART transmission is done
pub fn tx_ready(&self) -> bool {
let regs = unsafe { &*self.regs };
regs.event_endtx.is_set(Event::READY)
}
/// Check if either the rx_buffer is full or the UART has timed out
pub fn rx_ready(&self) -> bool {
let regs = unsafe { &*self.regs };
regs.event_endrx.is_set(Event::READY)
}
fn set_tx_dma_pointer_to_buffer(&self) {
let regs = unsafe { &*self.regs };
self.tx_buffer.map(|tx_buffer| {
regs.txd_ptr
.set(tx_buffer[self.offset.get()..].as_ptr() as u32);
});
}
fn set_rx_dma_pointer_to_buffer(&self) {
let regs = unsafe { &*self.regs };
self.rx_buffer.map(|rx_buffer| {
regs.rxd_ptr
.set(rx_buffer[self.offset.get()..].as_ptr() as u32);
});
}
}
impl kernel::hil::uart::UART for Uarte {
fn set_client(&self, client: &'static kernel::hil::uart::Client) {
self.client.set(Some(client));
}
fn init(&self, params: kernel::hil::uart::UARTParams) {
self.enable_uart();
self.set_baud_rate(params.baud_rate);
}
fn transmit(&self, tx_data: &'static mut [u8], tx_len: usize) {
let truncated_len = min(tx_data.len(), tx_len);
if truncated_len == 0 {
return;
}
self.tx_remaining_bytes.set(tx_len);
self.offset.set(0);
self.tx_buffer.replace(tx_data);
self.set_tx_dma_pointer_to_buffer();
let regs = unsafe { &*self.regs };
regs.txd_maxcnt
.write(Counter::COUNTER.val(min(tx_len as u32, UARTE_MAX_BUFFER_SIZE)));
regs.task_starttx.write(Task::ENABLE::SET);
self.enable_tx_interrupts();
}
fn receive(&self, rx_buf: &'static mut [u8], rx_len: usize) {
let regs = unsafe { &*self.regs };
// truncate rx_len if necessary
let truncated_length = core::cmp::min(rx_len, rx_buf.len());
self.rx_remaining_bytes.set(truncated_length);
self.offset.set(0);
self.rx_buffer.replace(rx_buf);
self.set_rx_dma_pointer_to_buffer();
let truncated_uart_max_length = core::cmp::min(truncated_length, 255);
regs.rxd_maxcnt
.write(Counter::COUNTER.val(truncated_uart_max_length as u32));
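// Stop any reception already in flight before kicking off the new one
// below (assumption about the intent behind triggering STOPRX first).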
regs.task_stoprx.write(Task::ENABLE::SET);
regs.task_startrx.write(Task::ENABLE::SET);
self.enable_rx_interrupts();
}
fn abort_receive(&self) {
// Trigger the STOPRX event to cancel the current receive call.
let regs = unsafe { &*self.regs };
self.rx_abort_in_progress.set(true);
regs.task_stoprx.write(Task::ENABLE::SET);
}
}
| 33.301075 | 99 | 0.564417 |
225d1537fb1e1c2ac76cb5325e25748bf7f743d0 | 2,823 | // Import the wasmer runtime so we can use it
use wasmer_runtime::{error, imports, instantiate, Array, Func, WasmPtr};
// Our entry point to our application
fn main() -> error::Result<()> {
// Let's get the .wasm file as bytes
let wasm_bytes = include_bytes!("../../../../shared/rust/passing-data.wasm");
// Now that we have the Wasm file as bytes, let's run it with the wasmer runtime
// Our import object, that allows exposing functions to our Wasm module.
// We're not importing anything, so make an empty import object.
let import_object = imports! {};
// Let's create an instance of Wasm module running in the wasmer-runtime
let instance = instantiate(wasm_bytes, &import_object)?;
// Let's get the context and memory of our Wasm instance
let wasm_instance_context = instance.context();
let wasm_instance_memory = wasm_instance_context.memory(0);
// Let's get the pointer to the buffer defined by the Wasm module in the Wasm memory.
// We use the type system and the power of generics to get a function we can call
// directly with a type signature of no arguments and returning a WasmPtr<u8, Array>
let get_wasm_memory_buffer_pointer: Func<(), WasmPtr<u8, Array>> = instance
.func("get_wasm_memory_buffer_pointer")
.expect("get_wasm_memory_buffer_pointer");
let wasm_buffer_pointer = get_wasm_memory_buffer_pointer.call().unwrap();
dbg!(wasm_buffer_pointer);
// Let's write a string to the Wasm memory
let original_string = "Did you know";
println!("The original string is: {}", original_string);
// We deref our WasmPtr to get a &[Cell<u8>]
let memory_writer = wasm_buffer_pointer
.deref(wasm_instance_memory, 0, original_string.len() as u32)
.unwrap();
for (i, b) in original_string.bytes().enumerate() {
memory_writer[i].set(b);
}
// Let's call the exported function that concatenates a phrase to our string.
let add_wasm_is_cool: Func<u32, u32> = instance
.func("add_wasm_is_cool")
.expect("Wasm is cool export");
let new_string_length = add_wasm_is_cool.call(original_string.len() as u32).unwrap();
// Get our pointer again, since memory may have shifted around
let new_wasm_buffer_pointer = get_wasm_memory_buffer_pointer.call().unwrap();
// Read the string from that new pointer.
let new_string = new_wasm_buffer_pointer
.get_utf8_string(wasm_instance_memory, new_string_length)
.unwrap();
println!("The new string is: {}", new_string);
// Asserting that the returned value from the function is our expected value.
assert_eq!(new_string, "Did you know Wasm is cool!");
// Log a success message
println!("Success!");
// Return OK since everything executed successfully!
Ok(())
}
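// For reference, a minimal sketch of the guest-side Rust this host code
// assumes (hypothetical; the real passing-data.wasm may differ):
//
// static mut WASM_MEMORY_BUFFER: [u8; 1024] = [0; 1024];
//
// #[no_mangle]
// pub extern "C" fn get_wasm_memory_buffer_pointer() -> *const u8 {
//     unsafe { WASM_MEMORY_BUFFER.as_ptr() }
// }
//
// #[no_mangle]
// pub extern "C" fn add_wasm_is_cool(len: usize) -> usize {
//     let phrase = b" Wasm is cool!";
//     unsafe { WASM_MEMORY_BUFFER[len..len + phrase.len()].copy_from_slice(phrase) };
//     len + phrase.len()
// }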
| 45.532258 | 89 | 0.698902 |
1e6c76a80250c81cfccd523fcac264cfcd32618f | 10,593 | /*
* libgit2 "diff" example - shows how to use the diff API
*
* Written by the libgit2 contributors
*
* To the extent possible under law, the author(s) have dedicated all copyright
* and related and neighboring rights to this software to the public domain
* worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#![deny(warnings)]
use docopt::Docopt;
use git2::{Diff, DiffOptions, Error, Object, ObjectType, Repository};
use git2::{DiffFindOptions, DiffFormat};
use serde_derive::Deserialize;
use std::str;
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct Args {
arg_from_oid: Option<String>,
arg_to_oid: Option<String>,
flag_patch: bool,
flag_cached: bool,
flag_nocached: bool,
flag_name_only: bool,
flag_name_status: bool,
flag_raw: bool,
flag_format: Option<String>,
flag_color: bool,
flag_no_color: bool,
flag_R: bool,
flag_text: bool,
flag_ignore_space_at_eol: bool,
flag_ignore_space_change: bool,
flag_ignore_all_space: bool,
flag_ignored: bool,
flag_untracked: bool,
flag_patience: bool,
flag_minimal: bool,
flag_stat: bool,
flag_numstat: bool,
flag_shortstat: bool,
flag_summary: bool,
flag_find_renames: Option<u16>,
flag_find_copies: Option<u16>,
flag_find_copies_harder: bool,
flag_break_rewrites: bool,
flag_unified: Option<u32>,
flag_inter_hunk_context: Option<u32>,
flag_abbrev: Option<u16>,
flag_src_prefix: Option<String>,
flag_dst_prefix: Option<String>,
flag_git_dir: Option<String>,
}
const RESET: &str = "\u{1b}[m";
const BOLD: &str = "\u{1b}[1m";
const RED: &str = "\u{1b}[31m";
const GREEN: &str = "\u{1b}[32m";
const CYAN: &str = "\u{1b}[36m";
#[derive(PartialEq, Eq, Copy, Clone)]
enum Cache {
Normal,
Only,
None,
}
fn run(args: &Args) -> Result<(), Error> {
let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or(".");
let repo = Repository::open(path)?;
// Prepare our diff options based on the arguments given
let mut opts = DiffOptions::new();
opts.reverse(args.flag_R)
.force_text(args.flag_text)
.ignore_whitespace_eol(args.flag_ignore_space_at_eol)
.ignore_whitespace_change(args.flag_ignore_space_change)
.ignore_whitespace(args.flag_ignore_all_space)
.include_ignored(args.flag_ignored)
.include_untracked(args.flag_untracked)
.patience(args.flag_patience)
.minimal(args.flag_minimal);
if let Some(amt) = args.flag_unified {
opts.context_lines(amt);
}
if let Some(amt) = args.flag_inter_hunk_context {
opts.interhunk_lines(amt);
}
if let Some(amt) = args.flag_abbrev {
opts.id_abbrev(amt);
}
if let Some(ref s) = args.flag_src_prefix {
opts.old_prefix(&s);
}
if let Some(ref s) = args.flag_dst_prefix {
opts.new_prefix(&s);
}
if let Some("diff-index") = args.flag_format.as_ref().map(|s| &s[..]) {
opts.id_abbrev(40);
}
// Prepare the diff to inspect
let t1 = tree_to_treeish(&repo, args.arg_from_oid.as_ref())?;
let t2 = tree_to_treeish(&repo, args.arg_to_oid.as_ref())?;
let head = tree_to_treeish(&repo, Some(&"HEAD".to_string()))?.unwrap();
let mut diff = match (t1, t2, args.cache()) {
(Some(t1), Some(t2), _) => {
repo.diff_tree_to_tree(t1.as_tree(), t2.as_tree(), Some(&mut opts))?
}
(t1, None, Cache::None) => {
let t1 = t1.unwrap_or(head);
repo.diff_tree_to_workdir(t1.as_tree(), Some(&mut opts))?
}
(t1, None, Cache::Only) => {
let t1 = t1.unwrap_or(head);
repo.diff_tree_to_index(t1.as_tree(), None, Some(&mut opts))?
}
(Some(t1), None, _) => {
repo.diff_tree_to_workdir_with_index(t1.as_tree(), Some(&mut opts))?
}
(None, None, _) => repo.diff_index_to_workdir(None, Some(&mut opts))?,
(None, Some(_), _) => unreachable!(),
};
// Apply rename and copy detection if requested
if args.flag_break_rewrites
|| args.flag_find_copies_harder
|| args.flag_find_renames.is_some()
|| args.flag_find_copies.is_some()
{
let mut opts = DiffFindOptions::new();
if let Some(t) = args.flag_find_renames {
opts.rename_threshold(t);
opts.renames(true);
}
if let Some(t) = args.flag_find_copies {
opts.copy_threshold(t);
opts.copies(true);
}
opts.copies_from_unmodified(args.flag_find_copies_harder)
.rewrites(args.flag_break_rewrites);
diff.find_similar(Some(&mut opts))?;
}
// Generate simple output
let stats = args.flag_stat | args.flag_numstat | args.flag_shortstat | args.flag_summary;
if stats {
print_stats(&diff, args)?;
}
if args.flag_patch || !stats {
if args.color() {
print!("{}", RESET);
}
let mut last_color = None;
diff.print(args.diff_format(), |_delta, _hunk, line| {
if args.color() {
let next = match line.origin() {
'+' => Some(GREEN),
'-' => Some(RED),
'>' => Some(GREEN),
'<' => Some(RED),
'F' => Some(BOLD),
'H' => Some(CYAN),
_ => None,
};
if args.color() && next != last_color {
if last_color == Some(BOLD) || next == Some(BOLD) {
print!("{}", RESET);
}
print!("{}", next.unwrap_or(RESET));
last_color = next;
}
}
match line.origin() {
'+' | '-' | ' ' => print!("{}", line.origin()),
_ => {}
}
print!("{}", str::from_utf8(line.content()).unwrap());
true
})?;
if args.color() {
print!("{}", RESET);
}
}
Ok(())
}
fn print_stats(diff: &Diff, args: &Args) -> Result<(), Error> {
let stats = diff.stats()?;
let mut format = git2::DiffStatsFormat::NONE;
if args.flag_stat {
format |= git2::DiffStatsFormat::FULL;
}
if args.flag_shortstat {
format |= git2::DiffStatsFormat::SHORT;
}
if args.flag_numstat {
format |= git2::DiffStatsFormat::NUMBER;
}
if args.flag_summary {
format |= git2::DiffStatsFormat::INCLUDE_SUMMARY;
}
let buf = stats.to_buf(format, 80)?;
print!("{}", str::from_utf8(&*buf).unwrap());
Ok(())
}
fn tree_to_treeish<'a>(
repo: &'a Repository,
arg: Option<&String>,
) -> Result<Option<Object<'a>>, Error> {
let arg = match arg {
Some(s) => s,
None => return Ok(None),
};
let obj = repo.revparse_single(arg)?;
let tree = obj.peel(ObjectType::Tree)?;
Ok(Some(tree))
}
impl Args {
fn cache(&self) -> Cache {
if self.flag_cached {
Cache::Only
} else if self.flag_nocached {
Cache::None
} else {
Cache::Normal
}
}
fn color(&self) -> bool {
self.flag_color && !self.flag_no_color
}
fn diff_format(&self) -> DiffFormat {
if self.flag_patch {
DiffFormat::Patch
} else if self.flag_name_only {
DiffFormat::NameOnly
} else if self.flag_name_status {
DiffFormat::NameStatus
} else if self.flag_raw {
DiffFormat::Raw
} else {
match self.flag_format.as_ref().map(|s| &s[..]) {
Some("name") => DiffFormat::NameOnly,
Some("name-status") => DiffFormat::NameStatus,
Some("raw") => DiffFormat::Raw,
Some("diff-index") => DiffFormat::Raw,
_ => DiffFormat::Patch,
}
}
}
}
fn main() {
const USAGE: &str = "
usage: diff [options] [<from-oid> [<to-oid>]]
Options:
-p, --patch show output in patch format
--cached use staged changes as diff
--nocached do not use staged changes
--name-only show only names of changed files
--name-status show only names and status changes
--raw generate the raw format
--format=<format> specify format for stat summary
--color use color output
--no-color never use color output
-R swap two inputs
-a, --text treat all files as text
--ignore-space-at-eol ignore changes in whitespace at EOL
-b, --ignore-space-change ignore changes in amount of whitespace
-w, --ignore-all-space ignore whitespace when comparing lines
--ignored show ignored files as well
--untracked show untracked files
--patience generate diff using the patience algorithm
--minimal spend extra time to find smallest diff
--stat generate a diffstat
--numstat similar to --stat, but more machine friendly
--shortstat only output last line of --stat
--summary output condensed summary of header info
-M, --find-renames <n> set threshold for finding renames (default 50)
-C, --find-copies <n> set threshold for finding copies (default 50)
--find-copies-harder inspect unmodified files for sources of copies
-B, --break-rewrites break complete rewrite changes into pairs
-U, --unified <n> lines of context to show
--inter-hunk-context <n> maximum lines of change between hunks
--abbrev <n> length to abbreviate commits to
--src-prefix <s> show given source prefix instead of 'a/'
--dst-prefix <s> show given destination prefix instead of 'b/'
--git-dir <path> path to git repository to use
-h, --help show this message
";
let args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
match run(&args) {
Ok(()) => {}
Err(e) => println!("error: {}", e),
}
}
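// Example invocations (hypothetical; assuming this is built as a cargo
// example named `diff` inside the git2 crate):
//
// $ cargo run --example diff -- --stat HEAD~1 HEAD
// $ cargo run --example diff -- -p --color <from-oid> <to-oid>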
| 33.951923 | 93 | 0.561408 |
01a01bf14ba62f0ba9332bee6476e3f3550f62fd | 6,347 | // Copyright (c) 2019 - 2020 ESRLabs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Controls North runtime instances
use crate::{
process_assert::ProcessAssert,
util::{cargo_bin, CaptureReader},
};
use color_eyre::eyre::{eyre, Error, Result, WrapErr};
use log::{error, info};
use std::{path::Path, process::Stdio};
use tokio::{
process::{Child, Command},
time,
time::timeout,
};
const TIMEOUT: time::Duration = time::Duration::from_secs(3);
async fn nstar(command: &str) -> Result<()> {
let output = Command::new(cargo_bin("nstar"))
.arg(&command)
.output()
.await?;
// TODO sometimes the shutdown command won't get a reply
if command != "shutdown" && !output.status.success() {
let error_msg = String::from_utf8(output.stderr)?;
error!("Failed to run nstar {}: {}", command, error_msg);
Err(eyre!("Failed to run nstar {}: {}", command, error_msg))
} else {
info!("nstar {}: {}", command, String::from_utf8(output.stdout)?);
Ok(())
}
}
/// A running instance of north.
pub struct Runtime {
child: Child,
output: CaptureReader,
}
impl Runtime {
/// Launches an instance of north
///
/// # Examples
///
/// ```no_run
/// use color_eyre::eyre::Result;
/// use north_tests::runtime::Runtime;
///
/// #[tokio::main]
/// async fn main() -> Result<()> {
/// let north = Runtime::launch().await?;
/// Ok(())
/// }
/// ```
pub async fn launch() -> Result<Runtime, Error> {
let launch = async move {
let mut child = Command::new(cargo_bin("north"))
.current_dir("..")
.stdout(Stdio::piped())
.kill_on_drop(true)
.spawn()
.wrap_err("Could not spawn north")?;
let stdout = child
.stdout
.take()
.ok_or_else(|| eyre!("Cannot get stdout of child"))?;
let mut output = CaptureReader::new(stdout).await;
output
.captures("Starting console on localhost:4200")
.await
.wrap_err("Failed to open north console")?;
Ok::<Runtime, Error>(Runtime { child, output })
};
timeout(TIMEOUT, launch)
.await
.wrap_err("launching north timed out")
.and_then(|result| result)
}
pub async fn expect_output(&mut self, regex: &str) -> Result<Vec<String>> {
let search = self.output.captures(regex);
timeout(TIMEOUT, search)
.await
.wrap_err_with(|| format!("Search for pattern \"{}\" timed out", regex))
.and_then(|res| res)?
.ok_or_else(|| eyre!("Pattern not found"))
}
pub async fn start(&mut self, name: &str) -> Result<ProcessAssert> {
let start = async move {
nstar(&format!("start {}", name)).await?;
// Get the container's pid out of north's stdout
let captures = self
.output
.captures(&format!("\\[(\\d+)\\] {}: 1: ", name))
.await?
.ok_or_else(|| eyre!("Couldn't find {}'s pid", name))?;
let pid = captures
.into_iter()
.nth(1)
.unwrap()
.parse::<u64>()
.wrap_err(format!("Could not capture {}'s PID", name))?;
Ok::<ProcessAssert, Error>(ProcessAssert::new(pid))
};
timeout(TIMEOUT, start)
.await
.wrap_err_with(|| format!("Failed to start container {}", name))
.and_then(|result| result)
}
pub async fn stop(&mut self, container_name: &str) -> Result<()> {
let stop = async move {
nstar(&format!("stop {}", container_name)).await?;
// Check that the container stopped
self.output
.captures(&format!("Stopped {}", container_name))
.await
.wrap_err(format!("Failed to wait for {} to stop", container_name))?;
Ok::<(), Error>(())
};
timeout(TIMEOUT, stop)
.await
.wrap_err_with(|| format!("Failed to stop {}", container_name))
.and_then(|result| result)
}
pub async fn try_stop(&mut self, container_name: &str) -> Result<()> {
let command = format!("stop {}", container_name);
timeout(TIMEOUT, nstar(&command))
.await
.wrap_err_with(|| format!("Failed to stop {}", container_name))
.and_then(|result| result)
}
pub async fn install(&mut self, npk: &Path) -> Result<()> {
let command = format!("install {}", npk.display());
timeout(TIMEOUT, nstar(&command))
.await
.wrap_err("Installing npk timed out")
.and_then(|res| res)
}
pub async fn uninstall(&mut self, name: &str, version: &str) -> Result<()> {
let command = format!("uninstall {} {}", name, version);
timeout(TIMEOUT, nstar(&command))
.await
.wrap_err("Uninstalling npk timed out")
.and_then(|res| res)
}
pub async fn shutdown(&mut self) -> Result<()> {
let shutdown = async {
nstar("shutdown").await?;
// Check that the shutdown request was received
self.output
.captures("Shutting down...")
.await
.wrap_err("Shutdown request was not received")?;
self.child.wait().await?;
Ok::<(), color_eyre::eyre::Error>(())
};
timeout(TIMEOUT, shutdown)
.await
.wrap_err("Shutting down runtime timed out")
.and_then(|res| res)
}
}
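// A hypothetical end-to-end test sketch built on this helper (container
// name, npk path, and the `is_running` assertion are assumptions):
//
// #[tokio::test]
// async fn install_start_stop() -> Result<()> {
//     let mut runtime = Runtime::launch().await?;
//     runtime.install(Path::new("target/north/registry/hello.npk")).await?;
//     let hello = runtime.start("hello").await?;
//     assert!(hello.is_running().await?);
//     runtime.stop("hello").await?;
//     runtime.shutdown().await
// }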
| 32.055556 | 85 | 0.534426 |
9c77a32b123b9a6c6ec8765f64e9306199cf3cca | 1,836 | #[cfg(test)]
mod tests {
use crate::{solve_puzzle, get_ints, solve_puzzle_2};
use std::io::Error;
use utils::get_file;
#[test]
fn test_get_ints() -> Result<(), Error> {
let ints = get_ints(&get_file(file!(), "1.txt"));
assert_eq!(vec!(1721, 979, 366, 299, 675, 1456), ints?);
Ok(())
}
#[test]
fn expense_report() -> Result<(), Error> {
let puzzle_result = 514579;
let result = solve_puzzle(&get_file(file!(), "1.txt"))?;
assert_eq!(puzzle_result, result);
Ok(())
}
#[test]
fn expense_report_2() -> Result<(), Error> {
let puzzle_result = 241861950;
let result = solve_puzzle_2(&get_file(file!(), "1.txt"))?;
assert_eq!(puzzle_result, result);
Ok(())
}
#[test]
fn real_expense_report() -> Result<(), Error> {
let puzzle_result = 935419;
let result = solve_puzzle(&get_file(file!(), "2.txt"))?;
assert_eq!(puzzle_result, result);
Ok(())
}
#[test]
fn real_expense_report_2() -> Result<(), Error> {
let puzzle_result = 49880012;
let result = solve_puzzle_2(&get_file(file!(), "2.txt"))?;
assert_eq!(puzzle_result, result);
Ok(())
}
} | 28.246154 | 65 | 0.535948 |
0975c5bd05cf76b0539d61551aae6f5246750280 | 1,979 | use crate::{context::Context, graphics::backend::State};
const FONT_SIZE: f32 = 30.0;
const COLOR: wgpu::Color = wgpu::Color::GREEN;
// struct DebugInfo {
// text: String,
// }
pub struct Debugger {
queue: Vec<String>,
}
impl Debugger {
pub fn new() -> Self {
Debugger { queue: Vec::new() }
}
pub fn queue(&mut self, text: String) {
self.queue.push(text)
}
pub fn render(&mut self, gfx: &mut State) {
// Store current row's width
let mut end_x = 0.0;
let mut y = 0.0;
let max_width = gfx.size.width as f32;
self.queue.iter().for_each(|d| {
// Approximate the sentence's rendered width as d.len() * FONT_SIZE
let debug_len = d.len() as f32 * FONT_SIZE;
// add to current row len for a tentative size
let predicted_len = debug_len + end_x;
// println!("The len of {} is {}", d, debug_len);
// println!("Max len is {}", max_width);
match predicted_len.partial_cmp(&max_width) {
// if the tentative size is less than or equal to max width
Some(std::cmp::Ordering::Less) | Some(std::cmp::Ordering::Equal) => {
// draw it at current x
gfx.draw_text(d, end_x, y, COLOR, FONT_SIZE);
// and set end x to the tentative len
end_x = predicted_len
}
// Otherwise
_ => {
// increment current y
y += FONT_SIZE;
// draw at x=0, y=current_y
gfx.draw_text(d, 0.0, y, COLOR, FONT_SIZE);
// then set current row len to the debug_len
end_x = debug_len
}
}
});
// Clear the queue so entries aren't redrawn next frame
self.queue.clear();
}
}
impl Default for Debugger {
fn default() -> Self {
Self::new()
}
}
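// A minimal per-frame usage sketch (hypothetical; assumes `gfx: &mut State`
// comes from the surrounding render loop):
//
// let mut debugger = Debugger::new();
// debugger.queue(format!("fps: {}", 60));
// debugger.queue("hello from the debugger".to_string());
// debugger.render(gfx); // wraps onto a new row when a line would exceed the window width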
| 29.984848 | 85 | 0.496716 |
ef1c17f4f4ea7317c0daa970c8d38b0ab2d640f6 | 553 | // Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
mod balance;
mod balance_diff;
mod conflict;
mod error;
mod migration;
mod output_diff;
mod receipt;
mod treasury_diff;
mod treasury_output;
mod unspent;
pub use balance::Balance;
pub use balance_diff::{BalanceDiff, BalanceDiffs};
pub use conflict::ConflictReason;
pub use error::Error;
pub use migration::Migration;
pub use output_diff::OutputDiff;
pub use receipt::Receipt;
pub use treasury_diff::TreasuryDiff;
pub use treasury_output::TreasuryOutput;
pub use unspent::Unspent;
| 22.12 | 50 | 0.79566 |
18e5f4c5163d066cd65e9a0e6568bfa9553060e7 | 18,953 | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
super::BitFlags as _,
crate::{base_packages::BasePackages, index::PackageIndex},
async_trait::async_trait,
fidl::endpoints::ServerEnd,
fidl_fuchsia_io::{
NodeAttributes, NodeMarker, DIRENT_TYPE_DIRECTORY, INO_UNKNOWN, MODE_TYPE_DIRECTORY,
OPEN_FLAG_APPEND, OPEN_FLAG_CREATE, OPEN_FLAG_CREATE_IF_ABSENT, OPEN_FLAG_POSIX_DEPRECATED,
OPEN_FLAG_POSIX_EXECUTABLE, OPEN_FLAG_POSIX_WRITABLE, OPEN_FLAG_TRUNCATE,
OPEN_RIGHT_WRITABLE,
},
fuchsia_hash::Hash,
fuchsia_pkg::{PackageName, PackageVariant},
fuchsia_zircon as zx,
futures::lock::Mutex,
std::{
collections::{BTreeMap, HashMap},
str::FromStr,
sync::Arc,
},
system_image::NonStaticAllowList,
vfs::{
common::send_on_open_with_error,
directory::{
connection::io1::DerivedConnection,
dirents_sink,
entry::{DirectoryEntry, EntryInfo},
entry_container::Directory,
immutable::connection::io1::ImmutableConnection,
traversal_position::TraversalPosition,
},
execution_scope::ExecutionScope,
path::Path,
},
};
mod variants;
use variants::PkgfsPackagesVariants;
#[derive(Debug)]
pub struct PkgfsPackages {
base_packages: Arc<BasePackages>,
non_base_packages: Arc<Mutex<PackageIndex>>,
non_static_allow_list: Arc<NonStaticAllowList>,
blobfs: blobfs::Client,
}
impl PkgfsPackages {
pub fn new(
base_packages: Arc<BasePackages>,
non_base_packages: Arc<Mutex<PackageIndex>>,
non_static_allow_list: Arc<NonStaticAllowList>,
blobfs: blobfs::Client,
) -> Self {
Self { base_packages, non_base_packages, non_static_allow_list, blobfs }
}
async fn packages(&self) -> HashMap<PackageName, HashMap<PackageVariant, Hash>> {
let mut res: HashMap<PackageName, HashMap<PackageVariant, Hash>> = HashMap::new();
// First populate with base packages.
for (path, hash) in self.base_packages.paths_to_hashes() {
let name = path.name().to_owned();
let variant = path.variant().to_owned();
res.entry(name).or_default().insert(variant, *hash);
}
// Then fill in allowed dynamic packages, which may override existing base packages.
let active_packages = self.non_base_packages.lock().await.active_packages();
for (path, hash) in active_packages {
if !self.non_static_allow_list.allows(path.name()) {
continue;
}
let name = path.name().to_owned();
let variant = path.variant().to_owned();
res.entry(name).or_default().insert(variant, hash);
}
res
}
async fn package_variants(&self, name: &PackageName) -> Option<HashMap<PackageVariant, Hash>> {
self.packages().await.remove(name)
}
async fn directory_entries(&self) -> BTreeMap<String, super::DirentType> {
self.packages()
.await
.into_iter()
.map(|(k, _)| (k.into(), super::DirentType::Directory))
.collect()
}
}
impl DirectoryEntry for PkgfsPackages {
fn open(
self: Arc<Self>,
scope: ExecutionScope,
flags: u32,
mode: u32,
mut path: Path,
server_end: ServerEnd<NodeMarker>,
) {
let flags = flags.unset(OPEN_FLAG_POSIX_WRITABLE);
let flags = if flags.is_any_set(OPEN_FLAG_POSIX_DEPRECATED) {
flags.unset(OPEN_FLAG_POSIX_DEPRECATED).set(OPEN_FLAG_POSIX_EXECUTABLE)
} else {
flags
};
// This directory and all child nodes are read-only
if flags
& (OPEN_RIGHT_WRITABLE
| OPEN_FLAG_CREATE
| OPEN_FLAG_CREATE_IF_ABSENT
| OPEN_FLAG_TRUNCATE
| OPEN_FLAG_APPEND)
!= 0
{
return send_on_open_with_error(flags, server_end, zx::Status::NOT_SUPPORTED);
}
scope.clone().spawn(async move {
match path.next().map(PackageName::from_str) {
None => ImmutableConnection::create_connection(scope, self, flags, server_end),
Some(Ok(package_name)) => match self.package_variants(&package_name).await {
Some(variants) => {
Arc::new(PkgfsPackagesVariants::new(variants, self.blobfs.clone()))
.open(scope, flags, mode, path, server_end)
}
None => send_on_open_with_error(flags, server_end, zx::Status::NOT_FOUND),
},
Some(Err(_)) => {
// Names that are not valid package names can't exist in this directory.
send_on_open_with_error(flags, server_end, zx::Status::NOT_FOUND)
}
}
})
}
fn entry_info(&self) -> EntryInfo {
EntryInfo::new(INO_UNKNOWN, DIRENT_TYPE_DIRECTORY)
}
}
#[async_trait]
impl Directory for PkgfsPackages {
async fn read_dirents<'a>(
&'a self,
pos: &'a TraversalPosition,
sink: Box<(dyn dirents_sink::Sink + 'static)>,
) -> Result<(TraversalPosition, Box<(dyn dirents_sink::Sealed + 'static)>), zx::Status> {
// If the directory contents change between a client's paginated
// fuchsia.io/Directory.ReadDirents calls, the client may not see a
// consistent snapshot of the directory contents.
super::read_dirents(&self.directory_entries().await, pos, sink).await
}
fn register_watcher(
self: Arc<Self>,
_: ExecutionScope,
_: u32,
_: fidl::AsyncChannel,
) -> Result<(), zx::Status> {
Err(zx::Status::NOT_SUPPORTED)
}
// `register_watcher` is unsupported so this is a no-op.
fn unregister_watcher(self: Arc<Self>, _: usize) {}
async fn get_attrs(&self) -> Result<NodeAttributes, zx::Status> {
Ok(NodeAttributes {
mode: MODE_TYPE_DIRECTORY,
id: 1,
content_size: 0,
storage_size: 0,
link_count: 1,
creation_time: 0,
modification_time: 0,
})
}
fn close(&self) -> Result<(), zx::Status> {
Ok(())
}
}
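// A minimal wiring sketch (hypothetical; mirrors the `proxy` test helper below):
//
// let (proxy, server_end) =
//     fidl::endpoints::create_proxy::<fidl_fuchsia_io::DirectoryMarker>()?;
// vfs::directory::entry::DirectoryEntry::open(
//     pkgfs_packages,
//     ExecutionScope::new(),
//     OPEN_RIGHT_READABLE,
//     0,
//     Path::dot(),
//     server_end.into_channel().into(),
// );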
#[cfg(test)]
mod tests {
use {
super::*,
crate::{compat::pkgfs::testing::FakeSink, index::register_dynamic_package},
assert_matches::assert_matches,
fidl_fuchsia_io::{OPEN_RIGHT_EXECUTABLE, OPEN_RIGHT_READABLE},
fuchsia_pkg::PackagePath,
fuchsia_pkg_testing::{blobfs::Fake as FakeBlobfs, PackageBuilder},
maplit::{convert_args, hashmap},
std::collections::HashSet,
};
impl PkgfsPackages {
pub fn new_test(
base_packages: Vec<(PackagePath, Hash)>,
non_static_allow_list: NonStaticAllowList,
) -> (Arc<Self>, Arc<Mutex<PackageIndex>>) {
let (blobfs, _) = blobfs::Client::new_mock();
let index = Arc::new(Mutex::new(PackageIndex::new_test()));
(
Arc::new(PkgfsPackages::new(
Arc::new(BasePackages::new_test_only(
// PkgfsPackages only uses the path-hash mapping, so tests do not need to
// populate the blob hashes.
HashSet::new(),
base_packages,
)),
Arc::clone(&index),
Arc::new(non_static_allow_list),
blobfs,
)),
index,
)
}
fn proxy(self: &Arc<Self>, flags: u32) -> fidl_fuchsia_io::DirectoryProxy {
let (proxy, server_end) =
fidl::endpoints::create_proxy::<fidl_fuchsia_io::DirectoryMarker>().unwrap();
vfs::directory::entry::DirectoryEntry::open(
Arc::clone(&self),
ExecutionScope::new(),
flags,
0,
Path::dot(),
server_end.into_channel().into(),
);
proxy
}
}
macro_rules! package_name_hashmap {
($($inner:tt)*) => {
convert_args!(
keys = |s| PackageName::from_str(s).unwrap(),
hashmap!($($inner)*)
)
};
}
macro_rules! package_variant_hashmap {
($($inner:tt)*) => {
convert_args!(
keys = |s| PackageVariant::from_str(s).unwrap(),
hashmap!($($inner)*)
)
};
}
fn non_static_allow_list(names: &[&str]) -> NonStaticAllowList {
NonStaticAllowList::parse(names.join("\n").as_bytes()).unwrap()
}
fn hash(n: u8) -> Hash {
Hash::from([n; 32])
}
fn path(name: &str, variant: &str) -> PackagePath {
PackagePath::from_name_and_variant(name.parse().unwrap(), variant.parse().unwrap())
}
#[fuchsia_async::run_singlethreaded(test)]
async fn minimal_lifecycle() {
let (pkgfs_packages, _package_index) =
PkgfsPackages::new_test(vec![], non_static_allow_list(&[]));
drop(pkgfs_packages);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn packages_listing_unions_indices() {
let (pkgfs_packages, package_index) = PkgfsPackages::new_test(
vec![(path("static", "0"), hash(0))],
non_static_allow_list(&["dynamic"]),
);
register_dynamic_package(&package_index, path("dynamic", "0"), hash(1)).await;
register_dynamic_package(&package_index, path("dynamic", "1"), hash(2)).await;
assert_eq!(
pkgfs_packages.packages().await,
package_name_hashmap!(
"static" => package_variant_hashmap!{
"0" => hash(0),
},
"dynamic" => package_variant_hashmap!{
"0" => hash(1),
"1" => hash(2),
},
)
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn packages_listing_dynamic_overrides_static() {
let (pkgfs_packages, package_index) = PkgfsPackages::new_test(
vec![(path("replaceme", "0"), hash(0)), (path("replaceme", "butnotme"), hash(1))],
non_static_allow_list(&["replaceme"]),
);
register_dynamic_package(&package_index, path("replaceme", "0"), hash(10)).await;
assert_eq!(
pkgfs_packages.packages().await,
package_name_hashmap!(
"replaceme" => package_variant_hashmap!{
"0" => hash(10),
"butnotme" => hash(1),
},
)
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn packages_listing_ignores_disallowed_dynamic_packages() {
let (pkgfs_packages, package_index) = PkgfsPackages::new_test(
vec![(path("allowed", "0"), hash(0)), (path("static", "0"), hash(1))],
non_static_allow_list(&["allowed"]),
);
register_dynamic_package(&package_index, path("allowed", "0"), hash(10)).await;
register_dynamic_package(&package_index, path("static", "0"), hash(11)).await;
register_dynamic_package(&package_index, path("dynamic", "0"), hash(12)).await;
assert_eq!(
pkgfs_packages.packages().await,
package_name_hashmap!(
"allowed" => package_variant_hashmap!{
"0" => hash(10),
},
"static" => package_variant_hashmap!{
"0" => hash(1),
},
)
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn readdir_empty() {
let (pkgfs_packages, _package_index) =
PkgfsPackages::new_test(vec![], non_static_allow_list(&[]));
// Given adequate buffer space, the only entry is itself (".").
let (pos, sealed) = Directory::read_dirents(
&*pkgfs_packages,
&TraversalPosition::Start,
Box::new(FakeSink::new(100)),
)
.await
.expect("read_dirents failed");
assert_eq!(
FakeSink::from_sealed(sealed).entries,
vec![(".".to_owned(), EntryInfo::new(INO_UNKNOWN, DIRENT_TYPE_DIRECTORY)),]
);
assert_eq!(pos, TraversalPosition::End);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn readdir_enumerates_all_allowed_entries() {
let (pkgfs_packages, package_index) = PkgfsPackages::new_test(
vec![
(path("allowed", "0"), hash(0)),
(path("static", "0"), hash(1)),
(path("static", "1"), hash(2)),
(path("still", "static"), hash(3)),
],
non_static_allow_list(&["allowed", "dynonly", "missing"]),
);
register_dynamic_package(&package_index, path("allowed", "dynamic-package"), hash(10))
.await;
register_dynamic_package(&package_index, path("static", "0"), hash(11)).await;
register_dynamic_package(&package_index, path("dynamic", "0"), hash(12)).await;
register_dynamic_package(&package_index, path("dynonly", "0"), hash(14)).await;
let (pos, sealed) = Directory::read_dirents(
&*pkgfs_packages,
&TraversalPosition::Start,
Box::new(FakeSink::new(100)),
)
.await
.expect("read_dirents failed");
assert_eq!(
FakeSink::from_sealed(sealed).entries,
vec![
(".".to_owned(), EntryInfo::new(INO_UNKNOWN, DIRENT_TYPE_DIRECTORY)),
("allowed".to_owned(), EntryInfo::new(INO_UNKNOWN, DIRENT_TYPE_DIRECTORY)),
("dynonly".to_owned(), EntryInfo::new(INO_UNKNOWN, DIRENT_TYPE_DIRECTORY)),
("static".to_owned(), EntryInfo::new(INO_UNKNOWN, DIRENT_TYPE_DIRECTORY)),
("still".to_owned(), EntryInfo::new(INO_UNKNOWN, DIRENT_TYPE_DIRECTORY)),
]
);
assert_eq!(pos, TraversalPosition::End);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn open_rejects_invalid_name() {
let (pkgfs_packages, _package_index) =
PkgfsPackages::new_test(vec![], non_static_allow_list(&[]));
let proxy = pkgfs_packages.proxy(OPEN_RIGHT_READABLE);
assert_matches!(
io_util::directory::open_directory(
&proxy,
"invalidname-!@#$%^&*()+=",
OPEN_RIGHT_READABLE
)
.await,
Err(io_util::node::OpenError::OpenError(zx::Status::NOT_FOUND))
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn open_rejects_missing_package() {
let (pkgfs_packages, _package_index) =
PkgfsPackages::new_test(vec![], non_static_allow_list(&[]));
let proxy = pkgfs_packages.proxy(OPEN_RIGHT_READABLE);
assert_matches!(
io_util::directory::open_directory(&proxy, "missing", OPEN_RIGHT_READABLE).await,
Err(io_util::node::OpenError::OpenError(zx::Status::NOT_FOUND))
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn open_opens_static_package_variants() {
let (pkgfs_packages, _package_index) = PkgfsPackages::new_test(
vec![(path("static", "0"), hash(0))],
non_static_allow_list(&[]),
);
let proxy = pkgfs_packages.proxy(OPEN_RIGHT_READABLE);
assert_matches!(
io_util::directory::open_directory(&proxy, "static", OPEN_RIGHT_READABLE).await,
Ok(_)
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn open_opens_allowed_dynamic_variants() {
let (pkgfs_packages, package_index) =
PkgfsPackages::new_test(vec![], non_static_allow_list(&["dynamic"]));
let proxy = pkgfs_packages.proxy(OPEN_RIGHT_READABLE);
assert_matches!(
io_util::directory::open_directory(&proxy, "dynamic", OPEN_RIGHT_READABLE).await,
Err(io_util::node::OpenError::OpenError(zx::Status::NOT_FOUND))
);
register_dynamic_package(&package_index, path("dynamic", "0"), hash(0)).await;
assert_matches!(
io_util::directory::open_directory(&proxy, "dynamic", OPEN_RIGHT_READABLE).await,
Ok(_)
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn open_opens_path_within_known_package_variant() {
let package_index = Arc::new(Mutex::new(PackageIndex::new_test()));
let (blobfs_fake, blobfs_client) = FakeBlobfs::new();
let pkgfs_packages = Arc::new(PkgfsPackages::new(
Arc::new(BasePackages::new_test_only(HashSet::new(), vec![])),
Arc::clone(&package_index),
Arc::new(non_static_allow_list(&["dynamic"])),
blobfs_client,
));
let proxy = pkgfs_packages.proxy(OPEN_RIGHT_READABLE);
let package = PackageBuilder::new("dynamic")
.add_resource_at("meta/message", &b"yes"[..])
.build()
.await
.expect("created pkg");
let (metafar_blob, _) = package.contents();
blobfs_fake.add_blob(metafar_blob.merkle, metafar_blob.contents);
register_dynamic_package(&package_index, path("dynamic", "0"), metafar_blob.merkle).await;
let file =
io_util::directory::open_file(&proxy, "dynamic/0/meta/message", OPEN_RIGHT_READABLE)
.await
.unwrap();
let message = io_util::file::read_to_string(&file).await.unwrap();
assert_eq!(message, "yes");
}
#[fuchsia_async::run_singlethreaded(test)]
async fn open_unsets_posix_writable() {
let (pkgfs_packages, _package_index) =
PkgfsPackages::new_test(vec![], non_static_allow_list(&[]));
let proxy = pkgfs_packages.proxy(OPEN_RIGHT_READABLE | OPEN_FLAG_POSIX_WRITABLE);
let (status, flags) = proxy.get_flags().await.unwrap();
let () = zx::Status::ok(status).unwrap();
assert_eq!(flags, OPEN_RIGHT_READABLE);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn open_converts_posix_deprecated_to_posix_exec() {
let (pkgfs_packages, _package_index) =
PkgfsPackages::new_test(vec![], non_static_allow_list(&[]));
let proxy = pkgfs_packages.proxy(OPEN_RIGHT_READABLE | OPEN_FLAG_POSIX_DEPRECATED);
let (status, flags) = proxy.get_flags().await.unwrap();
let () = zx::Status::ok(status).unwrap();
assert_eq!(flags, OPEN_RIGHT_READABLE | OPEN_RIGHT_EXECUTABLE);
}
}
| 35.426168 | 99 | 0.579697 |
26cf393d6e968baa376bab83a092d67b7a974234 | 6,217 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::EFUSE_SYSCFG2 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct SYS_EEROM_ANAPAR_SPLL_24_15R {
bits: u16,
}
impl SYS_EEROM_ANAPAR_SPLL_24_15R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct SYS_EEROM_ANAPAR_SPLL_05_02R {
bits: u8,
}
impl SYS_EEROM_ANAPAR_SPLL_05_02R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct SYS_EEROM_XTAL_STEL_SELR {
bits: u8,
}
impl SYS_EEROM_XTAL_STEL_SELR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct SYS_SYS_EEROM_XTAL_FREQ_SELR {
bits: u8,
}
impl SYS_SYS_EEROM_XTAL_FREQ_SELR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _SYS_EEROM_ANAPAR_SPLL_24_15W<'a> {
w: &'a mut W,
}
impl<'a> _SYS_EEROM_ANAPAR_SPLL_24_15W<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 1023;
const OFFSET: u8 = 21;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _SYS_EEROM_ANAPAR_SPLL_05_02W<'a> {
w: &'a mut W,
}
impl<'a> _SYS_EEROM_ANAPAR_SPLL_05_02W<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 15;
const OFFSET: u8 = 16;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _SYS_EEROM_XTAL_STEL_SELW<'a> {
w: &'a mut W,
}
impl<'a> _SYS_EEROM_XTAL_STEL_SELW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 3;
const OFFSET: u8 = 12;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _SYS_SYS_EEROM_XTAL_FREQ_SELW<'a> {
w: &'a mut W,
}
impl<'a> _SYS_SYS_EEROM_XTAL_FREQ_SELW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 3;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 21:30"]
#[inline]
pub fn sys_eerom_anapar_spll_24_15(&self) -> SYS_EEROM_ANAPAR_SPLL_24_15R {
let bits = {
const MASK: u16 = 1023;
const OFFSET: u8 = 21;
((self.bits >> OFFSET) & MASK as u32) as u16
};
SYS_EEROM_ANAPAR_SPLL_24_15R { bits }
}
#[doc = "Bits 16:19"]
#[inline]
pub fn sys_eerom_anapar_spll_05_02(&self) -> SYS_EEROM_ANAPAR_SPLL_05_02R {
let bits = {
const MASK: u8 = 15;
const OFFSET: u8 = 16;
((self.bits >> OFFSET) & MASK as u32) as u8
};
SYS_EEROM_ANAPAR_SPLL_05_02R { bits }
}
#[doc = "Bits 12:13"]
#[inline]
pub fn sys_eerom_xtal_stel_sel(&self) -> SYS_EEROM_XTAL_STEL_SELR {
let bits = {
const MASK: u8 = 3;
const OFFSET: u8 = 12;
((self.bits >> OFFSET) & MASK as u32) as u8
};
SYS_EEROM_XTAL_STEL_SELR { bits }
}
#[doc = "Bits 8:9"]
#[inline]
pub fn sys_sys_eerom_xtal_freq_sel(&self) -> SYS_SYS_EEROM_XTAL_FREQ_SELR {
let bits = {
const MASK: u8 = 3;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u32) as u8
};
SYS_SYS_EEROM_XTAL_FREQ_SELR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 21:30"]
#[inline]
pub fn sys_eerom_anapar_spll_24_15(&mut self) -> _SYS_EEROM_ANAPAR_SPLL_24_15W {
_SYS_EEROM_ANAPAR_SPLL_24_15W { w: self }
}
#[doc = "Bits 16:19"]
#[inline]
pub fn sys_eerom_anapar_spll_05_02(&mut self) -> _SYS_EEROM_ANAPAR_SPLL_05_02W {
_SYS_EEROM_ANAPAR_SPLL_05_02W { w: self }
}
#[doc = "Bits 12:13"]
#[inline]
pub fn sys_eerom_xtal_stel_sel(&mut self) -> _SYS_EEROM_XTAL_STEL_SELW {
_SYS_EEROM_XTAL_STEL_SELW { w: self }
}
#[doc = "Bits 8:9"]
#[inline]
pub fn sys_sys_eerom_xtal_freq_sel(&mut self) -> _SYS_SYS_EEROM_XTAL_FREQ_SELW {
_SYS_SYS_EEROM_XTAL_FREQ_SELW { w: self }
}
}
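// A hedged usage sketch (assuming the owning peripheral exposes this register
// as `efuse_syscfg2`, as svd2rust-generated APIs typically do):
//
// peripheral.efuse_syscfg2.modify(|_, w| unsafe {
//     w.sys_sys_eerom_xtal_freq_sel().bits(0b01)
// });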
| 27.148472 | 84 | 0.553 |