hexsha stringlengths 40..40 | size int64 4..1.05M | content stringlengths 4..1.05M | avg_line_length float64 1.33..100 | max_line_length int64 1..1k | alphanum_fraction float64 0.25..1 |
---|---|---|---|---|---|
ffe7f0e3c6d5cb0d392052b30965311ac5019927 | 3,860 | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{sleep::sleep_with_handle, GMsg};
use futures::channel::oneshot;
use seed::{prelude::*, Request, *};
use std::time::Duration;
static DB_NAME: &str = "iml_stats";
pub struct Model {
fs_name: Option<String>,
cancel: Option<oneshot::Sender<()>>,
pub metric_data: Option<FsUsage>,
pub percent_used: f64,
}
impl Default for Model {
fn default() -> Self {
Self {
fs_name: None,
cancel: None,
metric_data: None,
percent_used: f64::default(),
}
}
}
impl Model {
pub fn new(fs_name: impl Into<Option<String>>) -> Self {
Self {
fs_name: fs_name.into(),
..Default::default()
}
}
}
#[derive(serde::Deserialize, Clone, Debug)]
pub struct InfluxSeries {
#[serde(skip)]
name: String,
#[serde(skip)]
columns: Vec<String>,
values: Vec<(String, f64, f64, f64)>,
}
#[derive(serde::Deserialize, Clone, Debug)]
pub struct InfluxResult {
#[serde(skip)]
statement_id: u16,
series: Option<Vec<InfluxSeries>>,
}
#[derive(serde::Deserialize, Clone, Debug)]
pub struct InfluxResults {
results: Vec<InfluxResult>,
}
#[derive(serde::Deserialize, Clone, Debug)]
pub struct FsUsage {
pub bytes_used: f64,
pub bytes_avail: f64,
pub bytes_total: f64,
}
#[derive(Clone, Debug)]
pub enum Msg {
DataFetched(Box<seed::fetch::ResponseDataResult<InfluxResults>>),
FetchData,
Noop,
}
async fn fetch_metrics(db: &str, query: String) -> Result<Msg, Msg> {
let url = format!("/influx?db={}&q={}", db, query);
Request::new(url)
.fetch_json_data(|x| Msg::DataFetched(Box::new(x)))
.await
}
pub fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders<Msg, GMsg>) {
match msg {
Msg::FetchData => {
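// Build an InfluxDB query that sums the most recent per-target byte counters across all OSTs, optionally restricted to a single filesystem.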
let part = if let Some(fs_name) = &model.fs_name {
format!(r#"AND "fs" = '{}'"#, fs_name)
} else {
"".into()
};
let query = format!(
r#"SELECT SUM(bytes_total) as bytes_total,
SUM(bytes_free) as bytes_free,
SUM("bytes_avail") as bytes_avail
FROM (
SELECT LAST("bytes_total") AS bytes_total,
LAST("bytes_free") as bytes_free,
LAST("bytes_avail") as bytes_avail
FROM "target" WHERE "kind" = 'OST' {} GROUP BY target
)
"#,
part
);
orders.skip().perform_cmd(fetch_metrics(DB_NAME, query));
}
Msg::DataFetched(influx_data) => {
match *influx_data {
Ok(influx_data) => {
let result: &InfluxResult = &influx_data.results[0];
if let Some(series) = &(*result).series {
let bytes_total = series[0].values[0].1;
let bytes_free = series[0].values[0].2;
let bytes_avail = series[0].values[0].3;
let bytes_used = bytes_total - bytes_free;
model.metric_data = Some(FsUsage {
bytes_used,
bytes_avail,
bytes_total,
});
model.percent_used = bytes_used / bytes_total;
}
}
Err(e) => {
error!("Failed to fetch filesystem usage metrics - {:#?}", e);
orders.skip();
}
}
let (cancel, fut) = sleep_with_handle(Duration::from_secs(10), Msg::FetchData, Msg::Noop);
model.cancel = Some(cancel);
orders.perform_cmd(fut);
}
Msg::Noop => {}
}
}
| 27.183099 | 102 | 0.533679 |
f7faf03ed1d94204756a85002284c85f17a04836 | 2,760 | /*
Copyright 2021 JFrog Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use crate::network::artifact_protocol::{ArtifactExchangeCodec, ArtifactRequest, ArtifactResponse};
use crate::network::idle_metric_protocol::{
IdleMetricExchangeCodec, IdleMetricRequest, IdleMetricResponse,
};
use libp2p::identify::{Identify, IdentifyEvent};
use libp2p::kad::record::store::MemoryStore;
use libp2p::kad::{Kademlia, KademliaEvent};
use libp2p::request_response::{RequestResponse, RequestResponseEvent};
use libp2p::NetworkBehaviour;
/// Defines the [`NetworkBehaviour`] to be used in the libp2p
/// Swarm. The PyrsiaNetworkBehaviour consists of the following
/// behaviours:
///
/// * [`Identify`]
/// * [`Kademlia`]
/// * [`RequestResponse`] for exchanging artifacts
/// * [`RequestResponse`] for exchanging idle metrics
#[derive(NetworkBehaviour)]
#[behaviour(out_event = "PyrsiaNetworkEvent")]
pub struct PyrsiaNetworkBehaviour {
pub identify: Identify,
pub kademlia: Kademlia<MemoryStore>,
pub request_response: RequestResponse<ArtifactExchangeCodec>,
pub idle_metric_request_response: RequestResponse<IdleMetricExchangeCodec>,
}
/// Each event in the `PyrsiaNetworkBehaviour` is wrapped in a
/// `PyrsiaNetworkEvent`.
#[derive(Debug)]
pub enum PyrsiaNetworkEvent {
Identify(IdentifyEvent),
Kademlia(KademliaEvent),
RequestResponse(RequestResponseEvent<ArtifactRequest, ArtifactResponse>),
IdleMetricRequestResponse(RequestResponseEvent<IdleMetricRequest, IdleMetricResponse>),
}
impl From<IdentifyEvent> for PyrsiaNetworkEvent {
fn from(event: IdentifyEvent) -> Self {
PyrsiaNetworkEvent::Identify(event)
}
}
impl From<KademliaEvent> for PyrsiaNetworkEvent {
fn from(event: KademliaEvent) -> Self {
PyrsiaNetworkEvent::Kademlia(event)
}
}
impl From<RequestResponseEvent<ArtifactRequest, ArtifactResponse>> for PyrsiaNetworkEvent {
fn from(event: RequestResponseEvent<ArtifactRequest, ArtifactResponse>) -> Self {
PyrsiaNetworkEvent::RequestResponse(event)
}
}
impl From<RequestResponseEvent<IdleMetricRequest, IdleMetricResponse>> for PyrsiaNetworkEvent {
fn from(event: RequestResponseEvent<IdleMetricRequest, IdleMetricResponse>) -> Self {
PyrsiaNetworkEvent::IdleMetricRequestResponse(event)
}
}
| 35.844156 | 98 | 0.762319 |
035ee7248fc901e0d17057143db52b109f972b4f | 19,720 | pub(crate) mod division;
use std::cmp::max;
use std::iter::Sum;
use std::ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign};
use anyhow::{ensure, Result};
use itertools::Itertools;
use plonky2_util::log2_strict;
use serde::{Deserialize, Serialize};
use crate::extension_field::{Extendable, FieldExtension};
use crate::fft::{fft, fft_with_options, ifft, FftRootTable};
use crate::field_types::Field;
/// A polynomial in point-value form.
///
/// The points are implicitly `g^i`, where `g` generates the subgroup whose size equals the number
/// of points.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PolynomialValues<F: Field> {
pub values: Vec<F>,
}
impl<F: Field> PolynomialValues<F> {
pub fn new(values: Vec<F>) -> Self {
PolynomialValues { values }
}
pub fn constant(value: F, len: usize) -> Self {
Self::new(vec![value; len])
}
pub fn zero(len: usize) -> Self {
Self::constant(F::ZERO, len)
}
/// Returns the polynomial whose value is one at the given index, and zero elsewhere.
pub fn selector(len: usize, index: usize) -> Self {
let mut result = Self::zero(len);
result.values[index] = F::ONE;
result
}
/// The number of values stored.
pub fn len(&self) -> usize {
self.values.len()
}
pub fn ifft(self) -> PolynomialCoeffs<F> {
ifft(self)
}
/// Returns the polynomial whose evaluation on the coset `shift*H` is `self`.
pub fn coset_ifft(self, shift: F) -> PolynomialCoeffs<F> {
let mut shifted_coeffs = self.ifft();
shifted_coeffs
.coeffs
.iter_mut()
.zip(shift.inverse().powers())
.for_each(|(c, r)| {
*c *= r;
});
shifted_coeffs
}
pub fn lde_multiple(polys: Vec<Self>, rate_bits: usize) -> Vec<Self> {
polys.into_iter().map(|p| p.lde(rate_bits)).collect()
}
pub fn lde(self, rate_bits: usize) -> Self {
let coeffs = ifft(self).lde(rate_bits);
fft_with_options(coeffs, Some(rate_bits), None)
}
/// Low-degree extend `Self` (seen as evaluations over the subgroup) onto a coset.
pub fn lde_onto_coset(self, rate_bits: usize) -> Self {
let coeffs = ifft(self).lde(rate_bits);
coeffs.coset_fft_with_options(F::coset_shift(), Some(rate_bits), None)
}
pub fn degree(&self) -> usize {
self.degree_plus_one()
.checked_sub(1)
.expect("deg(0) is undefined")
}
pub fn degree_plus_one(&self) -> usize {
self.clone().ifft().degree_plus_one()
}
/// Adds `rhs * rhs_weight` to `self`. Assumes `self.len() == rhs.len()`.
pub fn add_assign_scaled(&mut self, rhs: &Self, rhs_weight: F) {
self.values
.iter_mut()
.zip_eq(&rhs.values)
.for_each(|(self_v, rhs_v)| *self_v += *rhs_v * rhs_weight)
}
}
impl<F: Field> From<Vec<F>> for PolynomialValues<F> {
fn from(values: Vec<F>) -> Self {
Self::new(values)
}
}
/// A polynomial in coefficient form.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct PolynomialCoeffs<F: Field> {
pub coeffs: Vec<F>,
}
impl<F: Field> PolynomialCoeffs<F> {
pub fn new(coeffs: Vec<F>) -> Self {
PolynomialCoeffs { coeffs }
}
pub fn empty() -> Self {
Self::new(Vec::new())
}
pub fn zero(len: usize) -> Self {
Self::new(vec![F::ZERO; len])
}
pub fn is_zero(&self) -> bool {
self.coeffs.iter().all(|x| x.is_zero())
}
/// The number of coefficients. This does not filter out any zero coefficients, so it is not
/// necessarily related to the degree.
pub fn len(&self) -> usize {
self.coeffs.len()
}
pub fn log_len(&self) -> usize {
log2_strict(self.len())
}
pub fn chunks(&self, chunk_size: usize) -> Vec<Self> {
self.coeffs
.chunks(chunk_size)
.map(|chunk| PolynomialCoeffs::new(chunk.to_vec()))
.collect()
}
pub fn eval(&self, x: F) -> F {
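// Horner's method: fold from the leading coefficient down, multiplying by x at each step.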
self.coeffs
.iter()
.rev()
.fold(F::ZERO, |acc, &c| acc * x + c)
}
/// Evaluate the polynomial at a point given its powers. The first power is the point itself, not 1.
pub fn eval_with_powers(&self, powers: &[F]) -> F {
debug_assert_eq!(self.coeffs.len(), powers.len() + 1);
let acc = self.coeffs[0];
self.coeffs[1..]
.iter()
.zip(powers)
.fold(acc, |acc, (&x, &c)| acc + c * x)
}
pub fn eval_base<const D: usize>(&self, x: F::BaseField) -> F
where
F: FieldExtension<D>,
{
self.coeffs
.iter()
.rev()
.fold(F::ZERO, |acc, &c| acc.scalar_mul(x) + c)
}
/// Evaluate the polynomial at a point given its powers. The first power is the point itself, not 1.
pub fn eval_base_with_powers<const D: usize>(&self, powers: &[F::BaseField]) -> F
where
F: FieldExtension<D>,
{
debug_assert_eq!(self.coeffs.len(), powers.len() + 1);
let acc = self.coeffs[0];
self.coeffs[1..]
.iter()
.zip(powers)
.fold(acc, |acc, (&x, &c)| acc + x.scalar_mul(c))
}
pub fn lde_multiple(polys: Vec<&Self>, rate_bits: usize) -> Vec<Self> {
polys.into_iter().map(|p| p.lde(rate_bits)).collect()
}
pub fn lde(&self, rate_bits: usize) -> Self {
self.padded(self.len() << rate_bits)
}
pub fn pad(&mut self, new_len: usize) -> Result<()> {
ensure!(
new_len >= self.len(),
"Trying to pad a polynomial of length {} to a length of {}.",
self.len(),
new_len
);
self.coeffs.resize(new_len, F::ZERO);
Ok(())
}
pub fn padded(&self, new_len: usize) -> Self {
let mut poly = self.clone();
poly.pad(new_len).unwrap();
poly
}
/// Removes any leading zero coefficients.
pub fn trim(&mut self) {
self.coeffs.truncate(self.degree_plus_one());
}
/// Removes some leading zero coefficients, such that a desired length is reached. Fails if a
/// nonzero coefficient is encountered before then.
pub fn trim_to_len(&mut self, len: usize) -> Result<()> {
ensure!(self.len() >= len);
ensure!(self.coeffs[len..].iter().all(F::is_zero));
self.coeffs.truncate(len);
Ok(())
}
/// Removes any leading zero coefficients.
pub fn trimmed(&self) -> Self {
let coeffs = self.coeffs[..self.degree_plus_one()].to_vec();
Self { coeffs }
}
/// Degree of the polynomial + 1, or 0 for a polynomial with no non-zero coefficients.
pub fn degree_plus_one(&self) -> usize {
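// Scan from the highest index down for the last non-zero coefficient; an all-zero polynomial yields 0.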
(0usize..self.len())
.rev()
.find(|&i| self.coeffs[i].is_nonzero())
.map_or(0, |i| i + 1)
}
/// Leading coefficient.
pub fn lead(&self) -> F {
self.coeffs
.iter()
.rev()
.find(|x| x.is_nonzero())
.map_or(F::ZERO, |x| *x)
}
/// Reverse the order of the coefficients, not taking into account the leading zero coefficients.
pub(crate) fn rev(&self) -> Self {
Self::new(self.trimmed().coeffs.into_iter().rev().collect())
}
pub fn fft(self) -> PolynomialValues<F> {
fft(self)
}
pub fn fft_with_options(
self,
zero_factor: Option<usize>,
root_table: Option<&FftRootTable<F>>,
) -> PolynomialValues<F> {
fft_with_options(self, zero_factor, root_table)
}
/// Returns the evaluation of the polynomial on the coset `shift*H`.
pub fn coset_fft(&self, shift: F) -> PolynomialValues<F> {
self.coset_fft_with_options(shift, None, None)
}
/// Returns the evaluation of the polynomial on the coset `shift*H`.
pub fn coset_fft_with_options(
&self,
shift: F,
zero_factor: Option<usize>,
root_table: Option<&FftRootTable<F>>,
) -> PolynomialValues<F> {
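// Scaling the i-th coefficient by shift^i turns an ordinary FFT over H into an evaluation over the coset shift*H.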
let modified_poly: Self = shift
.powers()
.zip(&self.coeffs)
.map(|(r, &c)| r * c)
.collect::<Vec<_>>()
.into();
modified_poly.fft_with_options(zero_factor, root_table)
}
pub fn to_extension<const D: usize>(&self) -> PolynomialCoeffs<F::Extension>
where
F: Extendable<D>,
{
PolynomialCoeffs::new(self.coeffs.iter().map(|&c| c.into()).collect())
}
pub fn mul_extension<const D: usize>(&self, rhs: F::Extension) -> PolynomialCoeffs<F::Extension>
where
F: Extendable<D>,
{
PolynomialCoeffs::new(self.coeffs.iter().map(|&c| rhs.scalar_mul(c)).collect())
}
}
impl<F: Field> PartialEq for PolynomialCoeffs<F> {
fn eq(&self, other: &Self) -> bool {
let max_terms = self.coeffs.len().max(other.coeffs.len());
for i in 0..max_terms {
let self_i = self.coeffs.get(i).cloned().unwrap_or(F::ZERO);
let other_i = other.coeffs.get(i).cloned().unwrap_or(F::ZERO);
if self_i != other_i {
return false;
}
}
true
}
}
impl<F: Field> Eq for PolynomialCoeffs<F> {}
impl<F: Field> From<Vec<F>> for PolynomialCoeffs<F> {
fn from(coeffs: Vec<F>) -> Self {
Self::new(coeffs)
}
}
impl<F: Field> Add for &PolynomialCoeffs<F> {
type Output = PolynomialCoeffs<F>;
fn add(self, rhs: Self) -> Self::Output {
let len = max(self.len(), rhs.len());
let a = self.padded(len).coeffs;
let b = rhs.padded(len).coeffs;
let coeffs = a.into_iter().zip(b).map(|(x, y)| x + y).collect();
PolynomialCoeffs::new(coeffs)
}
}
impl<F: Field> Sum for PolynomialCoeffs<F> {
fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(Self::empty(), |acc, p| &acc + &p)
}
}
impl<F: Field> Sub for &PolynomialCoeffs<F> {
type Output = PolynomialCoeffs<F>;
fn sub(self, rhs: Self) -> Self::Output {
let len = max(self.len(), rhs.len());
let mut coeffs = self.padded(len).coeffs;
for (i, &c) in rhs.coeffs.iter().enumerate() {
coeffs[i] -= c;
}
PolynomialCoeffs::new(coeffs)
}
}
impl<F: Field> AddAssign for PolynomialCoeffs<F> {
fn add_assign(&mut self, rhs: Self) {
let len = max(self.len(), rhs.len());
self.coeffs.resize(len, F::ZERO);
for (l, r) in self.coeffs.iter_mut().zip(rhs.coeffs) {
*l += r;
}
}
}
impl<F: Field> AddAssign<&Self> for PolynomialCoeffs<F> {
fn add_assign(&mut self, rhs: &Self) {
let len = max(self.len(), rhs.len());
self.coeffs.resize(len, F::ZERO);
for (l, &r) in self.coeffs.iter_mut().zip(&rhs.coeffs) {
*l += r;
}
}
}
impl<F: Field> SubAssign for PolynomialCoeffs<F> {
fn sub_assign(&mut self, rhs: Self) {
let len = max(self.len(), rhs.len());
self.coeffs.resize(len, F::ZERO);
for (l, r) in self.coeffs.iter_mut().zip(rhs.coeffs) {
*l -= r;
}
}
}
impl<F: Field> SubAssign<&Self> for PolynomialCoeffs<F> {
fn sub_assign(&mut self, rhs: &Self) {
let len = max(self.len(), rhs.len());
self.coeffs.resize(len, F::ZERO);
for (l, &r) in self.coeffs.iter_mut().zip(&rhs.coeffs) {
*l -= r;
}
}
}
impl<F: Field> Mul<F> for &PolynomialCoeffs<F> {
type Output = PolynomialCoeffs<F>;
fn mul(self, rhs: F) -> Self::Output {
let coeffs = self.coeffs.iter().map(|&x| rhs * x).collect();
PolynomialCoeffs::new(coeffs)
}
}
impl<F: Field> MulAssign<F> for PolynomialCoeffs<F> {
fn mul_assign(&mut self, rhs: F) {
self.coeffs.iter_mut().for_each(|x| *x *= rhs);
}
}
impl<F: Field> Mul for &PolynomialCoeffs<F> {
type Output = PolynomialCoeffs<F>;
#[allow(clippy::suspicious_arithmetic_impl)]
fn mul(self, rhs: Self) -> Self::Output {
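// FFT-based multiplication: evaluate both factors on a subgroup large enough to hold the product, multiply pointwise, then interpolate back with an inverse FFT.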
let new_len = (self.len() + rhs.len()).next_power_of_two();
let a = self.padded(new_len);
let b = rhs.padded(new_len);
let a_evals = a.fft();
let b_evals = b.fft();
let mul_evals: Vec<F> = a_evals
.values
.into_iter()
.zip(b_evals.values)
.map(|(pa, pb)| pa * pb)
.collect();
ifft(mul_evals.into())
}
}
#[cfg(test)]
mod tests {
use std::time::Instant;
use rand::{thread_rng, Rng};
use super::*;
use crate::goldilocks_field::GoldilocksField;
#[test]
fn test_trimmed() {
type F = GoldilocksField;
assert_eq!(
PolynomialCoeffs::<F> { coeffs: vec![] }.trimmed(),
PolynomialCoeffs::<F> { coeffs: vec![] }
);
assert_eq!(
PolynomialCoeffs::<F> {
coeffs: vec![F::ZERO]
}
.trimmed(),
PolynomialCoeffs::<F> { coeffs: vec![] }
);
assert_eq!(
PolynomialCoeffs::<F> {
coeffs: vec![F::ONE, F::TWO, F::ZERO, F::ZERO]
}
.trimmed(),
PolynomialCoeffs::<F> {
coeffs: vec![F::ONE, F::TWO]
}
);
}
#[test]
fn test_coset_fft() {
type F = GoldilocksField;
let k = 8;
let n = 1 << k;
let poly = PolynomialCoeffs::new(F::rand_vec(n));
let shift = F::rand();
let coset_evals = poly.coset_fft(shift).values;
let generator = F::primitive_root_of_unity(k);
let naive_coset_evals = F::cyclic_subgroup_coset_known_order(generator, shift, n)
.into_iter()
.map(|x| poly.eval(x))
.collect::<Vec<_>>();
assert_eq!(coset_evals, naive_coset_evals);
let ifft_coeffs = PolynomialValues::new(coset_evals).coset_ifft(shift);
assert_eq!(poly, ifft_coeffs);
}
#[test]
fn test_coset_ifft() {
type F = GoldilocksField;
let k = 8;
let n = 1 << k;
let evals = PolynomialValues::new(F::rand_vec(n));
let shift = F::rand();
let coeffs = evals.clone().coset_ifft(shift);
let generator = F::primitive_root_of_unity(k);
let naive_coset_evals = F::cyclic_subgroup_coset_known_order(generator, shift, n)
.into_iter()
.map(|x| coeffs.eval(x))
.collect::<Vec<_>>();
assert_eq!(evals, naive_coset_evals.into());
let fft_evals = coeffs.coset_fft(shift);
assert_eq!(evals, fft_evals);
}
#[test]
fn test_polynomial_multiplication() {
type F = GoldilocksField;
let mut rng = thread_rng();
let (a_deg, b_deg) = (rng.gen_range(1..10_000), rng.gen_range(1..10_000));
let a = PolynomialCoeffs::new(F::rand_vec(a_deg));
let b = PolynomialCoeffs::new(F::rand_vec(b_deg));
let m1 = &a * &b;
let m2 = &a * &b;
for _ in 0..1000 {
let x = F::rand();
assert_eq!(m1.eval(x), a.eval(x) * b.eval(x));
assert_eq!(m2.eval(x), a.eval(x) * b.eval(x));
}
}
#[test]
fn test_inv_mod_xn() {
type F = GoldilocksField;
let mut rng = thread_rng();
let a_deg = rng.gen_range(0..1_000);
let n = rng.gen_range(1..1_000);
let mut a = PolynomialCoeffs::new(F::rand_vec(a_deg + 1));
if a.coeffs[0].is_zero() {
a.coeffs[0] = F::ONE; // First coefficient needs to be nonzero.
}
let b = a.inv_mod_xn(n);
let mut m = &a * &b;
m.coeffs.truncate(n);
m.trim();
assert_eq!(
m,
PolynomialCoeffs::new(vec![F::ONE]),
"a: {:#?}, b:{:#?}, n:{:#?}, m:{:#?}",
a,
b,
n,
m
);
}
#[test]
fn test_polynomial_long_division() {
type F = GoldilocksField;
let mut rng = thread_rng();
let (a_deg, b_deg) = (rng.gen_range(1..10_000), rng.gen_range(1..10_000));
let a = PolynomialCoeffs::new(F::rand_vec(a_deg));
let b = PolynomialCoeffs::new(F::rand_vec(b_deg));
let (q, r) = a.div_rem_long_division(&b);
for _ in 0..1000 {
let x = F::rand();
assert_eq!(a.eval(x), b.eval(x) * q.eval(x) + r.eval(x));
}
}
#[test]
fn test_polynomial_division() {
type F = GoldilocksField;
let mut rng = thread_rng();
let (a_deg, b_deg) = (rng.gen_range(1..10_000), rng.gen_range(1..10_000));
let a = PolynomialCoeffs::new(F::rand_vec(a_deg));
let b = PolynomialCoeffs::new(F::rand_vec(b_deg));
let (q, r) = a.div_rem(&b);
for _ in 0..1000 {
let x = F::rand();
assert_eq!(a.eval(x), b.eval(x) * q.eval(x) + r.eval(x));
}
}
#[test]
fn test_polynomial_division_by_constant() {
type F = GoldilocksField;
let mut rng = thread_rng();
let a_deg = rng.gen_range(1..10_000);
let a = PolynomialCoeffs::new(F::rand_vec(a_deg));
let b = PolynomialCoeffs::from(vec![F::rand()]);
let (q, r) = a.div_rem(&b);
for _ in 0..1000 {
let x = F::rand();
assert_eq!(a.eval(x), b.eval(x) * q.eval(x) + r.eval(x));
}
}
// Test to see which polynomial division method is faster for divisions of the type
// `(X^n - 1)/(X - a)`
#[test]
fn test_division_linear() {
type F = GoldilocksField;
let mut rng = thread_rng();
let l = 14;
let n = 1 << l;
let g = F::primitive_root_of_unity(l);
let xn_minus_one = {
let mut xn_min_one_vec = vec![F::ZERO; n + 1];
xn_min_one_vec[n] = F::ONE;
xn_min_one_vec[0] = F::NEG_ONE;
PolynomialCoeffs::new(xn_min_one_vec)
};
let a = g.exp_u64(rng.gen_range(0..(n as u64)));
let denom = PolynomialCoeffs::new(vec![-a, F::ONE]);
let now = Instant::now();
xn_minus_one.div_rem(&denom);
println!("Division time: {:?}", now.elapsed());
let now = Instant::now();
xn_minus_one.div_rem_long_division(&denom);
println!("Division time: {:?}", now.elapsed());
}
#[test]
fn eq() {
type F = GoldilocksField;
assert_eq!(
PolynomialCoeffs::<F>::new(vec![]),
PolynomialCoeffs::new(vec![])
);
assert_eq!(
PolynomialCoeffs::<F>::new(vec![F::ZERO]),
PolynomialCoeffs::new(vec![F::ZERO])
);
assert_eq!(
PolynomialCoeffs::<F>::new(vec![]),
PolynomialCoeffs::new(vec![F::ZERO])
);
assert_eq!(
PolynomialCoeffs::<F>::new(vec![F::ZERO]),
PolynomialCoeffs::new(vec![])
);
assert_eq!(
PolynomialCoeffs::<F>::new(vec![F::ZERO]),
PolynomialCoeffs::new(vec![F::ZERO, F::ZERO])
);
assert_eq!(
PolynomialCoeffs::<F>::new(vec![F::ONE]),
PolynomialCoeffs::new(vec![F::ONE, F::ZERO])
);
assert_ne!(
PolynomialCoeffs::<F>::new(vec![]),
PolynomialCoeffs::new(vec![F::ONE])
);
assert_ne!(
PolynomialCoeffs::<F>::new(vec![F::ZERO]),
PolynomialCoeffs::new(vec![F::ZERO, F::ONE])
);
assert_ne!(
PolynomialCoeffs::<F>::new(vec![F::ZERO]),
PolynomialCoeffs::new(vec![F::ONE, F::ZERO])
);
}
}
| 29.924127 | 104 | 0.539706 |
21348c35e73f1a27693799ab247293dcbe347538 | 7,619 | use super::private;
use super::IntoSeries;
use super::SeriesTrait;
use crate::chunked_array::comparison::*;
use crate::chunked_array::{ops::explode::ExplodeByOffsets, AsSinglePtr};
use crate::fmt::FmtList;
use crate::frame::groupby::*;
use crate::prelude::*;
use crate::series::implementations::SeriesWrap;
use arrow::array::ArrayRef;
use polars_arrow::prelude::QuantileInterpolOptions;
use std::any::Any;
use std::borrow::Cow;
impl IntoSeries for ListChunked {
fn into_series(self) -> Series {
Series(Arc::new(SeriesWrap(self)))
}
}
impl private::PrivateSeries for SeriesWrap<ListChunked> {
fn _field(&self) -> Cow<Field> {
Cow::Borrowed(self.0.ref_field())
}
fn _dtype(&self) -> &DataType {
self.0.ref_field().data_type()
}
fn explode_by_offsets(&self, offsets: &[i64]) -> Series {
self.0.explode_by_offsets(offsets)
}
fn set_sorted(&mut self, reverse: bool) {
self.0.set_sorted(reverse)
}
unsafe fn equal_element(&self, idx_self: usize, idx_other: usize, other: &Series) -> bool {
self.0.equal_element(idx_self, idx_other, other)
}
#[cfg(feature = "zip_with")]
fn zip_with_same_type(&self, mask: &BooleanChunked, other: &Series) -> Result<Series> {
ChunkZip::zip_with(&self.0, mask, other.as_ref().as_ref()).map(|ca| ca.into_series())
}
fn agg_list(&self, groups: &GroupsProxy) -> Series {
self.0.agg_list(groups)
}
fn group_tuples(&self, multithreaded: bool, sorted: bool) -> GroupsProxy {
IntoGroupsProxy::group_tuples(&self.0, multithreaded, sorted)
}
}
impl SeriesTrait for SeriesWrap<ListChunked> {
#[cfg(feature = "interpolate")]
fn interpolate(&self) -> Series {
self.0.clone().into_series()
}
fn rename(&mut self, name: &str) {
self.0.rename(name);
}
fn chunk_lengths(&self) -> ChunkIdIter {
self.0.chunk_id()
}
fn name(&self) -> &str {
self.0.name()
}
fn chunks(&self) -> &Vec<ArrayRef> {
self.0.chunks()
}
fn shrink_to_fit(&mut self) {
self.0.shrink_to_fit()
}
fn list(&self) -> Result<&ListChunked> {
unsafe { Ok(&*(self as *const dyn SeriesTrait as *const ListChunked)) }
}
fn append_array(&mut self, other: ArrayRef) -> Result<()> {
self.0.append_array(other)
}
fn slice(&self, offset: i64, length: usize) -> Series {
self.0.slice(offset, length).into_series()
}
fn append(&mut self, other: &Series) -> Result<()> {
if self.0.dtype() == other.dtype() {
self.0.append(other.as_ref().as_ref());
Ok(())
} else {
Err(PolarsError::SchemaMisMatch(
"cannot append Series; data types don't match".into(),
))
}
}
fn extend(&mut self, other: &Series) -> Result<()> {
if self.0.dtype() == other.dtype() {
self.0.extend(other.as_ref().as_ref());
Ok(())
} else {
Err(PolarsError::SchemaMisMatch(
"cannot extend Series; data types don't match".into(),
))
}
}
fn filter(&self, filter: &BooleanChunked) -> Result<Series> {
ChunkFilter::filter(&self.0, filter).map(|ca| ca.into_series())
}
#[cfg(feature = "chunked_ids")]
unsafe fn _take_chunked_unchecked(&self, by: &[ChunkId]) -> Series {
self.0.take_chunked_unchecked(by).into_series()
}
#[cfg(feature = "chunked_ids")]
unsafe fn _take_opt_chunked_unchecked(&self, by: &[Option<ChunkId>]) -> Series {
self.0.take_opt_chunked_unchecked(by).into_series()
}
fn take(&self, indices: &IdxCa) -> Result<Series> {
let indices = if indices.chunks.len() > 1 {
Cow::Owned(indices.rechunk())
} else {
Cow::Borrowed(indices)
};
Ok(ChunkTake::take(&self.0, (&*indices).into())?.into_series())
}
fn take_iter(&self, iter: &mut dyn TakeIterator) -> Result<Series> {
Ok(ChunkTake::take(&self.0, iter.into())?.into_series())
}
fn take_every(&self, n: usize) -> Series {
self.0.take_every(n).into_series()
}
unsafe fn take_iter_unchecked(&self, iter: &mut dyn TakeIterator) -> Series {
ChunkTake::take_unchecked(&self.0, iter.into()).into_series()
}
unsafe fn take_unchecked(&self, idx: &IdxCa) -> Result<Series> {
let idx = if idx.chunks.len() > 1 {
Cow::Owned(idx.rechunk())
} else {
Cow::Borrowed(idx)
};
Ok(ChunkTake::take_unchecked(&self.0, (&*idx).into()).into_series())
}
unsafe fn take_opt_iter_unchecked(&self, iter: &mut dyn TakeIteratorNulls) -> Series {
ChunkTake::take_unchecked(&self.0, iter.into()).into_series()
}
#[cfg(feature = "take_opt_iter")]
fn take_opt_iter(&self, iter: &mut dyn TakeIteratorNulls) -> Result<Series> {
Ok(ChunkTake::take(&self.0, iter.into())?.into_series())
}
fn len(&self) -> usize {
self.0.len()
}
fn rechunk(&self) -> Series {
ChunkOps::rechunk(&self.0).into_series()
}
fn expand_at_index(&self, index: usize, length: usize) -> Series {
ChunkExpandAtIndex::expand_at_index(&self.0, index, length).into_series()
}
fn cast(&self, data_type: &DataType) -> Result<Series> {
self.0.cast(data_type)
}
fn get(&self, index: usize) -> AnyValue {
self.0.get_any_value(index)
}
#[inline]
#[cfg(feature = "private")]
unsafe fn get_unchecked(&self, index: usize) -> AnyValue {
self.0.get_any_value_unchecked(index)
}
fn null_count(&self) -> usize {
self.0.null_count()
}
fn has_validity(&self) -> bool {
self.0.has_validity()
}
fn is_null(&self) -> BooleanChunked {
self.0.is_null()
}
fn is_not_null(&self) -> BooleanChunked {
self.0.is_not_null()
}
fn reverse(&self) -> Series {
ChunkReverse::reverse(&self.0).into_series()
}
fn as_single_ptr(&mut self) -> Result<usize> {
self.0.as_single_ptr()
}
fn shift(&self, periods: i64) -> Series {
ChunkShift::shift(&self.0, periods).into_series()
}
fn fill_null(&self, strategy: FillNullStrategy) -> Result<Series> {
ChunkFillNull::fill_null(&self.0, strategy).map(|ca| ca.into_series())
}
fn _sum_as_series(&self) -> Series {
ChunkAggSeries::sum_as_series(&self.0)
}
fn max_as_series(&self) -> Series {
ChunkAggSeries::max_as_series(&self.0)
}
fn min_as_series(&self) -> Series {
ChunkAggSeries::min_as_series(&self.0)
}
fn median_as_series(&self) -> Series {
QuantileAggSeries::median_as_series(&self.0)
}
fn var_as_series(&self) -> Series {
VarAggSeries::var_as_series(&self.0)
}
fn std_as_series(&self) -> Series {
VarAggSeries::std_as_series(&self.0)
}
fn quantile_as_series(
&self,
quantile: f64,
interpol: QuantileInterpolOptions,
) -> Result<Series> {
QuantileAggSeries::quantile_as_series(&self.0, quantile, interpol)
}
fn fmt_list(&self) -> String {
FmtList::fmt_list(&self.0)
}
fn clone_inner(&self) -> Arc<dyn SeriesTrait> {
Arc::new(SeriesWrap(Clone::clone(&self.0)))
}
fn as_any(&self) -> &dyn Any {
&self.0
}
/// Get a hold of self as an `Any` trait reference.
/// Only implemented for ObjectType
fn as_any_mut(&mut self) -> &mut dyn Any {
&mut self.0
}
}
| 28.859848 | 95 | 0.592466 |
1665bbdd4c2c1b595da4b94ed12d73daca6d945a | 518 | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(foo = "")] //~ ERROR E0452
fn main() {
}
| 34.533333 | 68 | 0.718147 |
de88333413c66e4f7e2baa1c94f28d3aa7c1b584 | 5,263 | // * This file is part of the uutils coreutils package.
// *
// * (c) 2014 Vsevolod Velichko <[email protected]>
// *
// * For the full copyright and license information, please view the LICENSE
// * file that was distributed with this source code.
// spell-checker:ignore (ToDO) retcode
#[macro_use]
extern crate uucore;
use clap::{crate_version, App, Arg};
use std::{
io::{stdout, Write},
path::{Path, PathBuf},
};
use uucore::{
display::{print_verbatim, Quotable},
error::{FromIo, UResult},
fs::{canonicalize, MissingHandling, ResolveMode},
};
static ABOUT: &str = "print the resolved path";
static OPT_QUIET: &str = "quiet";
static OPT_STRIP: &str = "strip";
static OPT_ZERO: &str = "zero";
static OPT_PHYSICAL: &str = "physical";
static OPT_LOGICAL: &str = "logical";
const OPT_CANONICALIZE_MISSING: &str = "canonicalize-missing";
const OPT_CANONICALIZE_EXISTING: &str = "canonicalize-existing";
static ARG_FILES: &str = "files";
fn usage() -> String {
format!("{0} [OPTION]... FILE...", uucore::execution_phrase())
}
#[uucore_procs::gen_uumain]
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
let usage = usage();
let matches = uu_app().usage(&usage[..]).get_matches_from(args);
/* the list of files */
let paths: Vec<PathBuf> = matches
.values_of(ARG_FILES)
.unwrap()
.map(PathBuf::from)
.collect();
let strip = matches.is_present(OPT_STRIP);
let zero = matches.is_present(OPT_ZERO);
let quiet = matches.is_present(OPT_QUIET);
let logical = matches.is_present(OPT_LOGICAL);
let can_mode = if matches.is_present(OPT_CANONICALIZE_EXISTING) {
MissingHandling::Existing
} else if matches.is_present(OPT_CANONICALIZE_MISSING) {
MissingHandling::Missing
} else {
MissingHandling::Normal
};
for path in &paths {
let result = resolve_path(path, strip, zero, logical, can_mode);
if !quiet {
show_if_err!(result.map_err_context(|| path.maybe_quote().to_string()));
}
}
// Although we return `Ok`, it is possible that a call to
// `show!()` above has set the exit code for the program to a
// non-zero integer.
Ok(())
}
pub fn uu_app() -> App<'static, 'static> {
App::new(uucore::util_name())
.version(crate_version!())
.about(ABOUT)
.arg(
Arg::with_name(OPT_QUIET)
.short("q")
.long(OPT_QUIET)
.help("Do not print warnings for invalid paths"),
)
.arg(
Arg::with_name(OPT_STRIP)
.short("s")
.long(OPT_STRIP)
.help("Only strip '.' and '..' components, but don't resolve symbolic links"),
)
.arg(
Arg::with_name(OPT_ZERO)
.short("z")
.long(OPT_ZERO)
.help("Separate output filenames with \\0 rather than newline"),
)
.arg(
Arg::with_name(OPT_LOGICAL)
.short("L")
.long(OPT_LOGICAL)
.help("resolve '..' components before symlinks"),
)
.arg(
Arg::with_name(OPT_PHYSICAL)
.short("P")
.long(OPT_PHYSICAL)
.overrides_with_all(&[OPT_STRIP, OPT_LOGICAL])
.help("resolve symlinks as encountered (default)"),
)
.arg(
Arg::with_name(OPT_CANONICALIZE_EXISTING)
.short("e")
.long(OPT_CANONICALIZE_EXISTING)
.help(
"canonicalize by following every symlink in every component of the \
given name recursively, all components must exist",
),
)
.arg(
Arg::with_name(OPT_CANONICALIZE_MISSING)
.short("m")
.long(OPT_CANONICALIZE_MISSING)
.help(
"canonicalize by following every symlink in every component of the \
given name recursively, without requirements on components existence",
),
)
.arg(
Arg::with_name(ARG_FILES)
.multiple(true)
.takes_value(true)
.required(true)
.min_values(1),
)
}
/// Resolve a path to an absolute form and print it.
///
/// If `strip` is `true`, then this function does not attempt to resolve
/// symbolic links in the path. If `zero` is `true`, then this function
/// prints the path followed by the null byte (`'\0'`) instead of a
/// newline character (`'\n'`).
///
/// # Errors
///
/// This function returns an error if there is a problem resolving
/// symbolic links.
fn resolve_path(
p: &Path,
strip: bool,
zero: bool,
logical: bool,
can_mode: MissingHandling,
) -> std::io::Result<()> {
let resolve = if strip {
ResolveMode::None
} else if logical {
ResolveMode::Logical
} else {
ResolveMode::Physical
};
let abs = canonicalize(p, can_mode, resolve)?;
let line_ending = if zero { b'\0' } else { b'\n' };
print_verbatim(&abs)?;
stdout().write_all(&[line_ending])?;
Ok(())
}
| 30.777778 | 94 | 0.571537 |
2fe437ab75ca4f4f388494f538f554431a456272 | 5,539 | mod utils;
use std::fmt;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
// #[wasm_bindgen]
// extern "C" {
// fn alert(s: &str);
// }
// #[wasm_bindgen]
// pub fn greet(name: &str) {
// alert(&format!("Hello {}, how are you doing today?", name));
// }
#[wasm_bindgen]
// Specifies that each cell is represented as a single byte
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Cell {
// Dead is `0`, alive is `1`, we can easily count neighbors with addition
Dead = 0,
Alive = 1,
}
#[wasm_bindgen]
pub struct Universe {
width: u32,
height: u32,
cells: Vec<Cell>,
}
#[wasm_bindgen]
impl Universe {
#[wasm_bindgen]
pub fn new(width: u32, height: u32) -> Self {
// let mut cells = Vec::new();
// for _ in 0..width * height {
// cells.push(Cell::Dead)
// }
// let width = 64;
// let height = 64;
let cells = (0..width * height)
.map(|i| {
if i % 2 == 0 || i % 7 == 0 {
Cell::Alive
} else {
Cell::Dead
}
})
.collect();
Universe {
width,
height,
cells,
}
}
// Returns the index of the cell in terms of wasm linear memory
pub fn get_index(&self, row: u32, column: u32) -> usize {
// Multiply row by width and add column
(row * self.width + column) as usize
}
// Get count of live neighbors of a cell
fn live_neighbor_count(&self, row: u32, column: u32) -> u8 {
let mut neighbors = 0;
for dy in -1..2 {
for dx in -1..2 {
// Don't do anything in the case of the square itself
if dy == 0 && dx == 0 {
continue;
}
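// Wrap around the edges (the grid is a torus): adding the dimension before taking the modulus keeps the index non-negative.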
neighbors += match self.cells[self.get_index(
(row as i32 + dy + self.height as i32) as u32 % self.height,
(column as i32 + dx + self.width as i32) as u32 % self.width,
)] {
Cell::Dead => 0,
Cell::Alive => 1,
}
}
}
neighbors
}
// Each game round
pub fn tick(&mut self) {
// Clone all cells so we can easily update the game board
let mut next = self.cells.clone();
// Loop through every cell in the board
for row in 0..self.width {
for column in 0..self.height {
// Get the actual index of this cell
let index = self.get_index(row, column);
let state = self.cells[index];
// Set this cell depending on the state of the cell and surrounding neighbors,
// According to Conway's game of life
// I fkin love rust enums & matching wtf
next[index] = match (state, self.live_neighbor_count(row, column)) {
// if alive,
// with 2-3 neighbors, you live
(Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive,
// otherwise, you die
(Cell::Alive, _) => Cell::Dead,
// if dead,
// with 3 neighbors, you are born
(Cell::Dead, 3) => Cell::Alive,
// otherwise, you ded
_ => Cell::Dead,
}
}
}
self.cells = next;
}
pub fn render(&self) -> String {
self.to_string()
}
pub fn width(&self) -> u32 {
self.width
}
pub fn height(&self) -> u32 {
self.height
}
pub fn cells(&self) -> *const Cell {
self.cells.as_ptr()
}
pub fn toggle_cell(&mut self, idx: usize) {
let new_state = match self.cells[idx] {
Cell::Dead => Cell::Alive,
Cell::Alive => Cell::Dead
};
self.cells[idx] = new_state;
}
}
impl fmt::Display for Universe {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for line in self.cells.as_slice().chunks(self.width as usize) {
for &cell in line {
let symbol = if cell == Cell::Dead { '◻' } else { '◼' };
write!(f, "{}", symbol)?;
}
write!(f, "\n")?;
}
Ok(())
}
}
#[cfg(test)]
mod testing {
use super::*;
#[test]
// Tests number of neighbors given cross
fn neighbors_full_test() {
let mut universe = Universe::new(3, 3);
// Fill up every cell
let mut indices: Vec<usize> = Vec::new();
for y in 0..universe.height {
for x in 0..universe.width {
indices.push(universe.get_index(y, x));
}
}
for idx in indices.iter() {
universe.cells[*idx] = Cell::Alive;
}
// With every cell alive, each cell should see 8 live neighbors (the grid wraps around)
// Bottom right
assert_eq!(universe.live_neighbor_count(2, 2), 8);
// Middle
assert_eq!(universe.live_neighbor_count(1, 1), 8);
// Top left
assert_eq!(universe.live_neighbor_count(0, 0), 8);
// Top right
assert_eq!(universe.live_neighbor_count(0, 2), 8);
// Bot left
assert_eq!(universe.live_neighbor_count(2, 0), 8);
}
}
| 27.834171 | 94 | 0.487633 |
6430d407fb6f628822d84d558cff65ac6272de60 | 2,738 | use async_trait::async_trait;
use avro_rs::{from_value, Codec, Reader, Schema, Writer};
use pipebase::{
common::{ConfigInto, FromConfig, FromPath},
map::Map,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
#[derive(Deserialize)]
enum Compression {
Null,
Deflate,
Snappy,
}
fn get_codec(compression: Compression) -> Codec {
match compression {
Compression::Null => Codec::Null,
Compression::Deflate => Codec::Deflate,
Compression::Snappy => Codec::Snappy,
}
}
#[derive(Deserialize)]
pub struct AvroSerConfig {
compression: Compression,
schema: String,
}
impl FromPath for AvroSerConfig {}
impl ConfigInto<AvroSer> for AvroSerConfig {}
pub struct AvroSer {
codec: Codec,
schema: Schema,
}
#[async_trait]
impl FromConfig<AvroSerConfig> for AvroSer {
async fn from_config(config: AvroSerConfig) -> anyhow::Result<Self> {
Ok(AvroSer {
codec: get_codec(config.compression),
schema: Schema::parse_str(&config.schema)?,
})
}
}
impl AvroSer {
fn serialize<T: Serialize>(
items: Vec<T>,
schema: &Schema,
codec: Codec,
) -> anyhow::Result<Vec<u8>> {
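// Append each item to an Avro object-container writer and return the encoded bytes.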
let mut writer = Writer::with_codec(schema, Vec::new(), codec);
for item in items {
writer.append_ser(item)?;
}
Ok(writer.into_inner()?)
}
}
#[async_trait]
impl<T> Map<Vec<T>, Vec<u8>, AvroSerConfig> for AvroSer
where
T: Serialize + Send + 'static,
{
async fn map(&mut self, data: Vec<T>) -> anyhow::Result<Vec<u8>> {
let schema = &self.schema;
let codec = self.codec;
Self::serialize(data, schema, codec)
}
}
#[derive(Deserialize)]
pub struct AvroDeserConfig {}
#[async_trait]
impl FromPath for AvroDeserConfig {
async fn from_path<P>(_path: P) -> anyhow::Result<Self>
where
P: AsRef<std::path::Path> + Send,
{
Ok(AvroDeserConfig {})
}
}
impl ConfigInto<AvroDeser> for AvroDeserConfig {}
pub struct AvroDeser {}
#[async_trait]
impl FromConfig<AvroDeserConfig> for AvroDeser {
async fn from_config(_config: AvroDeserConfig) -> anyhow::Result<Self> {
Ok(AvroDeser {})
}
}
impl AvroDeser {
fn deserialize<T: DeserializeOwned>(bytes: &[u8]) -> anyhow::Result<Vec<T>> {
let reader = Reader::new(bytes)?;
let mut items: Vec<T> = vec![];
for value in reader {
items.push(from_value::<T>(&value?)?);
}
Ok(items)
}
}
#[async_trait]
impl<T> Map<Vec<u8>, Vec<T>, AvroDeserConfig> for AvroDeser
where
T: DeserializeOwned,
{
async fn map(&mut self, data: Vec<u8>) -> anyhow::Result<Vec<T>> {
Self::deserialize(&data)
}
}
| 23.008403 | 81 | 0.6187 |
906a5fb99bdf09c587e227f5947d8cf11456bb48 | 9,874 | // Copyright 2018-2020 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::LocationStoreOperations;
use crate::commits::MAX_COMMIT_NUM;
use crate::location::store::diesel::{
schema::{location, location_attribute},
LocationStoreError,
};
use crate::error::InternalError;
use crate::location::store::diesel::models::{LocationAttributeModel, LocationModel};
use crate::location::store::{Location, LocationAttribute, LocationList};
use crate::paging::Paging;
use diesel::prelude::*;
pub(in crate::location::store::diesel) trait LocationStoreListLocationsOperation<C: Connection> {
fn list_locations(
&self,
service_id: Option<&str>,
offset: i64,
limit: i64,
) -> Result<LocationList, LocationStoreError>;
fn get_root_attributes(
conn: &C,
location_id: &str,
service_id: Option<&str>,
) -> QueryResult<Vec<LocationAttributeModel>>;
fn get_attributes(
conn: &C,
attributes: Vec<LocationAttributeModel>,
) -> Result<Vec<LocationAttribute>, LocationStoreError>;
}
#[cfg(feature = "postgres")]
impl<'a> LocationStoreListLocationsOperation<diesel::pg::PgConnection>
for LocationStoreOperations<'a, diesel::pg::PgConnection>
{
fn list_locations(
&self,
service_id: Option<&str>,
offset: i64,
limit: i64,
) -> Result<LocationList, LocationStoreError> {
self.conn.transaction::<_, LocationStoreError, _>(|| {
let mut query = location::table
.into_boxed()
.select(location::all_columns)
.limit(limit)
.offset(offset)
.filter(location::end_commit_num.eq(MAX_COMMIT_NUM));
if let Some(service_id) = service_id {
query = query.filter(location::service_id.eq(service_id));
} else {
query = query.filter(location::service_id.is_null());
}
let locs = query.load::<LocationModel>(self.conn).map_err(|err| {
LocationStoreError::InternalError(InternalError::from_source(Box::new(err)))
})?;
let mut locations = Vec::new();
for l in locs {
let loc: LocationModel = l;
let roots = Self::get_root_attributes(&*self.conn, &loc.location_id, service_id)?;
let attrs = Self::get_attributes(&*self.conn, roots)?;
locations.push(Location::from((loc, attrs)));
}
let mut count_query = location::table.into_boxed().select(location::all_columns);
if let Some(service_id) = service_id {
count_query = count_query.filter(location::service_id.eq(service_id));
} else {
count_query = count_query.filter(location::service_id.is_null());
}
let total = count_query.count().get_result(self.conn)?;
Ok(LocationList::new(
locations,
Paging::new(offset, limit, total),
))
})
}
fn get_root_attributes(
conn: &PgConnection,
location_id: &str,
service_id: Option<&str>,
) -> QueryResult<Vec<LocationAttributeModel>> {
let mut query = location_attribute::table
.into_boxed()
.select(location_attribute::all_columns)
.filter(
location_attribute::location_id
.eq(location_id)
.and(location_attribute::parent_property_name.is_null())
.and(location_attribute::end_commit_num.eq(MAX_COMMIT_NUM)),
);
if let Some(service_id) = service_id {
query = query.filter(location_attribute::service_id.eq(service_id));
} else {
query = query.filter(location_attribute::service_id.is_null());
}
query.load::<LocationAttributeModel>(conn)
}
fn get_attributes(
conn: &PgConnection,
attributes: Vec<LocationAttributeModel>,
) -> Result<Vec<LocationAttribute>, LocationStoreError> {
let mut attrs = Vec::new();
for attr in attributes {
let mut query = location_attribute::table
.into_boxed()
.select(location_attribute::all_columns)
.filter(
location_attribute::parent_property_name
.eq(&attr.parent_property_name)
.and(location_attribute::end_commit_num.eq(MAX_COMMIT_NUM)),
);
if let Some(ref service_id) = attr.service_id {
query = query.filter(location_attribute::service_id.eq(service_id));
} else {
query = query.filter(location_attribute::service_id.is_null());
}
let children = query.load(conn)?;
if children.is_empty() {
attrs.push(LocationAttribute::from(attr));
} else {
attrs.push(LocationAttribute::from((
attr,
Self::get_attributes(&conn, children)?,
)));
}
}
Ok(attrs)
}
}
#[cfg(feature = "sqlite")]
impl<'a> LocationStoreListLocationsOperation<diesel::sqlite::SqliteConnection>
for LocationStoreOperations<'a, diesel::sqlite::SqliteConnection>
{
fn list_locations(
&self,
service_id: Option<&str>,
offset: i64,
limit: i64,
) -> Result<LocationList, LocationStoreError> {
self.conn.transaction::<_, LocationStoreError, _>(|| {
let mut query = location::table
.into_boxed()
.select(location::all_columns)
.limit(limit)
.offset(offset)
.filter(location::end_commit_num.eq(MAX_COMMIT_NUM));
if let Some(service_id) = service_id {
query = query.filter(location::service_id.eq(service_id));
} else {
query = query.filter(location::service_id.is_null());
}
let locs = query.load::<LocationModel>(self.conn).map_err(|err| {
LocationStoreError::InternalError(InternalError::from_source(Box::new(err)))
})?;
let mut locations = Vec::new();
for l in locs {
let loc: LocationModel = l;
let roots = Self::get_root_attributes(&*self.conn, &loc.location_id, service_id)?;
let attrs = Self::get_attributes(&*self.conn, roots)?;
locations.push(Location::from((loc, attrs)));
}
let mut count_query = location::table.into_boxed().select(location::all_columns);
if let Some(service_id) = service_id {
count_query = count_query.filter(location::service_id.eq(service_id));
} else {
count_query = count_query.filter(location::service_id.is_null());
}
let total = count_query.count().get_result(self.conn)?;
Ok(LocationList::new(
locations,
Paging::new(offset, limit, total),
))
})
}
fn get_root_attributes(
conn: &SqliteConnection,
location_id: &str,
service_id: Option<&str>,
) -> QueryResult<Vec<LocationAttributeModel>> {
let mut query = location_attribute::table
.into_boxed()
.select(location_attribute::all_columns)
.filter(
location_attribute::location_id
.eq(location_id)
.and(location_attribute::parent_property_name.is_null())
.and(location_attribute::end_commit_num.eq(MAX_COMMIT_NUM)),
);
if let Some(service_id) = service_id {
query = query.filter(location_attribute::service_id.eq(service_id));
} else {
query = query.filter(location_attribute::service_id.is_null());
}
query.load::<LocationAttributeModel>(conn)
}
fn get_attributes(
conn: &SqliteConnection,
attributes: Vec<LocationAttributeModel>,
) -> Result<Vec<LocationAttribute>, LocationStoreError> {
let mut attrs = Vec::new();
for attr in attributes {
let mut query = location_attribute::table
.into_boxed()
.select(location_attribute::all_columns)
.filter(
location_attribute::parent_property_name
.eq(&attr.parent_property_name)
.and(location_attribute::end_commit_num.eq(MAX_COMMIT_NUM)),
);
if let Some(ref service_id) = attr.service_id {
query = query.filter(location_attribute::service_id.eq(service_id));
} else {
query = query.filter(location_attribute::service_id.is_null());
}
let children = query.load(conn)?;
if children.is_empty() {
attrs.push(LocationAttribute::from(attr));
} else {
attrs.push(LocationAttribute::from((
attr,
Self::get_attributes(&conn, children)?,
)));
}
}
Ok(attrs)
}
}
| 35.014184 | 98 | 0.574235 |
08e62c0aa3837a98a3eaa815edcd7cc999a26bbd | 3,628 | use crate::EntryType::*;
use clap::{App, Arg};
use regex::Regex;
use std::error::Error;
use walkdir::{DirEntry, WalkDir};
type MyResult<T> = Result<T, Box<dyn Error>>;
#[derive(Debug, Eq, PartialEq)]
enum EntryType {
Dir,
File,
Link,
}
#[derive(Debug)]
pub struct Config {
paths: Vec<String>,
names: Vec<Regex>,
entry_types: Vec<EntryType>,
}
// --------------------------------------------------
pub fn get_args() -> MyResult<Config> {
let matches = App::new("findr")
.version("0.1.0")
.author("Ken Youens-Clark <[email protected]>")
.about("Rust find")
.arg(
Arg::with_name("paths")
.value_name("PATH")
.help("Search paths")
.default_value(".")
.multiple(true),
)
.arg(
Arg::with_name("names")
.value_name("NAME")
.short("n")
.long("name")
.help("Name")
.takes_value(true)
.multiple(true),
)
.arg(
Arg::with_name("types")
.value_name("TYPE")
.short("t")
.long("type")
.help("Entry type")
.possible_values(&["f", "d", "l"])
.multiple(true)
.takes_value(true),
)
.get_matches();
let names = matches
.values_of_lossy("names")
.map(|vals| {
vals.into_iter()
.map(|name| {
Regex::new(&name)
.map_err(|_| format!("Invalid --name \"{}\"", name))
})
.collect::<Result<Vec<_>, _>>()
})
.transpose()?
.unwrap_or_default();
// clap should disallow anything but "d," "f," or "l"
let entry_types = matches
.values_of_lossy("types")
.map(|vals| {
vals.iter()
.map(|val| match val.as_str() {
"d" => Dir,
"f" => File,
"l" => Link,
_ => unreachable!("Invalid type"),
})
.collect()
})
.unwrap_or_default();
Ok(Config {
paths: matches.values_of_lossy("paths").unwrap(),
names,
entry_types,
})
}
// --------------------------------------------------
pub fn run(config: Config) -> MyResult<()> {
let type_filter = |entry: &DirEntry| {
config.entry_types.is_empty()
|| config
.entry_types
.iter()
.any(|entry_type| match entry_type {
Link => entry.file_type().is_symlink(),
Dir => entry.file_type().is_dir(),
File => entry.file_type().is_file(),
})
};
let name_filter = |entry: &DirEntry| {
config.names.is_empty()
|| config
.names
.iter()
.any(|re| re.is_match(&entry.file_name().to_string_lossy()))
};
for path in &config.paths {
let entries = WalkDir::new(path)
.into_iter()
.filter_map(|e| match e {
Err(e) => {
eprintln!("{}", e);
None
}
Ok(entry) => Some(entry),
})
.filter(type_filter)
.filter(name_filter)
.map(|entry| entry.path().display().to_string())
.collect::<Vec<_>>();
println!("{}", entries.join("\n"));
}
Ok(())
}
| 27.074627 | 76 | 0.415932 |
7952ba79a827f0219b023da537d77cec6e06ac55 | 636 | // Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
use std::env;
use common_exception::Result;
use crate::configs::Config;
use crate::sessions::FuseQueryContext;
use crate::sessions::FuseQueryContextRef;
pub fn try_create_context() -> Result<FuseQueryContextRef> {
let mut config = Config::default();
// Setup log dir to the tests directory.
config.log_dir = env::current_dir()?
.join("../../tests/data/logs")
.display()
.to_string();
let ctx = FuseQueryContext::try_create(config)?;
ctx.with_id("2021")?;
ctx.set_max_threads(8)?;
Ok(ctx)
}
| 23.555556 | 60 | 0.668239 |
29857e6f8acbe3eab9c9676b08eb2211a6ad89e9 | 195 | #![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(improper_ctypes)]
#![allow(clippy::all)]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
| 24.375 | 51 | 0.707692 |
9ca3b69912eb1eb6564a4c04878608d1518c9279 | 1,913 | // Copyright 2021 rust-multipart-rfc7578 Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//
use rand::{distributions::Alphanumeric, rngs::SmallRng, Rng, SeedableRng};
use std::iter::FromIterator;
/// A `BoundaryGenerator` is a policy to generate a random string to use
/// as a part boundary.
///
/// The default generator will build a random string of 6 ascii characters.
/// If you need more complexity, you can implement this, and use it with
/// [`Form::new`].
///
/// # Examples
///
/// ```
/// use common_multipart_rfc7578::client::multipart::BoundaryGenerator;
///
/// struct TestGenerator;
///
/// impl BoundaryGenerator for TestGenerator {
/// fn generate_boundary() -> String {
/// "test".to_string()
/// }
/// }
/// ```
pub trait BoundaryGenerator {
/// Generates a String to use as a boundary.
///
fn generate_boundary() -> String;
}
pub(crate) struct RandomAsciiGenerator;
impl BoundaryGenerator for RandomAsciiGenerator {
/// Creates a boundary of 6 ascii characters.
///
fn generate_boundary() -> String {
let rng = SmallRng::from_entropy();
let ascii = rng.sample_iter(&Alphanumeric);
String::from_iter(ascii.map(|b| b as char).take(6))
}
}
#[cfg(test)]
mod tests {
use super::{BoundaryGenerator, RandomAsciiGenerator};
#[test]
fn generate_random_boundary_not_empty() {
assert!(RandomAsciiGenerator::generate_boundary().len() > 0);
}
#[test]
fn generate_random_boundary_different_each_time() {
assert!(
RandomAsciiGenerator::generate_boundary() != RandomAsciiGenerator::generate_boundary()
);
}
}
| 28.552239 | 98 | 0.672243 |
4a703813aac8450c320aa415a538493f3c946b42 | 7,829 | use crate::instructions::{check_and_insert_nullifier, create_and_check_account, transfer};
use crate::poseidon_merkle_tree::processor::MerkleTreeProcessor;
use crate::poseidon_merkle_tree::state_roots::{check_root_hash_exists, MERKLE_TREE_ACC_BYTES};
use crate::state::ChecksAndTransferState;
use std::convert::{TryFrom, TryInto};
use solana_program::{
account_info::{next_account_info, AccountInfo},
msg,
program_error::ProgramError,
program_pack::Pack,
pubkey::Pubkey,
};
use ark_ed_on_bn254::FqParameters;
use ark_ff::{biginteger::BigInteger256, bytes::FromBytes, fields::FpParameters, BigInteger};
// Processor for deposit and withdraw logic.
pub fn process_instruction(
program_id: &Pubkey,
accounts: &[AccountInfo],
current_instruction_index: usize,
) -> Result<(), ProgramError> {
msg!("Entered process_instruction");
let account = &mut accounts.iter();
let signer_account = next_account_info(account)?;
let tmp_storage_pda = next_account_info(account)?;
let mut tmp_storage_pda_data = ChecksAndTransferState::unpack(&tmp_storage_pda.data.borrow())?;
// Checks whether passed-in root exists in Merkle tree history array.
// We do this check as soon as possible to avoid proof transaction invalidation for missing
// root. Currently 500 roots are stored at once. After 500 transactions roots are overwritten.
if current_instruction_index == 1 {
let merkle_tree_pda = next_account_info(account)?;
msg!(
"Passed-in merkle_tree_pda pubkey: {:?}",
*merkle_tree_pda.key
);
msg!(
"Checks against hardcoded merkle_tree_pda pubkey: {:?}",
solana_program::pubkey::Pubkey::new(&MERKLE_TREE_ACC_BYTES[..])
);
tmp_storage_pda_data.found_root = check_root_hash_exists(
merkle_tree_pda,
&tmp_storage_pda_data.root_hash,
&program_id,
)?;
}
// Checks and inserts nullifier pdas, two Merkle tree leaves (output utxo hashes),
// and executes transaction, deposit or withdrawal.
else if current_instruction_index == 1501 {
let two_leaves_pda = next_account_info(account)?;
let nullifier0_pda = next_account_info(account)?;
let nullifier1_pda = next_account_info(account)?;
let merkle_tree_pda = next_account_info(account)?;
let system_program_account = next_account_info(account)?;
msg!("Starting nullifier check.");
tmp_storage_pda_data.found_nullifier = check_and_insert_nullifier(
program_id,
signer_account,
nullifier0_pda,
system_program_account,
&tmp_storage_pda_data.proof_a_b_c_leaves_and_nullifiers[320..352],
)?;
msg!(
"nullifier0_pda inserted {}",
tmp_storage_pda_data.found_nullifier
);
tmp_storage_pda_data.found_nullifier = check_and_insert_nullifier(
program_id,
signer_account,
nullifier1_pda,
system_program_account,
&tmp_storage_pda_data.proof_a_b_c_leaves_and_nullifiers[352..384],
)?;
msg!(
"nullifier1_pda inserted {}",
tmp_storage_pda_data.found_nullifier
);
msg!("Inserting new merkle root.");
let mut merkle_tree_processor = MerkleTreeProcessor::new(Some(tmp_storage_pda), None)?;
// ext_amount includes the substracted fees
//TODO implement fees
let ext_amount =
i64::from_le_bytes(tmp_storage_pda_data.ext_amount.clone().try_into().unwrap());
// pub_amount is the public amount included in public inputs for proof verification
let pub_amount =
<BigInteger256 as FromBytes>::read(&tmp_storage_pda_data.amount[..]).unwrap();
if ext_amount > 0 {
if *merkle_tree_pda.key != solana_program::pubkey::Pubkey::new(&MERKLE_TREE_ACC_BYTES) {
msg!("Recipient has to be merkle tree account for deposit.");
return Err(ProgramError::InvalidInstructionData);
}
if pub_amount.0[1] != 0 || pub_amount.0[2] != 0 || pub_amount.0[3] != 0 {
msg!("Public amount is larger than u64.");
return Err(ProgramError::InvalidInstructionData);
}
let pub_amount_fits_i64 = i64::try_from(pub_amount.0[0]);
if pub_amount_fits_i64.is_err() {
msg!("Public amount is larger than i64.");
return Err(ProgramError::InvalidInstructionData);
}
if u64::try_from(ext_amount).unwrap() != pub_amount.0[0] {
msg!("ext_amount != pub_amount");
return Err(ProgramError::InvalidInstructionData);
}
msg!("Creating two_leaves_pda.");
create_and_check_account(
program_id,
signer_account,
two_leaves_pda,
system_program_account,
&tmp_storage_pda_data.proof_a_b_c_leaves_and_nullifiers[320..352],
&b"leaves"[..],
106u64, //bytes
u64::try_from(ext_amount).unwrap(), //lamports
true, //rent_exempt
)?;
msg!("Created two_leaves_pda successfully.");
msg!("Deposited {}", ext_amount);
transfer(
two_leaves_pda,
merkle_tree_pda,
u64::try_from(ext_amount).unwrap(),
)?;
} else if ext_amount <= 0 {
let recipient_account = next_account_info(account)?;
if *recipient_account.key
!= solana_program::pubkey::Pubkey::new(&tmp_storage_pda_data.to_address)
{
msg!("Recipient has to be address specified in tx integrity hash.");
return Err(ProgramError::InvalidInstructionData);
}
msg!("Creating two_leaves_pda.");
create_and_check_account(
program_id,
signer_account,
two_leaves_pda,
system_program_account,
&tmp_storage_pda_data.proof_a_b_c_leaves_and_nullifiers[320..352],
&b"leaves"[..],
106u64, //bytes
0u64, //lamports
true, //rent_exempt
)?;
msg!("Created two_leaves_pda successfully.");
// calculate ext_amount from pubAmount:
let mut field = FqParameters::MODULUS;
field.sub_noborrow(&pub_amount);
if field.0[1] != 0 || field.0[2] != 0 || field.0[3] != 0 {
msg!("Public amount is larger than u64.");
return Err(ProgramError::InvalidInstructionData);
}
let pub_amount_fits_i64 = i64::try_from(pub_amount.0[0]);
if pub_amount_fits_i64.is_err() {
msg!("Public amount is larger than i64.");
return Err(ProgramError::InvalidInstructionData);
}
// field is the positive value
let ext_amount_from_pub = field.0[0];
if u64::try_from(-ext_amount).unwrap() != ext_amount_from_pub {
msg!("ext_amount != pub_amount");
return Err(ProgramError::InvalidInstructionData);
}
transfer(merkle_tree_pda, recipient_account, ext_amount_from_pub)?;
}
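        // Worked example (hedged, illustration only): withdrawals encode the negative public
        // amount as a field element, i.e. pub_amount = p - |ext_amount| where p is the Fq
        // modulus. The block above recovers |ext_amount| as field = p - pub_amount; e.g. for
        // ext_amount = -5, the proof carries pub_amount = p - 5, so field.0[0] becomes 5,
        // which must equal u64::try_from(-ext_amount).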
//insert Merkle root
merkle_tree_processor.process_instruction(accounts)?;
}
tmp_storage_pda_data.current_instruction_index += 1;
ChecksAndTransferState::pack_into_slice(
&tmp_storage_pda_data,
&mut tmp_storage_pda.data.borrow_mut(),
);
Ok(())
}
| 40.35567 | 100 | 0.607102 |
67b27da2a4cc552bc3a9c89cc09a04eb9afcba65 | 14,215 | /*!
Compile-time implementation of event emission.
This module generates calls to `rt::emit`.
*/
use std::{collections::BTreeMap, mem};
use proc_macro2::{Span, TokenStream};
use syn::{spanned::Spanned, Attribute, Expr, ExprPath, FieldValue, Ident, Meta};
use fv_template::ct::Template;
use crate::capture::FieldValueExt;
pub(super) fn expand_tokens(input: TokenStream) -> TokenStream {
    let record_ident = Ident::new("record", input.span());
let template = Template::parse2(input).expect("failed to expand template");
// Any field-values that aren't part of the template
let mut extra_field_values: BTreeMap<_, _> = template
.after_template_field_values()
.map(|fv| (fv.key_name().expect("expected a string key"), fv))
.collect();
let mut fields = Fields::default();
// Push the field-values that appear in the template
for fv in template.template_field_values() {
let k = fv.key_name().expect("expected a string key");
// If the hole has a corresponding field-value outside the template
// then it will be used as the source for the value and attributes
// In this case, it's expected that the field-value in the template is
// just a single identifier
let fv = match extra_field_values.remove(&k) {
Some(extra_fv) => {
if let Expr::Path(ExprPath { ref path, .. }) = fv.expr {
// Make sure the field-value in the template is just a plain identifier
assert!(fv.attrs.is_empty(), "keys that exist in the template and extra pairs should only use attributes on the extra pair");
assert_eq!(
path.get_ident().map(|ident| ident.to_string()).as_ref(),
Some(&k),
"the key name and path don't match"
);
} else {
panic!("keys that exist in the template and extra pairs should only use identifiers");
}
extra_fv
}
None => fv,
};
fields.push(k, fv.clone());
}
// Push any remaining extra field-values
// This won't include any field values that also appear in the template
for (k, fv) in extra_field_values {
fields.push(k, fv.clone());
}
// The log target expression
let target_tokens = template
.before_template_field_values()
.find(|fv| {
fv.key_name()
.map(|k| k.as_str() == "target")
.unwrap_or(false)
})
.map(|fv| {
let target = &fv.expr;
quote!(Some(#target))
})
.unwrap_or_else(|| quote!(None));
// A runtime representation of the template
let template_tokens = template.to_rt_tokens_with_visitor(
quote!(emit::rt::__private),
CfgVisitor(|label: &str| {
fields
.sorted_fields
.get(label)
.and_then(|field| field.cfg_attr.as_ref())
}),
);
let field_match_value_tokens = fields.match_value_tokens();
let field_match_binding_tokens = fields.match_binding_tokens();
let field_record_tokens = fields.sorted_field_record_tokens();
let field_cfg_tokens = fields.sorted_field_cfg_tokens();
let field_key_tokens = fields.sorted_field_key_tokens();
let field_value_tokens = fields.sorted_field_value_tokens();
quote!({
extern crate emit;
match (#(#field_match_value_tokens),*) {
(#(#field_match_binding_tokens),*) => {
let kvs = emit::rt::__private::KeyValues {
sorted_key_values: &[#(#field_record_tokens),*]
};
let template = #template_tokens;
let #record_ident = emit::rt::__private::Record {
kvs,
template,
};
emit::rt::__private_forward!({
target: #target_tokens,
key_value_cfgs: [#(#field_cfg_tokens),*],
keys: [#(#field_key_tokens),*],
values: [#(#field_value_tokens),*],
record: &record,
});
}
}
})
}
#[derive(Default)]
struct Fields {
match_value_tokens: Vec<TokenStream>,
match_binding_tokens: Vec<TokenStream>,
sorted_fields: BTreeMap<String, SortedField>,
field_index: usize,
}
struct SortedField {
field_key_tokens: TokenStream,
field_record_tokens: TokenStream,
field_value_tokens: TokenStream,
cfg_attr: Option<Attribute>,
}
impl Fields {
fn match_value_tokens(&self) -> impl Iterator<Item = &TokenStream> {
self.match_value_tokens.iter()
}
fn match_binding_tokens(&self) -> impl Iterator<Item = &TokenStream> {
self.match_binding_tokens.iter()
}
fn sorted_field_key_tokens(&self) -> impl Iterator<Item = &TokenStream> {
self.sorted_fields
.values()
.map(|field| &field.field_key_tokens)
}
fn sorted_field_record_tokens(&self) -> impl Iterator<Item = &TokenStream> {
self.sorted_fields
.values()
.map(|field| &field.field_record_tokens)
}
fn sorted_field_value_tokens(&self) -> impl Iterator<Item = &TokenStream> {
self.sorted_fields
.values()
.map(|field| &field.field_value_tokens)
}
fn sorted_field_cfg_tokens(&'_ self) -> impl Iterator<Item = TokenStream> + '_ {
self.sorted_fields.values().map(|field| {
field
.cfg_attr
.as_ref()
.map(|cfg_attr| quote!(#cfg_attr))
.unwrap_or_else(|| quote!(#[cfg(not(emit_rt__private_false))]))
})
}
fn next_ident(&mut self, span: Span) -> Ident {
let i = Ident::new(&format!("__tmp{}", self.field_index), span);
self.field_index += 1;
i
}
fn push(&mut self, label: String, mut fv: FieldValue) {
let mut attrs = vec![];
let mut cfg_attr = None;
for attr in mem::take(&mut fv.attrs) {
if attr.is_cfg() {
assert!(
cfg_attr.is_none(),
"only a single #[cfg] is supported on fields"
);
cfg_attr = Some(attr);
} else {
attrs.push(attr);
}
}
let v = self.next_ident(fv.span());
// NOTE: We intentionally wrap the expression in layers of blocks
self.match_value_tokens.push(
quote_spanned!(fv.span()=> #cfg_attr { #(#attrs)* emit::ct::__private_capture!(#fv) }),
);
// If there's a #[cfg] then also push its reverse
        // This is to give a dummy value to the pattern binding, since pattern bindings don't support attributes
if let Some(cfg_attr) = &cfg_attr {
let cfg_attr = cfg_attr.invert_cfg().expect("attribute is not a #[cfg]");
self.match_value_tokens
.push(quote_spanned!(fv.span()=> #cfg_attr ()));
}
self.match_binding_tokens.push(quote!(#v));
// Make sure keys aren't duplicated
let previous = self.sorted_fields.insert(
label.clone(),
SortedField {
field_key_tokens: quote_spanned!(fv.span()=> #cfg_attr #label),
field_record_tokens: quote_spanned!(fv.span()=> #cfg_attr #v.clone()),
field_value_tokens: quote_spanned!(fv.span()=> #cfg_attr &#v),
cfg_attr,
},
);
if previous.is_some() {
panic!("keys cannot be duplicated");
}
}
}
struct CfgVisitor<F>(F);
impl<'a, F> fv_template::ct::Visitor for CfgVisitor<F>
where
F: Fn(&str) -> Option<&'a Attribute> + 'a,
{
fn visit_hole(&mut self, label: &str, hole: TokenStream) -> TokenStream {
match (self.0)(label) {
Some(cfg_attr) => {
quote!(#cfg_attr #hole)
}
_ => hole,
}
}
}
pub(super) trait AttributeExt {
fn is_cfg(&self) -> bool;
fn invert_cfg(&self) -> Option<Attribute>;
}
impl AttributeExt for Attribute {
fn is_cfg(&self) -> bool {
if let Some(ident) = self.path.get_ident() {
ident == "cfg"
} else {
false
}
}
fn invert_cfg(&self) -> Option<Attribute> {
match self.path.get_ident() {
Some(ident) if ident == "cfg" => match self.parse_meta() {
Ok(Meta::List(list)) => {
let inner = list.nested;
Some(Attribute {
pound_token: self.pound_token.clone(),
style: self.style.clone(),
bracket_token: self.bracket_token.clone(),
path: self.path.clone(),
tokens: quote!((not(#inner))),
})
}
_ => None,
},
_ => None,
}
}
}
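// Hedged usage sketch (illustrative only; relies on syn's `parse_quote!` support for
// `Attribute`): `invert_cfg` rebuilds a `#[cfg(..)]` attribute with its predicate wrapped in
// `not(..)`.
//
// let attr: syn::Attribute = syn::parse_quote!(#[cfg(feature = "extras")]);
// let inverted = attr.invert_cfg().unwrap();
// // `inverted` tokenizes as `#[cfg(not(feature = "extras"))]`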
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[rustfmt::skip]
fn expand_emit() {
let cases = vec![
(
quote!("Text and {b: 17} and {a} and {#[as_debug] c} and {d: String::from(\"short lived\")} and {#[cfg(disabled)] e}"),
quote!({
extern crate emit;
match (
{emit::ct::__private_capture!(b: 17) },
{emit::ct::__private_capture!(a) },
{
#[as_debug]
emit::ct::__private_capture!(c)
},
{emit::ct::__private_capture!(d: String::from("short lived")) },
#[cfg(disabled)]
{emit::ct::__private_capture!(e) },
#[cfg(not(disabled))]
()
) {
(__tmp0, __tmp1, __tmp2, __tmp3, __tmp4) => {
let kvs = emit::rt::__private::KeyValues {
sorted_key_values: &[
__tmp1.clone(),
__tmp0.clone(),
__tmp2.clone(),
__tmp3.clone(),
#[cfg(disabled)]
__tmp4.clone()
]
};
let template = emit::rt::__private::template(&[
emit::rt::__private::Part::Text("Text and "),
emit::rt::__private::Part::Hole ( "b"),
emit::rt::__private::Part::Text(" and "),
emit::rt::__private::Part::Hole ( "a"),
emit::rt::__private::Part::Text(" and "),
emit::rt::__private::Part::Hole ( "c" ),
emit::rt::__private::Part::Text(" and "),
emit::rt::__private::Part::Hole ( "d" ),
emit::rt::__private::Part::Text(" and "),
#[cfg(disabled)]
emit::rt::__private::Part::Hole ( "e" )
]);
let record = emit::rt::__private::Record {
kvs,
template,
};
emit::rt::__private_forward!({
target: None,
key_value_cfgs: [
#[cfg(not(emit_rt__private_false))],
#[cfg(not(emit_rt__private_false))],
#[cfg(not(emit_rt__private_false))],
#[cfg(not(emit_rt__private_false))],
#[cfg(disabled)]
],
keys: ["a", "b", "c", "d", #[cfg(disabled)] "e"],
values: [&__tmp1, &__tmp0, &__tmp2, &__tmp3, #[cfg(disabled)] &__tmp4],
record: &record,
});
}
}
}),
),
(
quote!(target: log, "Text and {a}", a: 42),
quote!({
extern crate emit;
match (
{ emit::ct::__private_capture!(a: 42) }
) {
(__tmp0) => {
let kvs = emit::rt::__private::KeyValues {
sorted_key_values: &[__tmp0.clone()]
};
let template = emit::rt::__private::template(&[
emit::rt::__private::Part::Text("Text and "),
emit::rt::__private::Part::Hole ( "a")
]);
let record = emit::rt::__private::Record {
kvs,
template,
};
emit::rt::__private_forward!({
target: Some(log),
key_value_cfgs: [
#[cfg(not(emit_rt__private_false))]
],
keys: ["a"],
values: [&__tmp0],
record: &record,
});
}
}
})
)
];
for (expr, expected) in cases {
let actual = expand_tokens(expr);
assert_eq!(expected.to_string(), actual.to_string());
}
}
}
| 35.098765 | 145 | 0.458881 |
dbad56e03916620629def5e731ad3272c4509426 | 16,475 | #[doc = r"Value read from the register"]
pub struct R {
bits: u8,
}
#[doc = r"Value to write to the register"]
pub struct W {
bits: u8,
}
impl super::RXCSRH4 {
#[doc = r"Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
self.register.set(f(&R { bits }, &mut W { bits }).bits);
}
#[doc = r"Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r"Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
self.register.set(
f(&mut W {
bits: Self::reset_value(),
})
.bits,
);
}
#[doc = r"Reset value of the register"]
#[inline(always)]
pub const fn reset_value() -> u8 {
0
}
#[doc = r"Writes the reset value to the register"]
#[inline(always)]
pub fn reset(&self) {
self.register.set(Self::reset_value())
}
}
#[doc = r"Value of the field"]
pub struct USB_RXCSRH4_INCOMPRXR {
bits: bool,
}
impl USB_RXCSRH4_INCOMPRXR {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r"Returns `true` if the bit is clear (0)"]
#[inline(always)]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r"Returns `true` if the bit is set (1)"]
#[inline(always)]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r"Proxy"]
pub struct _USB_RXCSRH4_INCOMPRXW<'a> {
w: &'a mut W,
}
impl<'a> _USB_RXCSRH4_INCOMPRXW<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits &= !(1 << 0);
self.w.bits |= ((value as u8) & 1) << 0;
self.w
}
}
#[doc = r"Value of the field"]
pub struct USB_RXCSRH4_DTR {
bits: bool,
}
impl USB_RXCSRH4_DTR {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r"Returns `true` if the bit is clear (0)"]
#[inline(always)]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r"Returns `true` if the bit is set (1)"]
#[inline(always)]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r"Proxy"]
pub struct _USB_RXCSRH4_DTW<'a> {
w: &'a mut W,
}
impl<'a> _USB_RXCSRH4_DTW<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits &= !(1 << 1);
self.w.bits |= ((value as u8) & 1) << 1;
self.w
}
}
#[doc = r"Value of the field"]
pub struct USB_RXCSRH4_DTWER {
bits: bool,
}
impl USB_RXCSRH4_DTWER {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r"Returns `true` if the bit is clear (0)"]
#[inline(always)]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r"Returns `true` if the bit is set (1)"]
#[inline(always)]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r"Proxy"]
pub struct _USB_RXCSRH4_DTWEW<'a> {
w: &'a mut W,
}
impl<'a> _USB_RXCSRH4_DTWEW<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits &= !(1 << 2);
self.w.bits |= ((value as u8) & 1) << 2;
self.w
}
}
#[doc = r"Value of the field"]
pub struct USB_RXCSRH4_DMAMODR {
bits: bool,
}
impl USB_RXCSRH4_DMAMODR {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r"Returns `true` if the bit is clear (0)"]
#[inline(always)]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r"Returns `true` if the bit is set (1)"]
#[inline(always)]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r"Proxy"]
pub struct _USB_RXCSRH4_DMAMODW<'a> {
w: &'a mut W,
}
impl<'a> _USB_RXCSRH4_DMAMODW<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits &= !(1 << 3);
self.w.bits |= ((value as u8) & 1) << 3;
self.w
}
}
#[doc = r"Value of the field"]
pub struct USB_RXCSRH4_PIDERRR {
bits: bool,
}
impl USB_RXCSRH4_PIDERRR {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r"Returns `true` if the bit is clear (0)"]
#[inline(always)]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r"Returns `true` if the bit is set (1)"]
#[inline(always)]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r"Proxy"]
pub struct _USB_RXCSRH4_PIDERRW<'a> {
w: &'a mut W,
}
impl<'a> _USB_RXCSRH4_PIDERRW<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits &= !(1 << 4);
self.w.bits |= ((value as u8) & 1) << 4;
self.w
}
}
#[doc = r"Value of the field"]
pub struct USB_RXCSRH4_DMAENR {
bits: bool,
}
impl USB_RXCSRH4_DMAENR {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r"Returns `true` if the bit is clear (0)"]
#[inline(always)]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r"Returns `true` if the bit is set (1)"]
#[inline(always)]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r"Proxy"]
pub struct _USB_RXCSRH4_DMAENW<'a> {
w: &'a mut W,
}
impl<'a> _USB_RXCSRH4_DMAENW<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits &= !(1 << 5);
self.w.bits |= ((value as u8) & 1) << 5;
self.w
}
}
#[doc = r"Value of the field"]
pub struct USB_RXCSRH4_AUTORQR {
bits: bool,
}
impl USB_RXCSRH4_AUTORQR {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r"Returns `true` if the bit is clear (0)"]
#[inline(always)]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r"Returns `true` if the bit is set (1)"]
#[inline(always)]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r"Proxy"]
pub struct _USB_RXCSRH4_AUTORQW<'a> {
w: &'a mut W,
}
impl<'a> _USB_RXCSRH4_AUTORQW<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits &= !(1 << 6);
self.w.bits |= ((value as u8) & 1) << 6;
self.w
}
}
#[doc = r"Value of the field"]
pub struct USB_RXCSRH4_AUTOCLR {
bits: bool,
}
impl USB_RXCSRH4_AUTOCLR {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r"Returns `true` if the bit is clear (0)"]
#[inline(always)]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r"Returns `true` if the bit is set (1)"]
#[inline(always)]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r"Proxy"]
pub struct _USB_RXCSRH4_AUTOCLW<'a> {
w: &'a mut W,
}
impl<'a> _USB_RXCSRH4_AUTOCLW<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits &= !(1 << 7);
self.w.bits |= ((value as u8) & 1) << 7;
self.w
}
}
#[doc = r"Value of the field"]
pub struct USB_RXCSRH4_DISNYETR {
bits: bool,
}
impl USB_RXCSRH4_DISNYETR {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r"Returns `true` if the bit is clear (0)"]
#[inline(always)]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r"Returns `true` if the bit is set (1)"]
#[inline(always)]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r"Proxy"]
pub struct _USB_RXCSRH4_DISNYETW<'a> {
w: &'a mut W,
}
impl<'a> _USB_RXCSRH4_DISNYETW<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits &= !(1 << 4);
self.w.bits |= ((value as u8) & 1) << 4;
self.w
}
}
#[doc = r"Value of the field"]
pub struct USB_RXCSRH4_ISOR {
bits: bool,
}
impl USB_RXCSRH4_ISOR {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r"Returns `true` if the bit is clear (0)"]
#[inline(always)]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r"Returns `true` if the bit is set (1)"]
#[inline(always)]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r"Proxy"]
pub struct _USB_RXCSRH4_ISOW<'a> {
w: &'a mut W,
}
impl<'a> _USB_RXCSRH4_ISOW<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits &= !(1 << 6);
self.w.bits |= ((value as u8) & 1) << 6;
self.w
}
}
impl R {
#[doc = r"Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
#[doc = "Bit 0 - Incomplete RX Transmission Status"]
#[inline(always)]
pub fn usb_rxcsrh4_incomprx(&self) -> USB_RXCSRH4_INCOMPRXR {
let bits = ((self.bits >> 0) & 1) != 0;
USB_RXCSRH4_INCOMPRXR { bits }
}
#[doc = "Bit 1 - Data Toggle"]
#[inline(always)]
pub fn usb_rxcsrh4_dt(&self) -> USB_RXCSRH4_DTR {
let bits = ((self.bits >> 1) & 1) != 0;
USB_RXCSRH4_DTR { bits }
}
#[doc = "Bit 2 - Data Toggle Write Enable"]
#[inline(always)]
pub fn usb_rxcsrh4_dtwe(&self) -> USB_RXCSRH4_DTWER {
let bits = ((self.bits >> 2) & 1) != 0;
USB_RXCSRH4_DTWER { bits }
}
#[doc = "Bit 3 - DMA Request Mode"]
#[inline(always)]
pub fn usb_rxcsrh4_dmamod(&self) -> USB_RXCSRH4_DMAMODR {
let bits = ((self.bits >> 3) & 1) != 0;
USB_RXCSRH4_DMAMODR { bits }
}
#[doc = "Bit 4 - PID Error"]
#[inline(always)]
pub fn usb_rxcsrh4_piderr(&self) -> USB_RXCSRH4_PIDERRR {
let bits = ((self.bits >> 4) & 1) != 0;
USB_RXCSRH4_PIDERRR { bits }
}
#[doc = "Bit 5 - DMA Request Enable"]
#[inline(always)]
pub fn usb_rxcsrh4_dmaen(&self) -> USB_RXCSRH4_DMAENR {
let bits = ((self.bits >> 5) & 1) != 0;
USB_RXCSRH4_DMAENR { bits }
}
#[doc = "Bit 6 - Auto Request"]
#[inline(always)]
pub fn usb_rxcsrh4_autorq(&self) -> USB_RXCSRH4_AUTORQR {
let bits = ((self.bits >> 6) & 1) != 0;
USB_RXCSRH4_AUTORQR { bits }
}
#[doc = "Bit 7 - Auto Clear"]
#[inline(always)]
pub fn usb_rxcsrh4_autocl(&self) -> USB_RXCSRH4_AUTOCLR {
let bits = ((self.bits >> 7) & 1) != 0;
USB_RXCSRH4_AUTOCLR { bits }
}
#[doc = "Bit 4 - Disable NYET"]
#[inline(always)]
pub fn usb_rxcsrh4_disnyet(&self) -> USB_RXCSRH4_DISNYETR {
let bits = ((self.bits >> 4) & 1) != 0;
USB_RXCSRH4_DISNYETR { bits }
}
#[doc = "Bit 6 - Isochronous Transfers"]
#[inline(always)]
pub fn usb_rxcsrh4_iso(&self) -> USB_RXCSRH4_ISOR {
let bits = ((self.bits >> 6) & 1) != 0;
USB_RXCSRH4_ISOR { bits }
}
}
impl W {
#[doc = r"Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u8) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 0 - Incomplete RX Transmission Status"]
#[inline(always)]
pub fn usb_rxcsrh4_incomprx(&mut self) -> _USB_RXCSRH4_INCOMPRXW {
_USB_RXCSRH4_INCOMPRXW { w: self }
}
#[doc = "Bit 1 - Data Toggle"]
#[inline(always)]
pub fn usb_rxcsrh4_dt(&mut self) -> _USB_RXCSRH4_DTW {
_USB_RXCSRH4_DTW { w: self }
}
#[doc = "Bit 2 - Data Toggle Write Enable"]
#[inline(always)]
pub fn usb_rxcsrh4_dtwe(&mut self) -> _USB_RXCSRH4_DTWEW {
_USB_RXCSRH4_DTWEW { w: self }
}
#[doc = "Bit 3 - DMA Request Mode"]
#[inline(always)]
pub fn usb_rxcsrh4_dmamod(&mut self) -> _USB_RXCSRH4_DMAMODW {
_USB_RXCSRH4_DMAMODW { w: self }
}
#[doc = "Bit 4 - PID Error"]
#[inline(always)]
pub fn usb_rxcsrh4_piderr(&mut self) -> _USB_RXCSRH4_PIDERRW {
_USB_RXCSRH4_PIDERRW { w: self }
}
#[doc = "Bit 5 - DMA Request Enable"]
#[inline(always)]
pub fn usb_rxcsrh4_dmaen(&mut self) -> _USB_RXCSRH4_DMAENW {
_USB_RXCSRH4_DMAENW { w: self }
}
#[doc = "Bit 6 - Auto Request"]
#[inline(always)]
pub fn usb_rxcsrh4_autorq(&mut self) -> _USB_RXCSRH4_AUTORQW {
_USB_RXCSRH4_AUTORQW { w: self }
}
#[doc = "Bit 7 - Auto Clear"]
#[inline(always)]
pub fn usb_rxcsrh4_autocl(&mut self) -> _USB_RXCSRH4_AUTOCLW {
_USB_RXCSRH4_AUTOCLW { w: self }
}
#[doc = "Bit 4 - Disable NYET"]
#[inline(always)]
pub fn usb_rxcsrh4_disnyet(&mut self) -> _USB_RXCSRH4_DISNYETW {
_USB_RXCSRH4_DISNYETW { w: self }
}
#[doc = "Bit 6 - Isochronous Transfers"]
#[inline(always)]
pub fn usb_rxcsrh4_iso(&mut self) -> _USB_RXCSRH4_ISOW {
_USB_RXCSRH4_ISOW { w: self }
}
}
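// Hedged usage sketch (not part of the generated code; `usb0` is a hypothetical peripheral
// handle exposing this register as `rxcsrh4`): a typical read-modify-write enables
// auto-clear and auto-request, and a plain read inspects the ISO bit.
//
// usb0.rxcsrh4.modify(|_r, w| {
//     w.usb_rxcsrh4_autocl().set_bit().usb_rxcsrh4_autorq().set_bit()
// });
// let iso = usb0.rxcsrh4.read().usb_rxcsrh4_iso().bit_is_set();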
| 26.788618 | 70 | 0.54088 |
502383280c17bf171bb9cba11631e7e188dfc306 | 34,143 | //! Composes two operations together.
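//!
//! A hedged overview (illustrative only; concrete types elided): `compose(&a, &b)` builds a
//! single `Op` whose effect is intended to match applying `a` to a document and then `b` to
//! the result.
//!
//! ```ignore
//! // assuming `doc` is a document and `a`, `b` are operations where `b` applies to
//! // the output of `a`
//! let ab = compose(&a, &b);
//! // applying `ab` to `doc` should give the same result as applying `a`, then `b`
//! ```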
use super::doc::*;
use crate::stepper::*;
use std::cmp;
fn compose_del_del_inner<S: Schema>(
res: &mut DelSpan<S>,
a: &mut DelStepper<S>,
b: &mut DelStepper<S>,
) {
while !a.is_done() && !b.is_done() {
match a.get_head() {
DelSkip(acount) => {
match b.head.clone() {
Some(DelSkip(bcount)) => {
res.place(&DelSkip(cmp::min(acount, bcount)));
if acount > bcount {
a.head = Some(DelSkip(acount - bcount));
b.next();
} else if acount < bcount {
b.head = Some(DelSkip(bcount - acount));
a.next();
} else {
a.next();
b.next();
}
}
// Some(DelObject) |
Some(DelWithGroup(..)) | Some(DelGroup(..)) => {
if acount > 1 {
a.head = Some(DelSkip(acount - 1));
} else {
a.next();
}
res.place(&b.next().unwrap());
}
Some(DelText(bcount)) => {
res.place(&DelText(cmp::min(acount, bcount)));
if acount > bcount {
a.head = Some(DelSkip(acount - bcount));
b.next();
} else if acount < bcount {
b.head = Some(DelText(bcount - acount));
a.next();
} else {
a.next();
b.next();
}
}
Some(DelStyles(b_count, b_styles)) => {
                        res.place(&DelStyles(cmp::min(acount, b_count), b_styles.clone()));
if acount > b_count {
a.head = Some(DelSkip(acount - b_count));
b.next();
} else if acount < b_count {
b.head = Some(DelStyles(b_count - acount, b_styles));
a.next();
} else {
a.next();
b.next();
}
}
None => {
res.place(&a.next().unwrap());
} // Some(DelMany(bcount)) => {
// res.place(&DelMany(cmp::min(acount, bcount)));
// if acount > bcount {
// a.head = Some(DelSkip(acount - bcount));
// b.next();
// } else if acount < bcount {
// b.head = Some(DelMany(bcount - acount));
// a.next();
// } else {
// a.next();
// b.next();
// }
// }
// Some(DelGroupAll) => {
// if acount > 1 {
// a.head = Some(DelSkip(acount - 1));
// } else {
// a.next();
// }
// res.place(&b.next().unwrap());
// }
}
}
DelStyles(a_count, a_styles) => match b.head.clone() {
Some(DelStyles(b_count, b_styles)) => {
let mut both_styles = b_styles.clone();
both_styles.extend(&a_styles);
res.push(DelStyles(cmp::min(a_count, b_count), both_styles));
if a_count > b_count {
                        a.head = Some(DelStyles(a_count - b_count, a_styles));
b.next();
} else if a_count < b_count {
                        b.head = Some(DelStyles(b_count - a_count, b_styles));
a.next();
} else {
a.next();
b.next();
}
}
Some(DelSkip(b_count)) => {
res.push(DelStyles(cmp::min(a_count, b_count), a_styles.clone()));
if a_count > b_count {
                        a.head = Some(DelStyles(a_count - b_count, a_styles));
b.next();
} else if a_count < b_count {
                        b.head = Some(DelSkip(b_count - a_count));
a.next();
} else {
a.next();
b.next();
}
}
Some(DelWithGroup(..)) | Some(DelGroup(..)) => {
unreachable!();
}
Some(DelText(b_count)) => {
res.place(&DelText(cmp::min(a_count, b_count)));
if a_count > b_count {
a.head = Some(DelStyles(a_count - b_count, a_styles));
b.next();
} else if a_count < b_count {
b.head = Some(DelText(b_count - a_count));
a.next();
} else {
a.next();
b.next();
}
}
None => {
res.place(&a.next().unwrap());
}
},
DelWithGroup(ref span) => {
match b.head.clone() {
Some(DelSkip(bcount)) => {
if bcount > 1 {
b.head = Some(DelSkip(bcount - 1));
} else {
b.next();
}
res.place(&a.next().unwrap());
}
Some(DelStyles(..)) => {
panic!("DelWithGroup vs DelStyles is bad");
}
Some(DelWithGroup(ref bspan)) => {
res.place(&DelWithGroup(compose_del_del(span, bspan)));
a.next();
b.next();
}
Some(DelGroup(ref bspan)) => {
res.place(&DelGroup(compose_del_del(span, bspan)));
a.next();
b.next();
}
Some(DelText(..)) => {
panic!("DelWithGroup vs DelText is bad");
}
None => {
res.place(&a.next().unwrap());
} // Some(DelMany(bcount)) => {
// if bcount > 1 {
// b.head = Some(DelMany(bcount - 1));
// } else {
// b.next();
// }
// a.next();
// res.place(&DelMany(1));
// }
// Some(DelObject) => {
// panic!("DelWithGroup vs DelObject is bad");
// }
// Some(DelGroupAll) => {
// a.next();
// res.place(&b.next().unwrap());
// }
}
}
DelGroup(ref span) => {
let mut c = DelStepper::new(span);
let mut inner: DelSpan<S> = vec![];
compose_del_del_inner(&mut inner, &mut c, b);
if !c.is_done() {
inner.place(&c.head.unwrap());
inner.place_all(&c.rest);
}
res.place(&DelGroup(inner));
a.next();
}
DelText(count) => {
res.place(&DelText(count));
a.next();
} // DelObject => {
// match b.head.clone() {
// Some(DelObject) => {
// res.place(&DelObject);
// a.next();
// b.next();
// }
// None => {
// res.place(&DelObject);
// a.next();
// }
// _ => {
// panic!("Invalid compose against DelObject");
// }
// }
// }
// DelMany(count) => {
// res.place(&DelMany(count));
// a.next();
// }
// DelGroupAll => {
// res.place(&DelGroupAll);
// a.next();
// }
}
}
}
pub fn compose_del_del<S: Schema>(avec: &DelSpan<S>, bvec: &DelSpan<S>) -> DelSpan<S> {
let mut res = Vec::with_capacity(avec.len() + bvec.len());
let mut a = DelStepper::new(avec);
let mut b = DelStepper::new(bvec);
compose_del_del_inner(&mut res, &mut a, &mut b);
if !a.is_done() {
res.place(&a.get_head());
res.place_all(&a.rest);
}
if !b.is_done() {
res.place(&b.get_head());
res.place_all(&b.rest);
}
res
}
fn compose_add_add_inner<S: Schema>(
res: &mut AddSpan<S>,
a: &mut AddStepper<S>,
b: &mut AddStepper<S>,
) {
while !b.is_done() && !a.is_done() {
match b.get_head() {
AddText(..) => {
res.place(&b.next().unwrap());
}
AddStyles(b_count, b_styles) => match a.get_head() {
AddStyles(a_count, a_styles) => {
let mut both_styles = b_styles.clone();
both_styles.extend(&a_styles);
res.push(AddStyles(cmp::min(a_count, b_count), both_styles));
if a_count > b_count {
                        a.head = Some(AddStyles(a_count - b_count, a_styles));
b.next();
} else if a_count < b_count {
                        b.head = Some(AddStyles(b_count - a_count, b_styles));
a.next();
} else {
a.next();
b.next();
}
}
AddText(mut styles, value) => {
if b_count < value.char_len() {
let (a_left, a_right) = value.split_at(b_count);
let mut left_styles = styles.clone();
left_styles.extend(&b_styles);
res.place(&AddText(left_styles, a_left));
a.head = Some(AddText(styles, a_right));
b.next();
} else if b_count > value.char_len() {
styles.extend(&b_styles);
b.head = Some(AddStyles(b_count - value.char_len(), b_styles));
res.place(&AddText(styles, value));
a.next();
} else {
styles.extend(&b_styles);
res.place(&AddText(styles, value));
a.next();
b.next();
}
}
AddSkip(acount) => {
res.push(AddStyles(cmp::min(acount, b_count), b_styles.clone()));
if acount > b_count {
                        a.head = Some(AddSkip(acount - b_count));
b.next();
} else if acount < b_count {
                        b.head = Some(AddStyles(b_count - acount, b_styles));
a.next();
} else {
a.next();
b.next();
}
}
AddWithGroup(..) => {
res.push(a.next().unwrap());
if b_count == 1 {
b.next();
} else {
b.head = Some(AddSkip(b_count - 1));
}
}
AddGroup(..) => {
res.push(a.next().unwrap());
if b_count == 1 {
b.next();
} else {
b.head = Some(AddSkip(b_count - 1));
}
}
},
AddSkip(bcount) => match a.get_head() {
AddStyles(acount, a_styles) => {
res.push(AddStyles(cmp::min(acount, bcount), a_styles.clone()));
if acount > bcount {
a.head = Some(AddStyles(acount - bcount, a_styles));
b.next();
} else if acount < bcount {
b.head = Some(AddSkip(bcount - acount));
a.next();
} else {
a.next();
b.next();
}
}
AddText(styles, value) => {
if bcount < value.char_len() {
let (a_left, a_right) = value.split_at(bcount);
res.place(&AddText(styles.clone(), a_left));
a.head = Some(AddText(styles, a_right));
b.next();
} else if bcount > value.char_len() {
res.place(&a.next().unwrap());
b.head = Some(AddSkip(bcount - value.char_len()));
} else {
res.place(&a.get_head());
a.next();
b.next();
}
}
AddSkip(acount) => {
res.push(AddSkip(cmp::min(acount, bcount)));
if acount > bcount {
a.head = Some(AddSkip(acount - bcount));
b.next();
} else if acount < bcount {
b.head = Some(AddSkip(bcount - acount));
a.next();
} else {
a.next();
b.next();
}
}
AddWithGroup(_span) => {
res.push(a.next().unwrap());
if bcount == 1 {
b.next();
} else {
b.head = Some(AddSkip(bcount - 1));
}
}
AddGroup(..) => {
res.push(a.next().unwrap());
if bcount == 1 {
b.next();
} else {
b.head = Some(AddSkip(bcount - 1));
}
}
},
AddGroup(attrs, bspan) => {
let mut c = AddStepper::new(&bspan);
let mut inner = vec![];
compose_add_add_inner(&mut inner, a, &mut c);
if !c.is_done() {
inner.place(&c.get_head());
inner.place_all(&c.rest);
}
res.push(AddGroup(attrs.clone(), inner));
b.next();
}
AddWithGroup(ref bspan) => match a.get_head() {
AddText(..) => {
panic!("Cannot compose AddWithGroup with AddText");
}
AddStyles(..) => {
panic!("Cannot compose AddWithGroup with AddStyles");
}
AddSkip(acount) => {
if acount == 1 {
a.next();
} else {
a.head = Some(AddSkip(acount - 1));
}
res.push(b.next().unwrap());
}
AddWithGroup(ref aspan) => {
res.push(AddWithGroup(compose_add_add(aspan, bspan)));
a.next();
b.next();
}
AddGroup(ref attrs, ref aspan) => {
res.push(AddGroup(attrs.clone(), compose_add_add(aspan, bspan)));
a.next();
b.next();
}
},
}
}
}
pub fn compose_add_add<S: Schema>(avec: &AddSpan<S>, bvec: &AddSpan<S>) -> AddSpan<S> {
let mut res = Vec::with_capacity(avec.len() + bvec.len());
let mut a = AddStepper::new(avec);
let mut b = AddStepper::new(bvec);
compose_add_add_inner(&mut res, &mut a, &mut b);
if !b.is_done() {
res.place(&b.get_head());
res.place_all(&b.rest);
}
if !a.is_done() {
res.place(&a.get_head());
res.place_all(&a.rest);
}
res
}
pub fn compose_add_del<S: Schema>(avec: &AddSpan<S>, bvec: &DelSpan<S>) -> Op<S> {
let mut delres: DelSpan<S> = Vec::with_capacity(avec.len() + bvec.len());
let mut addres: AddSpan<S> = Vec::with_capacity(avec.len() + bvec.len());
let mut a = AddStepper::new(avec);
let mut b = DelStepper::new(bvec);
compose_add_del_inner(&mut delres, &mut addres, &mut a, &mut b);
if !b.is_done() {
let rest = b.into_span();
if rest.skip_post_len() > 0 {
addres.place(&AddSkip(rest.skip_post_len()));
}
delres.place_all(&rest);
}
if !a.is_done() {
let rest = a.into_span();
if rest.skip_pre_len() > 0 {
delres.place(&DelSkip(rest.skip_pre_len()));
}
addres.place_all(&rest);
}
Op(delres, addres)
}
fn compose_add_del_inner<S: Schema>(
delres: &mut DelSpan<S>,
addres: &mut AddSpan<S>,
a: &mut AddStepper<S>,
b: &mut DelStepper<S>,
) {
while !b.is_done() && !a.is_done() {
match b.get_head() {
DelText(bcount) => match a.get_head() {
AddText(a_styles, avalue) => {
if bcount < avalue.char_len() {
let (_a_left, a_right) = avalue.split_at(bcount);
a.head = Some(AddText(a_styles, a_right));
b.next();
} else if bcount > avalue.char_len() {
a.next();
b.head = Some(DelText(bcount - avalue.char_len()));
} else {
a.next();
b.next();
}
}
AddSkip(acount) => {
if bcount < acount {
a.head = Some(AddSkip(acount - bcount));
delres.place(&b.next().unwrap());
} else if bcount > acount {
a.next();
delres.place(&DelText(acount));
b.head = Some(DelText(bcount - acount));
} else {
a.next();
delres.place(&b.next().unwrap());
}
}
_ => {
panic!("Unimplemented or Unexpected");
}
},
DelStyles(b_count, b_styles) => match a.get_head() {
AddText(mut a_styles, a_value) => {
if b_count < a_value.char_len() {
let (a_left, a_right) = a_value.split_at(b_count);
let mut a_left_styles = a_styles.clone();
a_left_styles.remove(&b_styles);
addres.place(&AddText(a_left_styles, a_left));
a.head = Some(AddText(a_styles, a_right));
b.next();
} else if b_count > a_value.char_len() {
a_styles.remove(&b_styles);
                        b.head = Some(DelStyles(b_count - a_value.char_len(), b_styles));
                        addres.place(&AddText(a_styles, a_value));
                        a.next();
} else {
a_styles.remove(&b_styles);
addres.place(&AddText(a_styles, a_value));
a.next();
b.next();
}
}
AddStyles(a_count, a_styles) => {
// a_styles - b_styles
let mut combined_styles = a_styles.clone();
combined_styles.remove(&b_styles);
// res.push(AddStyles(cmp::min(a_count, b_count), both_styles));
if a_count > b_count {
a.head = Some(AddStyles(a_count - b_count, a_styles));
a.next();
addres.place(&AddStyles(b_count, combined_styles));
} else if a_count < b_count {
b.head = Some(DelStyles(b_count - a_count, b_styles));
a.next();
addres.place(&AddStyles(b_count, combined_styles));
} else {
a.next();
b.next();
addres.place(&AddStyles(a_count, combined_styles));
}
delres.place(&b.next().unwrap());
}
AddSkip(a_count) => {
addres.place(&AddSkip(cmp::min(a_count, b_count)));
delres.place(&DelStyles(cmp::min(a_count, b_count), b_styles.clone()));
if a_count > b_count {
a.head = Some(AddSkip(a_count - b_count));
b.next();
} else if a_count < b_count {
a.next();
b.head = Some(DelStyles(b_count - a_count, b_styles.clone()));
} else {
a.next();
b.next();
}
}
AddWithGroup(..) => {
panic!("DelStyles by AddWithGroup is ILLEGAL");
}
AddGroup(..) => {
panic!("DelStyles by AddGroup is ILLEGAL");
}
},
DelSkip(bcount) => match a.get_head() {
AddText(a_styles, avalue) => {
if bcount < avalue.char_len() {
let (a_left, a_right) = avalue.split_at(bcount);
addres.place(&AddText(a_styles.clone(), a_left));
a.head = Some(AddText(a_styles, a_right));
b.next();
} else if bcount > avalue.char_len() {
addres.place(&a.next().unwrap());
b.head = Some(DelSkip(bcount - avalue.char_len()));
} else {
addres.place(&a.get_head());
a.next();
b.next();
}
}
AddStyles(a_count, a_styles) => {
addres.place(&AddStyles(cmp::min(a_count, bcount), a_styles.clone()));
delres.place(&DelSkip(cmp::min(a_count, bcount)));
if a_count > bcount {
a.head = Some(AddStyles(a_count - bcount, a_styles));
b.next();
} else if a_count < bcount {
a.next();
b.head = Some(DelSkip(bcount - a_count));
} else {
a.next();
b.next();
}
}
AddSkip(acount) => {
addres.place(&AddSkip(cmp::min(acount, bcount)));
delres.place(&DelSkip(cmp::min(acount, bcount)));
if acount > bcount {
a.head = Some(AddSkip(acount - bcount));
b.next();
} else if acount < bcount {
a.next();
b.head = Some(DelSkip(bcount - acount));
} else {
a.next();
b.next();
}
}
AddWithGroup(..) => {
addres.place(&a.next().unwrap());
delres.place(&DelSkip(1));
if bcount == 1 {
b.next();
} else {
b.head = Some(DelSkip(bcount - 1));
}
}
AddGroup(_, aspan) => {
addres.place(&a.next().unwrap());
if aspan.skip_pre_len() > 0 {
delres.place(&DelSkip(aspan.skip_pre_len()));
}
if bcount == 1 {
b.next();
} else {
b.head = Some(DelSkip(bcount - 1));
}
}
},
DelWithGroup(span) => match a.get_head() {
AddText(..) => {
panic!("DelWithGroup by AddText is ILLEGAL");
}
AddStyles(..) => {
panic!("DelWithGroup by AddStyles is ILLEGAL");
}
AddSkip(acount) => {
delres.place(&b.next().unwrap());
addres.place(&AddSkip(1));
if acount > 1 {
a.head = Some(AddSkip(acount - 1));
} else {
a.next();
}
}
AddWithGroup(insspan) => {
a.next();
b.next();
let Op(del, ins) = compose_add_del(&insspan, &span);
delres.place(&DelWithGroup(del));
addres.place(&AddWithGroup(ins));
}
AddGroup(attr, insspan) => {
a.next();
b.next();
let Op(del, ins) = compose_add_del(&insspan, &span);
addres.place(&AddGroup(attr, ins));
delres.place_all(&del);
}
},
DelGroup(span) => {
match a.get_head() {
AddText(..) => {
panic!("DelGroup by AddText is ILLEGAL");
}
AddStyles(..) => {
panic!("DelGroup by AddStyles is ILLEGAL");
}
AddSkip(acount) => {
delres.place(&b.next().unwrap());
if span.skip_post_len() > 0 {
addres.place(&AddSkip(span.skip_post_len()));
}
if acount > 1 {
a.head = Some(AddSkip(acount - 1));
} else {
a.next();
}
}
AddWithGroup(insspan) => {
a.next();
b.next();
let Op(del, ins) = compose_add_del(&insspan, &span);
delres.place(&DelGroup(del));
addres.place_all(&ins[..]);
// let mut a_stepper = AddStepper::new(&insspan);
// let mut b_stepper = DelStepper::new(&span);
// let mut del_inner = vec![];
// compose_add_del_inner(&mut del_inner, addres, &mut a_stepper, &mut b_stepper);
// if !b_stepper.is_done() {
// del_inner.place_all(&b_stepper.into_span());
// }
// if !a_stepper.is_done() {
// let rest = a_stepper.into_span();
// if rest.skip_pre_len() > 0 {
// del_inner.place(&DelSkip(rest.skip_pre_len()));
// }
// addres.place_all(&rest);
// }
// delres.place(&DelGroup(del_inner));
// if b_stepper.clone().into_span().skip_post_len() > 0 {
// addres.place(&AddSkip(b_stepper.into_span().skip_post_len()));
// }
}
AddGroup(_, insspan) => {
a.next();
b.next();
let Op(del, ins) = compose_add_del(&insspan, &span);
delres.place_all(&del[..]);
addres.place_all(&ins[..]);
}
}
} // DelObject => {
// match a.get_head() {
// AddSkip(acount) => {
// if acount > 1 {
// a.head = Some(AddSkip(acount - 1));
// delres.place(&b.next().unwrap());
// } else {
// a.next();
// delres.place(&b.next().unwrap());
// }
// }
// _ => {
// panic!("Bad");
// }
// }
// }
// DelMany(bcount) => {
// match a.get_head() {
// AddText(avalue) => {
// let alen = avalue.chars().count();
// if bcount < alen {
// a.head = Some(AddText(avalue.chars().skip(bcount).collect()));
// b.next();
// } else if bcount > alen {
// a.next();
// b.head = Some(DelMany(bcount - alen));
// } else {
// a.next();
// b.next();
// }
// }
// AddSkip(acount) => {
// if bcount < acount {
// a.head = Some(AddSkip(acount - bcount));
// delres.place(&b.next().unwrap());
// } else if bcount > acount {
// a.next();
// delres.place(&DelMany(acount));
// b.head = Some(DelMany(bcount - acount));
// } else {
// a.next();
// b.next();
// }
// }
// AddGroup(attr, ins_span) => {
// if bcount > 1 {
// a.next();
// delres.place(&DelMany(ins_span.skip_len()));
// b.head = Some(DelMany(bcount - 1));
// } else {
// a.next();
// b.next();
// }
// }
// AddWithGroup(insspan) => {
// if bcount > 1 {
// a.next();
// b.head = Some(DelMany(bcount - 1));
// } else {
// a.next();
// b.next();
// }
// delres.place(&DelMany(1));
// }
// }
// }
// DelGroupAll => {
// match a.get_head() {
// AddText(avalue) => {
// panic!("DelGroupAll by AddText is ILLEGAL");
// }
// AddSkip(acount) => {
// delres.place(&b.next().unwrap());
// if acount > 1 {
// a.head = Some(AddSkip(acount - 1));
// } else {
// a.next();
// }
// }
// AddWithGroup(insspan) => {
// a.next();
// delres.place(&b.next().unwrap());
// }
// AddGroup(attr, insspan) => {
// a.next();
// b.next();
// }
// }
// }
}
}
}
pub fn compose<S: Schema>(a: &Op<S>, b: &Op<S>) -> Op<S> {
let &Op(ref adel, ref ains) = a;
let &Op(ref bdel, ref bins) = b;
log_compose!("`````````````` >(compose)<");
log_compose!("``````````````a_ins {:?}", ains);
log_compose!("``````````````b_del {:?}", bdel);
let Op(mdel, mins) = compose_add_del(ains, bdel);
log_compose!("`````````````` a=> {:?}", mdel);
log_compose!("`````````````` b=> {:?}", mins);
log_compose!("``````````````a_del {:?}", adel);
log_compose!("`````````````` a=> {:?}", mdel);
let a_ = compose_del_del(adel, &mdel);
log_compose!("`````````````` del' {:?}", a_);
log_compose!("`````````````` b=> {:?}", mins);
log_compose!("`````````````b_ins {:?}", bins);
let b_ = compose_add_add(&mins, bins);
log_compose!("`````````````` ins' {:?}", b_);
log_compose!();
log_compose!();
Op(a_, b_)
}
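// Hedged test sketch (not part of this module): the property `compose` is meant to satisfy
// can be checked with whatever document/operation generators and `apply` helper the
// surrounding crate provides (the names below are assumptions, not real APIs):
//
// let ab = compose(&a, &b);
// assert_eq!(
//     apply(&apply(&doc, &a), &b), // apply a, then b
//     apply(&doc, &ab),            // apply the composed op once
// );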
| 40.215548 | 105 | 0.338869 |
2127a939a5c9545b78f6961d6ae8202cf7e6ef73 | 6,941 | // Copyright (c) 2015 Anders Kaseorg <[email protected]>
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// “Software”), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! This crate exports a macro `enum_from_primitive!` that wraps an
//! `enum` declaration and automatically adds an implementation of
//! `num::FromPrimitive` (reexported here), to allow conversion from
//! primitive integers to the enum. It therefore provides an
//! alternative to the built-in `#[derive(FromPrimitive)]`, which
//! requires the unstable `std::num::FromPrimitive` and is disabled in
//! Rust 1.0.
//!
//! # Example
//!
//! ```
//! #[macro_use]
//! extern crate enum_primitive;
//! use enum_primitive::num_traits::FromPrimitive;
//!
//! enum_from_primitive! {
//! #[derive(Debug, PartialEq)]
//! enum FooBar {
//! Foo = 17,
//! Bar = 42,
//! Baz,
//! }
//! }
//!
//! fn main() {
//! assert_eq!(FooBar::from_i32(17), Some(FooBar::Foo));
//! assert_eq!(FooBar::from_i32(42), Some(FooBar::Bar));
//! assert_eq!(FooBar::from_i32(43), Some(FooBar::Baz));
//! assert_eq!(FooBar::from_i32(91), None);
//! }
//! ```
pub extern crate num_traits;
pub use std::option::Option;
pub use num_traits::FromPrimitive;
/// Helper macro for internal use by `enum_from_primitive!`.
#[macro_export]
macro_rules! enum_from_primitive_impl_ty {
($meth:ident, $ty:ty, $name:ident, $( $variant:ident )*) => {
#[allow(non_upper_case_globals, unused)]
fn $meth(n: $ty) -> $crate::Option<Self> {
$( if n == $name::$variant as $ty {
$crate::Option::Some($name::$variant)
} else )* {
$crate::Option::None
}
}
};
}
/// Helper macro for internal use by `enum_from_primitive!`.
#[macro_export]
#[macro_use(enum_from_primitive_impl_ty)]
macro_rules! enum_from_primitive_impl {
($name:ident, $( $variant:ident )*) => {
impl $crate::FromPrimitive for $name {
enum_from_primitive_impl_ty! { from_i64, i64, $name, $( $variant )* }
enum_from_primitive_impl_ty! { from_u64, u64, $name, $( $variant )* }
}
};
}
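// Illustrative expansion sketch (hand-written, not compiler output): for the `FooBar` example
// in the crate docs, `enum_from_primitive_impl! { FooBar, Foo Bar Baz }` expands to roughly:
//
// impl FromPrimitive for FooBar {
//     fn from_i64(n: i64) -> Option<FooBar> {
//         if n == FooBar::Foo as i64 { Some(FooBar::Foo) }
//         else if n == FooBar::Bar as i64 { Some(FooBar::Bar) }
//         else if n == FooBar::Baz as i64 { Some(FooBar::Baz) }
//         else { None }
//     }
//     // from_u64 has the same shape with `u64` in place of `i64`
// }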
/// Wrap this macro around an `enum` declaration to get an
/// automatically generated implementation of `num::FromPrimitive`.
#[macro_export]
#[macro_use(enum_from_primitive_impl)]
macro_rules! enum_from_primitive {
(
$( #[$enum_attr:meta] )*
enum $name:ident {
$( $( #[$variant_attr:meta] )* $variant:ident ),+ $( = $discriminator:expr, $( $( #[$variant_two_attr:meta] )* $variant_two:ident ),+ )*
}
) => {
$( #[$enum_attr] )*
enum $name {
$( $( #[$variant_attr] )* $variant ),+ $( = $discriminator, $( $( #[$variant_two_attr] )* $variant_two ),+ )*
}
enum_from_primitive_impl! { $name, $( $variant )+ $( $( $variant_two )+ )* }
};
(
$( #[$enum_attr:meta] )*
enum $name:ident {
$( $( $( #[$variant_attr:meta] )* $variant:ident ),+ = $discriminator:expr ),*
}
) => {
$( #[$enum_attr] )*
enum $name {
$( $( $( #[$variant_attr] )* $variant ),+ = $discriminator ),*
}
enum_from_primitive_impl! { $name, $( $( $variant )+ )* }
};
(
$( #[$enum_attr:meta] )*
enum $name:ident {
$( $( #[$variant_attr:meta] )* $variant:ident ),+ $( = $discriminator:expr, $( $( #[$variant_two_attr:meta] )* $variant_two:ident ),+ )*,
}
) => {
$( #[$enum_attr] )*
enum $name {
$( $( #[$variant_attr] )* $variant ),+ $( = $discriminator, $( $( #[$variant_two_attr] )* $variant_two ),+ )*,
}
enum_from_primitive_impl! { $name, $( $variant )+ $( $( $variant_two )+ )* }
};
(
$( #[$enum_attr:meta] )*
enum $name:ident {
$( $( $( #[$variant_attr:meta] )* $variant:ident ),+ = $discriminator:expr ),+,
}
) => {
$( #[$enum_attr] )*
enum $name {
$( $( $( #[$variant_attr] )* $variant ),+ = $discriminator ),+,
}
enum_from_primitive_impl! { $name, $( $( $variant )+ )+ }
};
(
$( #[$enum_attr:meta] )*
pub enum $name:ident {
$( $( #[$variant_attr:meta] )* $variant:ident ),+ $( = $discriminator:expr, $( $( #[$variant_two_attr:meta] )* $variant_two:ident ),+ )*
}
) => {
$( #[$enum_attr] )*
pub enum $name {
$( $( #[$variant_attr] )* $variant ),+ $( = $discriminator, $( $( #[$variant_two_attr] )* $variant_two ),+ )*
}
enum_from_primitive_impl! { $name, $( $variant )+ $( $( $variant_two )+ )* }
};
(
$( #[$enum_attr:meta] )*
pub enum $name:ident {
$( $( $( #[$variant_attr:meta] )* $variant:ident ),+ = $discriminator:expr ),*
}
) => {
$( #[$enum_attr] )*
pub enum $name {
$( $( $( #[$variant_attr] )* $variant ),+ = $discriminator ),*
}
enum_from_primitive_impl! { $name, $( $( $variant )+ )* }
};
(
$( #[$enum_attr:meta] )*
pub enum $name:ident {
$( $( #[$variant_attr:meta] )* $variant:ident ),+ $( = $discriminator:expr, $( $( #[$variant_two_attr:meta] )* $variant_two:ident ),+ )*,
}
) => {
$( #[$enum_attr] )*
pub enum $name {
$( $( #[$variant_attr] )* $variant ),+ $( = $discriminator, $( $( #[$variant_two_attr] )* $variant_two ),+ )*,
}
enum_from_primitive_impl! { $name, $( $variant )+ $( $( $variant_two )+ )* }
};
(
$( #[$enum_attr:meta] )*
pub enum $name:ident {
$( $( $( #[$variant_attr:meta] )* $variant:ident ),+ = $discriminator:expr ),+,
}
) => {
$( #[$enum_attr] )*
pub enum $name {
$( $( $( #[$variant_attr] )* $variant ),+ = $discriminator ),+,
}
enum_from_primitive_impl! { $name, $( $( $variant )+ )+ }
};
}
| 35.233503 | 149 | 0.547472 |
013bf16425e2a31a0ab0e763e5a5c0a6ed995af7 | 1,411 | use crate::types::{PublicKey, SecretKey};
use ockam_core::Result;
use rand_core::{CryptoRng, RngCore};
const PARTIAL_SIGNATURE_BYTES: usize = 49;
/// MPC and threshold signing functionality
pub trait SecretKeyShareVault {
/// Secret share this key by creating `N` shares where `T` are required
/// to combine back into this secret
fn split_secret<R: CryptoRng + RngCore, const T: usize, const N: usize>(
&self, secret: &SecretKey, rng: &mut R
) -> Result<[SecretKey; N]>;
/// Reconstruct a secret key from shares created from `split_secret`
fn combine_shares<const T: usize, const N: usize>(
&self,
shares: &[SecretKey]) -> Result<SecretKey>;
/// Reconstruct a signature from partial signatures created from `partial_sign`
fn combine_signatures<const T: usize, const N: usize>(
&self,
signatures: &[[u8; PARTIAL_SIGNATURE_BYTES]]
) -> Result<[u8; PARTIAL_SIGNATURE_BYTES -1 ]>;
/// Create a new partial signature
fn partial_sign<B: AsRef<[u8]>>(
&self,
sk: &SecretKey, msg: &B) -> Result<[u8; PARTIAL_SIGNATURE_BYTES]>;
    /// Verify that a combined signature produced by `combine_signatures` is valid for `msg` under `pk`
fn verify_signatures<B: AsRef<[u8]>>(
&self,
signature: &[u8; PARTIAL_SIGNATURE_BYTES -1],
pk: &PublicKey, msg: &B) -> Result<bool>;
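    // Hedged usage sketch (illustrative only; `vault`, `sk`, `pk`, `msg` and `rng` are
    // assumed bindings, and a 3-of-5 split is chosen arbitrarily):
    //
    // let shares = vault.split_secret::<_, 3, 5>(&sk, &mut rng)?;
    // let partials: Vec<_> = shares[..3]
    //     .iter()
    //     .map(|share| vault.partial_sign(share, &msg))
    //     .collect::<Result<_>>()?;
    // let signature = vault.combine_signatures::<3, 5>(&partials)?;
    // assert!(vault.verify_signatures(&signature, &pk, &msg)?);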
} | 38.135135 | 100 | 0.649185 |
1eaa3054320bdfc7083d2434dbcede92c8dd291f | 936 | use crate::{
cond_stmt::CondStmt,
executor::{Executor, StatusType},
mut_input::{self, MutInput},
};
use angora_common::config;
use rand::prelude::*;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
mod method;
pub use self::method::*;
mod grad;
use self::grad::*;
pub mod interesting_val;
pub use self::interesting_val::*;
mod handler;
pub use self::handler::SearchHandler;
pub mod gd;
pub use self::gd::GdSearch;
pub mod random;
pub use self::random::RandomSearch;
pub mod cbh;
pub use self::cbh::CbhSearch;
pub mod mb;
pub use self::mb::MbSearch;
// Other cases of special offsets
pub mod cmpfn;
pub use self::cmpfn::FnFuzz;
pub mod len;
pub use self::len::LenFuzz;
pub mod afl;
pub use self::afl::AFLFuzz;
pub mod exploit;
pub use self::exploit::ExploitFuzz;
pub mod det;
pub use self::det::DetFuzz;
pub mod one_byte;
pub use self::one_byte::OneByteFuzz;
mod newgd;
pub use self::newgd::IntGdSearch;
| 19.5 | 37 | 0.709402 |
3aa6bf410592c8322395437f19590197a215eb97 | 6,312 | use array_macro::array;
use core::{mem::size_of_val, ptr::NonNull};
use core::ops::{ DerefMut };
use super::*;
use crate::define::{
param::NPROC,
memlayout::{ PGSIZE, TRAMPOLINE }
};
use crate::lock::spinlock::{ Spinlock, SpinlockGuard };
use crate::register::sstatus::intr_on;
use crate::memory::*;
pub struct ProcManager{
proc: [Process; NPROC],
init_proc: Process
}
pub static mut PROC_MANAGER:ProcManager = ProcManager::new();
pub static PID_LOCK:Spinlock<()> = Spinlock::new((), "pid_lock");
// helps ensure that wakeups of wait()ing
// parents are not lost. helps obey the
// memory model when using p->parent.
// must be acquired before any p->lock.
pub static WAIT_LOCK:Spinlock<()> = Spinlock::new((), "wait_lock");
pub static mut NEXT_PID:usize = 0;
impl ProcManager{
pub const fn new() -> Self {
Self{
proc: array![_ => Process::new(); NPROC],
init_proc: Process::new()
}
}
pub fn get_table_mut(&mut self) -> &mut [Process; NPROC] {
&mut self.proc
}
// initialize the proc table at boot time.
// Only used in boot.
pub unsafe fn proc_init(&mut self){
println!("procinit......");
for (pos, p) in self.proc.iter_mut().enumerate() {
p.extern_data.get_mut().set_kstack(kstack(pos));
}
println!("procinit done......");
}
// Allocate a page for each process's kernel stack.
// Map it high in memory, followed by an invalid
    // guard page.
pub unsafe fn proc_mapstacks(&mut self) {
for (pos, _) in self.proc.iter_mut().enumerate() {
let pa = RawPage::new_zeroed() as *mut u8;
let va = kstack(pos);
KERNEL_PAGETABLE.kvmmap(
VirtualAddress::new(va),
PhysicalAddress::new(pa as usize),
PGSIZE,
PteFlags::R | PteFlags::W
);
}
}
    // Set up the first user program
pub unsafe fn user_init(&mut self) {
println!("first user process init......");
let p = self.alloc_proc().expect("Fail to get unused process");
// allocate one user page and copy init's instructions
// and data into it.
let extern_data = p.extern_data.get_mut();
extern_data.pagetable.as_mut().unwrap().uvminit(
&INITCODE,
size_of_val(&INITCODE)
);
extern_data.size = PGSIZE;
// prepare for the very first "return" from kernel to user.
let tf = &mut *extern_data.trapframe;
tf.epc = 0; // user program counter
tf.sp = PGSIZE; // user stack pointer
extern_data.set_name("initcode");
let mut guard = p.data.acquire();
guard.set_state(Procstate::RUNNABLE);
drop(guard);
}
// Look in the process table for an UNUSED proc.
// If found, initialize state required to run in the kernel,
    // and return a mutable reference to it (the process lock is released before returning).
    // If there are no free procs, or a memory allocation fails, return None.
// TODO: possible error occurs here.
pub fn alloc_proc(&mut self) -> Option<&mut Process> {
for p in self.proc.iter_mut() {
let mut guard = p.data.acquire();
if guard.state == Procstate::UNUSED {
guard.pid = alloc_pid();
guard.set_state(Procstate::USED);
let extern_data = p.extern_data.get_mut();
// Allocate a trapframe page.
let ptr = unsafe{ RawPage::new_zeroed() as *mut u8 };
extern_data.set_trapframe(ptr as *mut Trapframe);
// An empty user page table
unsafe{
extern_data.proc_pagetable();
}
// Set up new context to start executing at forkret,
// which returns to user space.
extern_data.init_context();
drop(guard);
return Some(p);
}else {
drop(guard);
}
}
None
}
// Wake up all processes sleeping on chan.
// Must be called without any p->lock.
pub fn wakeup(&self, channel: usize) {
for p in self.proc.iter() {
let mut guard = p.data.acquire();
if guard.state == Procstate::SLEEPING && guard.channel == channel {
guard.state = Procstate::RUNNABLE;
}
drop(guard);
}
}
pub fn seek_runnable(&mut self) -> Option<&mut Process> {
for p in self.proc.iter_mut() {
let mut guard = p.data.acquire();
match guard.state {
Procstate::RUNNABLE => {
guard.state = Procstate::ALLOCATED;
drop(guard);
return Some(p)
},
_ => {
drop(guard);
},
}
}
None
}
}
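// Hedged layout note (illustration only): with `proc_mapstacks` above and `kstack` below,
// each process gets a two-page slot just under TRAMPOLINE (one mapped stack page plus one
// unmapped guard page), so for example:
//
// debug_assert_eq!(kstack(0), Into::<usize>::into(TRAMPOLINE) - 2 * PGSIZE);
// debug_assert_eq!(kstack(1), Into::<usize>::into(TRAMPOLINE) - 4 * PGSIZE);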
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns. It loops, doing:
// - choose a process to run.
// - swtch to start running that process.
// - eventually that process transfers control
// via swtch back to the scheduler.
pub unsafe fn scheduler(){
extern "C" {
fn swtch(old: *mut Context, new: *mut Context);
}
let c = CPU_MANAGER.mycpu();
c.set_proc(None);
loop{
// Avoid deadlock by ensuring that devices can interrupt.
intr_on();
match PROC_MANAGER.seek_runnable() {
Some(p) => {
c.set_proc(NonNull::new(p as *mut Process));
let mut guard = p.data.acquire();
guard.state = Procstate::RUNNING;
swtch(c.get_context_mut(),
&mut p.extern_data.get_mut().context as *mut Context);
c.set_proc(None);
drop(guard);
}
None => {}
}
}
}
pub fn alloc_pid() -> usize{
let guard = PID_LOCK.acquire();
let pid;
unsafe {
pid = NEXT_PID;
NEXT_PID += 1;
}
drop(guard);
pid
}
#[inline]
fn kstack(pos: usize) -> usize {
Into::<usize>::into(TRAMPOLINE) - (pos + 1) * 2 * PGSIZE
} | 26.859574 | 79 | 0.536755 |
3828014e98922e6ecf426ffbab85f3177414225f | 4,942 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#[macro_use]
extern crate criterion;
use criterion::Criterion;
use datafusion::datasource::file_format::csv::CsvFormat;
use datafusion::datasource::listing::{ListingOptions, ListingTable};
use datafusion::datasource::object_store::local::LocalFileSystem;
use parking_lot::Mutex;
use std::sync::Arc;
extern crate arrow;
extern crate datafusion;
use arrow::datatypes::{DataType, Field, Schema};
use datafusion::datasource::MemTable;
use datafusion::execution::context::ExecutionContext;
use tokio::runtime::Runtime;
fn query(ctx: Arc<Mutex<ExecutionContext>>, sql: &str) {
let rt = Runtime::new().unwrap();
// execute the query
let df = rt.block_on(ctx.lock().sql(sql)).unwrap();
rt.block_on(df.collect()).unwrap();
}
fn create_context() -> Arc<Mutex<ExecutionContext>> {
// define schema for data source (csv file)
let schema = Arc::new(Schema::new(vec![
Field::new("c1", DataType::Utf8, false),
Field::new("c2", DataType::UInt32, false),
Field::new("c3", DataType::Int8, false),
Field::new("c4", DataType::Int16, false),
Field::new("c5", DataType::Int32, false),
Field::new("c6", DataType::Int64, false),
Field::new("c7", DataType::UInt8, false),
Field::new("c8", DataType::UInt16, false),
Field::new("c9", DataType::UInt32, false),
Field::new("c10", DataType::UInt64, false),
Field::new("c11", DataType::Float32, false),
Field::new("c12", DataType::Float64, false),
Field::new("c13", DataType::Utf8, false),
]));
let testdata = datafusion::test_util::arrow_test_data();
// create CSV data source
let listing_options = ListingOptions::new(Arc::new(CsvFormat::default()));
let csv = ListingTable::new(
Arc::new(LocalFileSystem {}),
format!("{}/csv/aggregate_test_100.csv", testdata),
schema,
listing_options,
);
let rt = Runtime::new().unwrap();
let ctx_holder: Arc<Mutex<Vec<Arc<Mutex<ExecutionContext>>>>> =
Arc::new(Mutex::new(vec![]));
let partitions = 16;
rt.block_on(async {
// create local execution context
let mut ctx = ExecutionContext::new();
ctx.state.lock().config.target_partitions = 1;
let runtime = ctx.state.lock().runtime_env.clone();
let mem_table = MemTable::load(Arc::new(csv), Some(partitions), runtime)
.await
.unwrap();
ctx.register_table("aggregate_test_100", Arc::new(mem_table))
.unwrap();
ctx_holder.lock().push(Arc::new(Mutex::new(ctx)))
});
let ctx = ctx_holder.lock().get(0).unwrap().clone();
ctx
}
fn criterion_benchmark(c: &mut Criterion) {
c.bench_function("sort_and_limit_by_int", |b| {
let ctx = create_context();
b.iter(|| {
query(
ctx.clone(),
"SELECT c1, c13, c6, c10 \
FROM aggregate_test_100 \
ORDER BY c6
LIMIT 10",
)
})
});
c.bench_function("sort_and_limit_by_float", |b| {
let ctx = create_context();
b.iter(|| {
query(
ctx.clone(),
"SELECT c1, c13, c12 \
FROM aggregate_test_100 \
ORDER BY c13
LIMIT 10",
)
})
});
c.bench_function("sort_and_limit_lex_by_int", |b| {
let ctx = create_context();
b.iter(|| {
query(
ctx.clone(),
"SELECT c1, c13, c6, c10 \
FROM aggregate_test_100 \
ORDER BY c6 DESC, c10 DESC
LIMIT 10",
)
})
});
c.bench_function("sort_and_limit_lex_by_string", |b| {
let ctx = create_context();
b.iter(|| {
query(
ctx.clone(),
"SELECT c1, c13, c6, c10 \
FROM aggregate_test_100 \
ORDER BY c1, c13
LIMIT 10",
)
})
});
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| 31.679487 | 80 | 0.591461 |
e5a7366228583cd4bcd1a3d9947e87ba5541c9bb | 5,104 | // Copyright 2013 Axel Rasmussen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::structs::graph::{Graph, SearchResult, VertexId};
use std::collections::HashMap;
#[derive(Clone, Copy, Eq, PartialEq)]
enum VisitState {
Open,
Closed,
}
struct VertexState {
heuristic_score: i64,
actual_score: i64,
state: VisitState,
previous: Option<VertexId>,
}
impl VertexState {
pub fn with_initial_state(initial_state: VisitState) -> Self {
VertexState {
heuristic_score: i64::max_value(),
actual_score: i64::max_value(),
state: initial_state,
previous: None,
}
}
/// Set this state's absolute score. That is, update both the heuristic and
/// actual score. This is useful, for example, in Dijkstra's algorithm,
/// where we really only have a single score.
pub fn set_absolute_score(&mut self, score: i64) {
self.heuristic_score = score;
self.actual_score = score;
}
}
type VertexStateMap = HashMap<VertexId, VertexState>;
fn find_smallest_open(map: &VertexStateMap) -> Option<VertexId> {
let mut current: Option<VertexId> = None;
for entry in map {
// If this vertex isn't in the open set, skip it.
if entry.1.state != VisitState::Open {
continue;
}
// If there is no smallest vertex yet, just use this one. Otherwise, use this
// one if it is smaller than the current vertex.
if current.is_none()
|| entry.1.heuristic_score < map[current.as_ref().unwrap()].heuristic_score
{
current = Some(*entry.0);
}
}
current
}
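/// Dijkstra's shortest-path search from `start_id` to `end_id`.
/// Returns the path taken and its total edge weight as a `SearchResult`,
/// or `None` when `end_id` cannot be reached from `start_id`.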
pub fn dijkstra(graph: &Graph, start_id: VertexId, end_id: VertexId) -> Option<SearchResult> {
// Create a state struct for each vertex, where each vertex is initially
// unvisited (that is, "open"), and with a score of (effectively) infinity.
let mut vertices: VertexStateMap = graph
.all_vertices()
.into_iter()
.map(|id| (id, VertexState::with_initial_state(VisitState::Open)))
.collect();
// Set the start vertex's score to 0, and make it the current vertex.
let mut current_id: VertexId = start_id;
vertices.get_mut(&start_id).unwrap().set_absolute_score(0);
loop {
for neighbor_id in graph.get(current_id).get_neighbors().into_iter() {
// If this neighbor has already been visited, skip it.
if vertices.get(&neighbor_id).unwrap().state != VisitState::Open {
continue;
}
            // Calculate a tentative distance (current + edge) and update this neighbor's
            // distance if the tentative distance is less than its current distance.
let tentative: i64 = vertices.get(¤t_id).unwrap().actual_score
+ graph.get(current_id).distance_to(neighbor_id).unwrap();
if tentative < vertices.get(&neighbor_id).unwrap().actual_score {
let neighbor: &mut VertexState = vertices.get_mut(&neighbor_id).unwrap();
neighbor.set_absolute_score(tentative);
neighbor.previous = Some(current_id);
}
}
// Mark the current node as visited.
vertices.get_mut(¤t_id).unwrap().state = VisitState::Closed;
// Stop if the end node has been visited.
if vertices.get(&end_id).unwrap().state == VisitState::Closed {
break;
}
// Find the unvisited node with the smallest score. Stop if there is no
// smallest vertex, or if its score is "infinity".
let smallest_id = find_smallest_open(&vertices);
if smallest_id.is_none() {
return None;
}
let smallest_id = smallest_id.unwrap();
if vertices.get(&smallest_id).unwrap().actual_score == i64::max_value() {
return None;
}
// Make the next smallest vertex the current vertex.
current_id = smallest_id;
}
// Navigate the path back to the start from the end, and return the resulting
// path.
let mut result = SearchResult {
path: vec![],
sum: vertices.get(&end_id).unwrap().actual_score,
};
let mut current_path_id: VertexId = end_id;
loop {
result.path.push(current_path_id);
let previous_id: Option<VertexId> = vertices.get(¤t_path_id).unwrap().previous;
if let Some(id) = previous_id {
current_path_id = id;
} else {
break;
}
}
result.path.reverse();
Some(result)
}
| 35.444444 | 94 | 0.632053 |
33c406a050087e446b5ceb07321f41a5e51c3906 | 60 | pub mod client;
pub mod error;
pub mod model;
pub mod util;
| 12 | 15 | 0.733333 |
8fb62c702e8e54cbe9256edd7ebf42f46e6c5885 | 6,646 | use core::cell::Cell;
use kernel::common::cells::TakeCell;
use kernel::common::cells::VolatileCell;
use kernel::hil::uart;
use nrf5x::pinmux::Pinmux;
pub static mut UART0: UART = UART::new();
const UART_BASE: u32 = 0x40002000;
#[repr(C)]
pub struct UartRegisters {
pub task_startrx: VolatileCell<u32>,
pub task_stoprx: VolatileCell<u32>,
pub task_starttx: VolatileCell<u32>,
pub task_stoptx: VolatileCell<u32>,
_reserved1: [u32; 3],
pub task_suspend: VolatileCell<u32>,
_reserved2: [u32; 56],
pub event_cts: VolatileCell<u32>,
pub event_ncts: VolatileCell<u32>,
pub event_rxdrdy: VolatileCell<u32>,
_reserved3: [u32; 4],
pub event_txdrdy: VolatileCell<u32>,
_reserved4: [u32; 1],
pub event_error: VolatileCell<u32>,
_reserved5: [u32; 7],
pub event_rxto: VolatileCell<u32>,
_reserved6: [u32; 46],
pub shorts: VolatileCell<u32>,
_reserved7: [u32; 64],
pub intenset: VolatileCell<u32>,
pub intenclr: VolatileCell<u32>,
_reserved8: [u32; 93],
pub errorsrc: VolatileCell<u32>,
_reserved9: [u32; 31],
pub enable: VolatileCell<u32>,
_reserved10: [u32; 1],
pub pselrts: VolatileCell<Pinmux>,
pub pseltxd: VolatileCell<Pinmux>,
pub pselcts: VolatileCell<Pinmux>,
pub pselrxd: VolatileCell<Pinmux>,
pub rxd: VolatileCell<u32>,
pub txd: VolatileCell<u32>,
_reserved11: [u32; 1],
pub baudrate: VolatileCell<u32>,
_reserved12: [u32; 17],
pub config: VolatileCell<u32>,
_reserved13: [u32; 675],
pub power: VolatileCell<u32>,
}
pub struct UART {
regs: *const UartRegisters,
client: Cell<Option<&'static uart::Client>>,
buffer: TakeCell<'static, [u8]>,
len: Cell<usize>,
index: Cell<usize>,
}
#[derive(Copy, Clone)]
pub struct UARTParams {
pub baud_rate: u32,
}
impl UART {
pub const fn new() -> UART {
UART {
regs: UART_BASE as *const UartRegisters,
client: Cell::new(None),
buffer: TakeCell::empty(),
len: Cell::new(0),
index: Cell::new(0),
}
}
/// This UART implementation uses pins 8-11:
///
/// * pin 8: RTS
/// * pin 9: TX
/// * pin 10: CTS
/// * pin 11: RX
pub fn configure(&self, tx: Pinmux, rx: Pinmux, cts: Pinmux, rts: Pinmux) {
let regs = unsafe { &*self.regs };
regs.pseltxd.set(tx);
regs.pselrxd.set(rx);
regs.pselcts.set(cts);
regs.pselrts.set(rts);
}
fn set_baud_rate(&self, baud_rate: u32) {
let regs = unsafe { &*self.regs };
match baud_rate {
1200 => regs.baudrate.set(0x0004F000),
2400 => regs.baudrate.set(0x0009D000),
4800 => regs.baudrate.set(0x0013B000),
9600 => regs.baudrate.set(0x00275000),
14400 => regs.baudrate.set(0x003B0000),
19200 => regs.baudrate.set(0x004EA000),
28800 => regs.baudrate.set(0x0075F000),
38400 => regs.baudrate.set(0x009D5000),
57600 => regs.baudrate.set(0x00EBF000),
76800 => regs.baudrate.set(0x013A9000),
115200 => regs.baudrate.set(0x01D7E000),
230400 => regs.baudrate.set(0x03AFB000),
250000 => regs.baudrate.set(0x04000000),
460800 => regs.baudrate.set(0x075F7000),
1000000 => regs.baudrate.set(0x10000000),
_ => regs.baudrate.set(0x01D7E000), //setting default to 115200
}
}
pub fn enable(&self) {
let regs = unsafe { &*self.regs };
regs.enable.set(0b100);
}
pub fn enable_rx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenset.set(1 << 3 as u32);
}
pub fn enable_tx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenset.set(1 << 7 as u32);
}
pub fn disable_rx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenclr.set(1 << 3 as u32);
}
pub fn disable_tx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenclr.set(1 << 7 as u32);
}
pub fn handle_interrupt(&mut self) {
let regs = unsafe { &*self.regs };
let tx = regs.event_txdrdy.get() != 0;
if tx {
regs.event_txdrdy.set(0 as u32);
if self.len.get() == self.index.get() {
regs.task_stoptx.set(1 as u32);
// Signal client write done
self.client.get().map(|client| {
self.buffer.take().map(|buffer| {
client.transmit_complete(buffer, uart::Error::CommandComplete);
});
});
return;
}
self.buffer.map(|buffer| {
regs.event_txdrdy.set(0 as u32);
regs.txd.set(buffer[self.index.get()] as u32);
let next_index = self.index.get() + 1;
self.index.set(next_index);
});
}
}
pub unsafe fn send_byte(&self, byte: u8) {
let regs = &*self.regs;
self.index.set(1);
self.len.set(1);
regs.event_txdrdy.set(0);
self.enable_tx_interrupts();
regs.task_starttx.set(1);
regs.txd.set(byte as u32);
}
pub fn tx_ready(&self) -> bool {
let regs = unsafe { &*self.regs };
regs.event_txdrdy.get() & 0b1 != 0
}
fn rx_ready(&self) -> bool {
let regs = unsafe { &*self.regs };
regs.event_rxdrdy.get() & 0b1 != 0
}
}
impl uart::UART for UART {
fn set_client(&self, client: &'static uart::Client) {
self.client.set(Some(client));
}
fn init(&self, params: uart::UARTParams) {
self.enable();
self.set_baud_rate(params.baud_rate);
}
fn transmit(&self, tx_data: &'static mut [u8], tx_len: usize) {
let regs = unsafe { &*self.regs };
if tx_len == 0 {
return;
}
self.index.set(1);
self.len.set(tx_len);
regs.event_txdrdy.set(0);
self.enable_tx_interrupts();
regs.task_starttx.set(1);
regs.txd.set(tx_data[0] as u32);
self.buffer.replace(tx_data);
}
// Blocking implementation
fn receive(&self, rx_buffer: &'static mut [u8], rx_len: usize) {
let regs = unsafe { &*self.regs };
regs.task_startrx.set(1);
let mut i = 0;
while i < rx_len {
while !self.rx_ready() {}
rx_buffer[i] = regs.rxd.get() as u8;
i += 1;
}
}
fn abort_receive(&self) {
unimplemented!()
}
}
| 28.523605 | 87 | 0.55808 |
f406de2f4d4853bf7a92075612e91142719cebae | 2,490 | /**
* [0600] Non-negative Integers without Consecutive Ones
*
* Given a positive integer n, return the number of the integers in the range [0, n] whose binary representations do not contain consecutive ones.
*
* Example 1:
*
* Input: n = 5
* Output: 5
* Explanation:
* Here are the non-negative integers <= 5 with their corresponding binary representations:
* 0 : 0
* 1 : 1
* 2 : 10
* 3 : 11
* 4 : 100
* 5 : 101
* Among them, only integer 3 disobeys the rule (two consecutive ones) and the other 5 satisfy the rule.
*
* Example 2:
*
* Input: n = 1
* Output: 2
*
* Example 3:
*
* Input: n = 2
* Output: 3
*
*
* Constraints:
*
* 1 <= n <= 10^9
*
*/
pub struct Solution {}
// problem: https://leetcode.com/problems/non-negative-integers-without-consecutive-ones/
// discuss: https://leetcode.com/problems/non-negative-integers-without-consecutive-ones/discuss/?currentPage=1&orderBy=most_votes&query=
// submission codes start here
impl Solution {
// Credit: https://leetcode.com/problems/non-negative-integers-without-consecutive-ones/discuss/103766/C%2B%2B-4-lines-DPFibonacci-6-ms
pub fn find_integers(n: i32) -> i32 {
const FB: [i32; 31] = [
2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181, 6765, 10946,
17711, 28657, 46368, 75025, 121393, 196418, 317811, 514229, 832040, 1346269, 2178309,
3524578,
];
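        // FB[k] is the count of integers in [0, 2^(k+1)) whose binary form has
        // no consecutive ones; it is a Fibonacci sequence (FB[k] = FB[k-1] + FB[k-2]).
        // If bits `bt` and `bt-1` of n are both set, every such integer below
        // 2^(bt+1) is already <= n, so the answer is FB[bt]. Otherwise the valid
        // integers either leave bit `bt` clear (FB[bt-1] of them) or set it, force
        // bit `bt-1` to zero, and recurse on n with bit `bt` cleared.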
if n < 3 {
return n + 1;
}
for bt in (0..30).rev() {
if (n & (1 << bt)) != 0 {
return if (n & (1 << (bt - 1))) != 0 {
FB[bt]
} else {
                    FB[bt - 1] + Self::find_integers(n & !(1 << bt))
};
}
}
-1
}
}
// submission codes end
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_0600_example_1() {
let n = 5;
let result = 5;
assert_eq!(Solution::find_integers(n), result);
}
#[test]
fn test_0600_example_2() {
let n = 1;
let result = 2;
assert_eq!(Solution::find_integers(n), result);
}
#[test]
fn test_0600_example_3() {
let n = 2;
let result = 3;
assert_eq!(Solution::find_integers(n), result);
}
#[test]
fn test_0600_additional_1() {
let n = 1000000000;
let result = 2178309;
assert_eq!(Solution::find_integers(n), result);
}
}
| 23.055556 | 146 | 0.553012 |
62c00325f8a66c1ec381e04ce9c304c995180832 | 3,968 | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::BTreeMap;
use std::sync::Arc;
use chrono::DateTime;
use chrono::TimeZone;
use chrono::Utc;
use chrono_tz::Tz;
use common_arrow::arrow::datatypes::DataType as ArrowType;
use common_exception::Result;
use super::data_type::DataType;
use super::data_type::ARROW_EXTENSION_META;
use super::data_type::ARROW_EXTENSION_NAME;
use super::type_id::TypeID;
use crate::prelude::*;
#[derive(Default, Clone, serde::Deserialize, serde::Serialize)]
pub struct DateTime64Type {
precision: usize,
tz: Option<String>,
}
impl DateTime64Type {
pub fn create(precision: usize, tz: Option<String>) -> Self {
DateTime64Type { precision, tz }
}
pub fn arc(precision: usize, tz: Option<String>) -> DataTypePtr {
Arc::new(DateTime64Type { precision, tz })
}
pub fn tz(&self) -> Option<&String> {
self.tz.as_ref()
}
pub fn precision(&self) -> usize {
self.precision
}
#[inline]
pub fn utc_timestamp(&self, v: u64) -> DateTime<Utc> {
        // `v` is a timestamp in nanoseconds since the Unix epoch.
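        // e.g. v = 1_000_000_123 maps to 1970-01-01T00:00:01.000000123 UTC.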
Utc.timestamp(v as i64 / 1_000_000_000, (v % 1_000_000_000) as u32)
}
#[inline]
pub fn seconds(&self, v: u64) -> u64 {
v / 1_000_000_000
}
}
#[typetag::serde]
impl DataType for DateTime64Type {
fn data_type_id(&self) -> TypeID {
TypeID::DateTime64
}
#[inline]
fn as_any(&self) -> &dyn std::any::Any {
self
}
fn name(&self) -> &str {
"DateTime64"
}
fn default_value(&self) -> DataValue {
DataValue::UInt64(0)
}
fn create_constant_column(&self, data: &DataValue, size: usize) -> Result<ColumnRef> {
let value = data.as_u64()?;
let column = Series::from_data(&[value]);
Ok(Arc::new(ConstColumn::new(column, size)))
}
fn create_column(&self, data: &[DataValue]) -> Result<ColumnRef> {
let value = data
.iter()
.map(|v| v.as_u64())
.collect::<Result<Vec<_>>>()?;
Ok(Series::from_data(&value))
}
fn arrow_type(&self) -> ArrowType {
ArrowType::UInt64
}
fn custom_arrow_meta(&self) -> Option<BTreeMap<String, String>> {
let mut mp = BTreeMap::new();
mp.insert(ARROW_EXTENSION_NAME.to_string(), "DateTime64".to_string());
if let Some(tz) = &self.tz {
mp.insert(ARROW_EXTENSION_META.to_string(), tz.to_string());
}
Some(mp)
}
fn create_serializer(&self) -> Box<dyn TypeSerializer> {
let tz = self.tz.clone().unwrap_or_else(|| "UTC".to_string());
Box::new(DateTimeSerializer::<u64>::create(
tz.parse::<Tz>().unwrap(),
self.precision as u32,
))
}
fn create_deserializer(&self, capacity: usize) -> Box<dyn TypeDeserializer> {
let tz = self.tz.clone().unwrap_or_else(|| "UTC".to_string());
Box::new(DateTimeDeserializer::<u64> {
builder: MutablePrimitiveColumn::<u64>::with_capacity(capacity),
tz: tz.parse::<Tz>().unwrap(),
})
}
fn create_mutable(&self, capacity: usize) -> Box<dyn MutableColumn> {
Box::new(MutablePrimitiveColumn::<u64>::with_capacity(capacity))
}
}
impl std::fmt::Debug for DateTime64Type {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}({})", self.name(), self.precision())
}
}
| 28.753623 | 90 | 0.617188 |
89ee1ab98432fa0129d55462b5b65897ce560aa6 | 1,114 | use glifparser::glif::{DashContour, Glif, MFEKContour, MFEKOutline};
use super::ContourOperation;
use crate::util::MFEKGlifPointData;
impl ContourOperation for DashContour {
fn build(&self, contour: &MFEKContour<MFEKGlifPointData>) -> MFEKOutline<MFEKGlifPointData> {
let mut glif = Glif::default();
glif.outline = Some(vec![contour.inner.clone()]);
let dash_output = MFEKmath::dash_along_glif(&glif, self);
let mut output: MFEKOutline<MFEKGlifPointData> = Vec::new();
if let Some(outline) = dash_output.outline {
for contour in outline {
output.push(contour.into());
}
}
output
}
fn sub(&self, _contour: &MFEKContour<MFEKGlifPointData>, _begin: usize, _end: usize) -> Self {
self.clone()
}
fn append(
&self,
_contour: &MFEKContour<MFEKGlifPointData>,
_append: &MFEKContour<MFEKGlifPointData>,
) -> Self {
self.clone()
}
fn insert(&self, _contour: &MFEKContour<MFEKGlifPointData>, _point_idx: usize) -> Self {
self.clone()
}
}
| 29.315789 | 98 | 0.621185 |
4aeadb41652558d4174841081529a71491a5e4a5 | 209 | pub mod basic;
/// Specifies the compaction strategy used to compact SSTables.
pub enum CompactionStrategy {
/// Represents Basic compaction Strategy. See `BasicCompaction` for more details.
BASIC,
}
| 26.125 | 85 | 0.751196 |
8f57a058654ba0642d9ebadee90c9fa167227408 | 1,766 | mod dummy_server_backend;
mod test_value;
use chrono::prelude::*;
pub use dummy_server_backend::DummyServerBackend;
use libchordr::prelude::*;
pub use test_value::TestValue;
pub fn entry<S: Into<String>>(id: S) -> SetlistEntry {
SetlistEntry::from_song_with_settings(&test_song(id), SongSettings::default())
}
pub fn test_song<S: Into<String>>(id: S) -> TestSong {
TestSong { id: id.into() }
}
pub struct TestSong {
id: String,
}
impl SongIdTrait for TestSong {}
impl ListEntryTrait for TestSong {
type Id = SongId;
fn id(&self) -> SongId {
self.id.as_str().into()
}
}
impl SongData for TestSong {
fn title(&self) -> String {
self.id.clone()
}
fn file_type(&self) -> FileType {
FileType::Chorddown
}
}
pub(crate) fn get_test_user() -> User {
User::new(
Username::new("my-username").unwrap(), // username
"Daniel".to_string(), // first_name
"Corn".to_string(), // last_name
Password::new("mypass123").unwrap(), // password
)
}
pub(crate) fn get_test_user_password_hidden() -> User {
User::new(
Username::new("my-username").unwrap(), // username
"Daniel".to_string(), // first_name
"Corn".to_string(), // last_name
Password::default(), // password
)
}
pub(crate) fn get_test_setlist(user: User) -> Setlist {
Setlist::new(
"My setlist",
10291,
user,
None,
Some(Utc.ymd(2014, 11, 14).and_hms(8, 9, 10)),
Utc.ymd(2020, 06, 14).and_hms(16, 26, 20),
Utc.ymd(2020, 11, 01).and_hms(19, 17, 14),
vec![entry("song-1"), entry("song-2"), entry("song-3")],
)
}
| 25.228571 | 82 | 0.566251 |
b9b5a103de0def189bb3bc0043814afba277296f | 431 | #[cfg(test)]
use mv_list::MoveVec;
#[cfg(test)]
pub fn assert_list_includes_moves(list: &MoveVec, moves: &[&'static str]) {
for &m in moves.iter() {
assert!(list.iter().map(|m| m.to_string()).any(|mv| mv == m));
}
}
#[cfg(test)]
pub fn assert_list_excludes_moves(list: &MoveVec, moves: &[&'static str]) {
for &m in moves.iter() {
assert!(list.iter().map(|m| m.to_string()).all(|mv| mv != m));
}
}
| 25.352941 | 75 | 0.584687 |
f78b0b585fc938839398dd419ddd7d84e69bbd8a | 17,946 | use crate::tagvalue::datatypes::*;
use crate::{Buffer, TagU16};
use std::convert::TryInto;
use std::string::ToString;
/// A trait for (de)serializing data directly into a [`Buffer`].
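///
/// A minimal round-trip sketch (assuming, as the tests in this module do,
/// that `Vec<u8>` implements [`Buffer`]):
///
/// ```ignore
/// let mut buffer = Vec::new();
/// 42u32.serialize(&mut buffer);
/// assert_eq!(&buffer[..], b"42");
/// assert_eq!(u32::deserialize(&buffer[..]).unwrap(), 42);
/// ```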
pub trait FixValue<'a>
where
Self: Sized,
{
type Error;
type SerializeSettings: Default;
/// Flag that is enabled if and only if the byte representation of `Self` is
/// always valid ASCII.
///
/// This flag is currently not used, but it might be once Rust supports
/// fully-fledged `const` generics.
const IS_ASCII: bool;
/// Writes `self` to `buffer` using default settings.
#[inline(always)]
fn serialize<B>(&self, buffer: &mut B) -> usize
where
B: Buffer,
{
self.serialize_with(buffer, Self::SerializeSettings::default())
}
/// Writes `self` to `buffer` using custom serialization `settings`.
fn serialize_with<B>(&self, buffer: &mut B, _settings: Self::SerializeSettings) -> usize
where
B: Buffer;
/// Parses and deserializes from `data`.
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error>;
/// Like [`Self::deserialize`], but it's allowed to skip *some* amount of
/// input checking. Invalid inputs might not trigger errors and instead be
/// deserialized as random values.
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
Self::deserialize(data)
}
/// Serializes `self` to a [`Vec`] of bytes, allocated on the fly.
fn to_bytes(&self) -> Vec<u8> {
let mut buffer = Vec::new();
self.serialize(&mut buffer);
buffer
}
/// Allocates a [`String`] representation of `self`.
///
/// # Panics
/// This function will panic if the underlying byte representation is not
/// valid UTF-8. As such, you should only *ever* use this function for
/// [`FixValue`] implementors that are guaranteed to be representable
/// with valid UTF-8
/// (like numbers with ASCII digits).
fn to_string(&self) -> String {
String::from_utf8(self.to_bytes()).expect("Invalid UTF-8 representation of FIX field.")
}
}
/// Byte-padding instructions for byte strings.
#[derive(Debug, Copy, Clone)]
pub struct Padding {
pub len: usize,
pub byte: u8,
}
impl Default for Padding {
#[inline(always)]
fn default() -> Self {
Self { len: 0, byte: 0 }
}
}
impl Padding {
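    /// Fixed-width, zero-padded serialization; e.g. serializing `7u32` with
    /// `Padding::zeros(3)` writes `b"007"`.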
#[inline(always)]
pub fn zeros(len: usize) -> Self {
Self { len, byte: b'0' }
}
}
#[derive(Debug, Copy, Clone)]
pub struct WithMilliseconds(pub bool);
impl Default for WithMilliseconds {
fn default() -> Self {
Self(true)
}
}
#[cfg(feature = "utils-chrono")]
impl<'a> FixValue<'a> for chrono::DateTime<chrono::Utc> {
type Error = &'static str;
type SerializeSettings = WithMilliseconds;
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize<B>(&self, buffer: &mut B) -> usize
where
B: Buffer,
{
// Serialize with milliseconds by default.
self.serialize_with(buffer, WithMilliseconds(true))
}
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, settings: Self::SerializeSettings) -> usize
where
B: Buffer,
{
use chrono::{Datelike, Timelike};
(self.year() as u32).serialize_with(buffer, Padding::zeros(4));
(self.month() as u32).serialize_with(buffer, Padding::zeros(2));
(self.day() as u32).serialize_with(buffer, Padding::zeros(2));
buffer.extend_from_slice(b"-");
(self.hour() as u32).serialize_with(buffer, Padding::zeros(2));
buffer.extend_from_slice(b":");
(self.minute() as u32).serialize_with(buffer, Padding::zeros(2));
buffer.extend_from_slice(b":");
(self.second() as u32).serialize_with(buffer, Padding::zeros(2));
if settings.0 {
buffer.extend_from_slice(b".");
            (self.nanosecond() / 1_000_000).serialize_with(buffer, Padding::zeros(3));
21
} else {
17
}
}
#[inline(always)]
fn deserialize(_data: &'a [u8]) -> Result<Self, Self::Error> {
Err("TODO")
}
}
#[cfg(feature = "utils-chrono")]
impl<'a> FixValue<'a> for chrono::NaiveDate {
type Error = &'static str;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: Self::SerializeSettings) -> usize
where
B: Buffer,
{
use chrono::Datelike;
(self.year() as u32).serialize_with(buffer, Padding::zeros(4));
(self.month() as u32).serialize_with(buffer, Padding::zeros(2));
(self.day() as u32).serialize_with(buffer, Padding::zeros(2));
8
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let date = Date::deserialize(data).map_err(|_| "Invalid date format.")?;
date.to_chrono_naive().ok_or("Invalid date range.")
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
let date = Date::deserialize_lossy(data).map_err(|_| "Invalid date format.")?;
date.to_chrono_naive().ok_or("Invalid date range.")
}
}
#[cfg(feature = "utils-rust-decimal")]
impl<'a> FixValue<'a> for rust_decimal::Decimal {
type Error = error::Decimal;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
// TODO: Remove allocations.
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.as_bytes().len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
use std::str::FromStr;
let s = std::str::from_utf8(data).map_err(|_| Self::Error::NotUtf8)?;
rust_decimal::Decimal::from_str(s).map_err(|err| Self::Error::Other(err.to_string()))
}
}
#[cfg(feature = "utils-decimal")]
impl<'a> FixValue<'a> for decimal::d128 {
type Error = decimal::Status;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
// TODO: Remove allocations.
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.as_bytes().len()
}
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
use std::str::FromStr;
decimal::d128::set_status(decimal::Status::empty());
let s = std::str::from_utf8(data).unwrap_or("invalid UTF-8");
let number =
decimal::d128::from_str(s).expect("decimal::d128 should always parse without errors");
let status = decimal::d128::get_status();
if status.is_empty() {
Ok(number)
} else {
Err(status)
}
}
}
impl<'a> FixValue<'a> for bool {
type Error = error::Bool;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let byte = if *self { b'Y' } else { b'N' };
buffer.extend_from_slice(&[byte]);
1
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
if data.len() != 1 {
Err(Self::Error::WrongLength)
} else if data[0] == b'Y' {
Ok(true)
} else if data[0] == b'N' {
Ok(false)
} else {
Err(Self::Error::InvalidCharacter)
}
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
if data.len() != 1 {
Err(Self::Error::WrongLength)
} else {
Ok(data[0] == b'Y')
}
}
}
impl<'a> FixValue<'a> for &'a str {
type Error = std::str::Utf8Error;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
buffer.extend_from_slice(self.as_bytes());
self.as_bytes().len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
std::str::from_utf8(data)
}
}
impl<'a> FixValue<'a> for u8 {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = false;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
buffer.extend_from_slice(&[*self]);
1
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
Ok(data[0])
}
}
impl<'a> FixValue<'a> for &'a [u8] {
type Error = ();
type SerializeSettings = ();
const IS_ASCII: bool = false;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
buffer.extend_from_slice(self);
self.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
Ok(data)
}
}
impl<'a, const N: usize> FixValue<'a> for [u8; N] {
type Error = ();
type SerializeSettings = ();
const IS_ASCII: bool = false;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, settings: ()) -> usize
where
B: Buffer,
{
(&self).serialize_with(buffer, settings)
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
data.try_into().map_err(|_| ())
}
}
impl<'a, const N: usize> FixValue<'a> for &'a [u8; N] {
type Error = ();
type SerializeSettings = ();
const IS_ASCII: bool = false;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
buffer.extend_from_slice(&self[..]);
self.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
data.try_into().map_err(|_| ())
}
}
impl<'a> FixValue<'a> for TagU16 {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
fn ascii_digit_to_u16(digit: u8) -> u16 {
(digit as u16).wrapping_sub(b'0' as u16)
}
let mut n = 0u16;
for byte in data.iter().copied() {
n = n.wrapping_mul(10).wrapping_add(ascii_digit_to_u16(byte));
}
TagU16::new(n).ok_or(Self::Error::Other)
}
}
impl<'a> FixValue<'a> for u32 {
type Error = error::Int;
type SerializeSettings = Padding;
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, padding: Self::SerializeSettings) -> usize
where
B: Buffer,
{
if padding.len == 0 {
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
return s.len();
}
let initial_len = buffer.len();
buffer.resize(buffer.len() + padding.len, padding.byte);
let bytes = buffer.as_mut_slice();
let mut multiplier = 1;
for i in (0..padding.len).rev() {
bytes[i + initial_len] = ((self / multiplier) % 10).wrapping_add(b'0' as u32) as u8;
multiplier *= 10;
}
padding.len
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
fn ascii_digit_to_u32(digit: u8) -> u32 {
(digit as u32).wrapping_sub(b'0' as u32)
}
let mut n = 0u32;
for byte in data.iter().copied() {
n = n.wrapping_mul(10).wrapping_add(ascii_digit_to_u32(byte));
}
Ok(n)
}
}
impl<'a> FixValue<'a> for i32 {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
fn ascii_digit_to_i32(digit: u8) -> i32 {
digit as i32 - b'0' as i32
}
        // A leading '-' must not be fed to the digit loop.
        let (sign, digits) = if data.first() == Some(&b'-') {
            (-1, &data[1..])
        } else {
            (1, data)
        };
        let mut n = 0;
        for byte in digits.iter().copied() {
            n = n * 10 + ascii_digit_to_i32(byte);
        }
        Ok(n * sign)
}
}
impl<'a> FixValue<'a> for u64 {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
fn ascii_digit_to_u64(digit: u8) -> u64 {
digit as u64 - b'0' as u64
}
let mut n = 0;
for byte in data.iter().copied() {
n = n * 10 + ascii_digit_to_u64(byte);
}
Ok(n)
}
}
impl<'a> FixValue<'a> for i64 {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
fn ascii_digit_to_i64(digit: u8) -> i64 {
digit as i64 - b'0' as i64
}
        // A leading '-' must not be fed to the digit loop.
        let (sign, digits) = if data.first() == Some(&b'-') {
            (-1, &data[1..])
        } else {
            (1, data)
        };
        let mut n = 0;
        for byte in digits.iter().copied() {
            n = n * 10 + ascii_digit_to_i64(byte);
        }
        Ok(n * sign)
}
}
impl<'a> FixValue<'a> for usize {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
fn ascii_digit_to_usize(digit: u8) -> usize {
digit as usize - b'0' as usize
}
let mut n = 0;
for byte in data.iter().copied() {
n = n * 10 + ascii_digit_to_usize(byte);
}
Ok(n)
}
}
#[cfg(test)]
mod test {
use super::*;
use quickcheck_macros::quickcheck;
#[test]
fn serialize_bools() {
let mut buffer = Vec::new();
assert_eq!(true.serialize(&mut buffer), 1);
assert_eq!(false.serialize(&mut buffer), 1);
assert_eq!(&buffer[..], b"YN" as &[u8]);
}
#[quickcheck]
fn serialize_bytes(data: Vec<Vec<u8>>) -> bool {
let mut buffer = Vec::new();
for slice in data.iter() {
assert_eq!((&slice[..]).serialize(&mut buffer), slice.len());
}
&buffer[..] == &data.iter().flatten().copied().collect::<Vec<u8>>()[..]
}
#[quickcheck]
fn u32_serialize(n: u32) -> bool {
let buffer = &mut Vec::new();
let s = FixValue::to_string(&n);
let bytes = s.as_bytes();
let len = n.serialize(buffer);
bytes == buffer.as_slice() && len == bytes.len()
}
#[test]
fn serialize_country() {
let mut buffer = Vec::new();
assert_eq!(b"IT".serialize(&mut buffer), 2);
assert_eq!(&buffer[..], b"IT" as &[u8]);
}
#[test]
fn serialize_currency() {
let mut buffer = Vec::new();
assert_eq!(b"USD".serialize(&mut buffer), 3);
assert_eq!(&buffer[..], b"USD" as &[u8]);
}
}
| 27.909798 | 98 | 0.559679 |
d713ce135f4d2e0479084fdc098397ad2a427ed1 | 676 | use serde_derive::Deserialize;
#[derive(Deserialize,Debug,Clone)]
pub struct Config {
pub ip: Option<String>,
pub port: Option<u16>,
pub log: Option<String>,
pub backend: Backend,
#[serde(rename = "middleware")]
pub middlewares: Vec<MiddlewareConfig>
}
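// Example configuration (a sketch only; the on-disk format is assumed to be
// TOML and every value below is illustrative, not taken from this project):
//
// ip = "127.0.0.1"
// port = 8080
//
// [backend]
// url = "http://127.0.0.1:9000"
// timeout_ms = 500
//
// [[middleware]]
// url = "http://127.0.0.1:9001"
// request = true
// response = false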
#[derive(Deserialize,Debug,Clone)]
pub struct MiddlewareConfig {
pub url: String,
pub timeout_ms: Option<u32>,
pub request: bool,
pub response: bool
}
#[derive(Deserialize,Debug,Clone)]
pub struct Backend {
pub url: String,
pub timeout_ms: Option<u32>,
pub version: Option<HttpVersion>
}
#[derive(Deserialize,Debug,Clone)]
pub enum HttpVersion {
HTTP,
HTTP2
} | 21.125 | 42 | 0.684911 |
f5521f7da853bd025b017cac1cf7a3a4d8c463fd | 673 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::marker::MarkerTrait;
trait Foo : MarkerTrait {
type T;
}
impl Foo for i32 {
type T = int;
}
fn main() {
let x: <i32 as Foo>::T = 22;
let y: int = 44;
assert_eq!(x * 2, y);
}
| 25.884615 | 68 | 0.680535 |
91a762d80088f5b0ce925f1edcd94aa5274804ce | 37,108 | use std::fmt;
use std::io;
use std::mem;
use serde::ser::{
Error as SerdeError, Serialize, SerializeMap, SerializeSeq,
SerializeStruct, SerializeStructVariant, SerializeTuple,
SerializeTupleStruct, SerializeTupleVariant, Serializer,
};
use serde::serde_if_integer128;
use crate::error::{Error, ErrorKind};
use crate::writer::Writer;
/// Serialize the given value to the given writer, and return an error if
/// anything went wrong.
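///
/// A sketch mirroring the tests below: the caller serializes one record's
/// fields, then terminates the record explicitly.
///
/// ```ignore
/// let mut wtr = Writer::from_writer(vec![]);
/// serialize(&mut wtr, ("hello", 42, true))?;
/// wtr.write_record(None::<&[u8]>)?; // end the record
/// ```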
pub fn serialize<S: Serialize, W: io::Write>(
wtr: &mut Writer<W>,
value: S,
) -> Result<(), Error> {
value.serialize(&mut SeRecord { wtr })
}
struct SeRecord<'w, W: 'w + io::Write> {
wtr: &'w mut Writer<W>,
}
impl<'a, 'w, W: io::Write> Serializer for &'a mut SeRecord<'w, W> {
type Ok = ();
type Error = Error;
type SerializeSeq = Self;
type SerializeTuple = Self;
type SerializeTupleStruct = Self;
type SerializeTupleVariant = Self;
type SerializeMap = Self;
type SerializeStruct = Self;
type SerializeStructVariant = Self;
fn serialize_bool(self, v: bool) -> Result<Self::Ok, Self::Error> {
if v {
self.wtr.write_field("true")
} else {
self.wtr.write_field("false")
}
}
fn serialize_i8(self, v: i8) -> Result<Self::Ok, Self::Error> {
let mut buffer = itoa::Buffer::new();
self.wtr.write_field(buffer.format(v))
}
fn serialize_i16(self, v: i16) -> Result<Self::Ok, Self::Error> {
let mut buffer = itoa::Buffer::new();
self.wtr.write_field(buffer.format(v))
}
fn serialize_i32(self, v: i32) -> Result<Self::Ok, Self::Error> {
let mut buffer = itoa::Buffer::new();
self.wtr.write_field(buffer.format(v))
}
fn serialize_i64(self, v: i64) -> Result<Self::Ok, Self::Error> {
let mut buffer = itoa::Buffer::new();
self.wtr.write_field(buffer.format(v))
}
serde_if_integer128! {
fn serialize_i128(self, v: i128) -> Result<Self::Ok, Self::Error> {
self.collect_str(&v)
}
}
fn serialize_u8(self, v: u8) -> Result<Self::Ok, Self::Error> {
let mut buffer = itoa::Buffer::new();
self.wtr.write_field(buffer.format(v))
}
fn serialize_u16(self, v: u16) -> Result<Self::Ok, Self::Error> {
let mut buffer = itoa::Buffer::new();
self.wtr.write_field(buffer.format(v))
}
fn serialize_u32(self, v: u32) -> Result<Self::Ok, Self::Error> {
let mut buffer = itoa::Buffer::new();
self.wtr.write_field(buffer.format(v))
}
fn serialize_u64(self, v: u64) -> Result<Self::Ok, Self::Error> {
let mut buffer = itoa::Buffer::new();
self.wtr.write_field(buffer.format(v))
}
serde_if_integer128! {
fn serialize_u128(self, v: u128) -> Result<Self::Ok, Self::Error> {
self.collect_str(&v)
}
}
fn serialize_f32(self, v: f32) -> Result<Self::Ok, Self::Error> {
let mut buffer = ryu::Buffer::new();
self.wtr.write_field(buffer.format(v))
}
fn serialize_f64(self, v: f64) -> Result<Self::Ok, Self::Error> {
let mut buffer = ryu::Buffer::new();
self.wtr.write_field(buffer.format(v))
}
fn serialize_char(self, v: char) -> Result<Self::Ok, Self::Error> {
self.wtr.write_field(v.encode_utf8(&mut [0; 4]))
}
fn serialize_str(self, value: &str) -> Result<Self::Ok, Self::Error> {
self.wtr.write_field(value)
}
fn serialize_bytes(self, value: &[u8]) -> Result<Self::Ok, Self::Error> {
self.wtr.write_field(value)
}
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
self.wtr.write_field(&[])
}
fn serialize_some<T: ?Sized + Serialize>(
self,
value: &T,
) -> Result<Self::Ok, Self::Error> {
value.serialize(self)
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
None::<()>.serialize(self)
}
fn serialize_unit_struct(
self,
name: &'static str,
) -> Result<Self::Ok, Self::Error> {
self.wtr.write_field(name)
}
fn serialize_unit_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
self.wtr.write_field(variant)
}
fn serialize_newtype_struct<T: ?Sized + Serialize>(
self,
_name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error> {
value.serialize(self)
}
fn serialize_newtype_variant<T: ?Sized + Serialize>(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error> {
value.serialize(self)
}
fn serialize_seq(
self,
_len: Option<usize>,
) -> Result<Self::SerializeSeq, Self::Error> {
Ok(self)
}
fn serialize_tuple(
self,
_len: usize,
) -> Result<Self::SerializeTuple, Self::Error> {
Ok(self)
}
fn serialize_tuple_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
Ok(self)
}
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
Err(Error::custom("serializing enum tuple variants is not supported"))
}
fn serialize_map(
self,
_len: Option<usize>,
) -> Result<Self::SerializeMap, Self::Error> {
// The right behavior for serializing maps isn't clear.
Err(Error::custom(
"serializing maps is not supported, \
if you have a use case, please file an issue at \
https://github.com/BurntSushi/rust-csv",
))
}
fn serialize_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
Ok(self)
}
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
Err(Error::custom("serializing enum struct variants is not supported"))
}
}
impl<'a, 'w, W: io::Write> SerializeSeq for &'a mut SeRecord<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_element<T: ?Sized + Serialize>(
&mut self,
value: &T,
) -> Result<(), Self::Error> {
value.serialize(&mut **self)
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(())
}
}
impl<'a, 'w, W: io::Write> SerializeTuple for &'a mut SeRecord<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_element<T: ?Sized + Serialize>(
&mut self,
value: &T,
) -> Result<(), Self::Error> {
value.serialize(&mut **self)
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(())
}
}
impl<'a, 'w, W: io::Write> SerializeTupleStruct for &'a mut SeRecord<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_field<T: ?Sized + Serialize>(
&mut self,
value: &T,
) -> Result<(), Self::Error> {
value.serialize(&mut **self)
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(())
}
}
impl<'a, 'w, W: io::Write> SerializeTupleVariant for &'a mut SeRecord<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_field<T: ?Sized + Serialize>(
&mut self,
_value: &T,
) -> Result<(), Self::Error> {
unreachable!()
}
fn end(self) -> Result<Self::Ok, Self::Error> {
unreachable!()
}
}
impl<'a, 'w, W: io::Write> SerializeMap for &'a mut SeRecord<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_key<T: ?Sized + Serialize>(
&mut self,
_key: &T,
) -> Result<(), Self::Error> {
unreachable!()
}
fn serialize_value<T: ?Sized + Serialize>(
&mut self,
_value: &T,
) -> Result<(), Self::Error> {
unreachable!()
}
fn end(self) -> Result<Self::Ok, Self::Error> {
unreachable!()
}
}
impl<'a, 'w, W: io::Write> SerializeStruct for &'a mut SeRecord<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_field<T: ?Sized + Serialize>(
&mut self,
_key: &'static str,
value: &T,
) -> Result<(), Self::Error> {
value.serialize(&mut **self)
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(())
}
}
impl<'a, 'w, W: io::Write> SerializeStructVariant for &'a mut SeRecord<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_field<T: ?Sized + Serialize>(
&mut self,
_key: &'static str,
_value: &T,
) -> Result<(), Self::Error> {
unreachable!()
}
fn end(self) -> Result<Self::Ok, Self::Error> {
unreachable!()
}
}
impl SerdeError for Error {
fn custom<T: fmt::Display>(msg: T) -> Error {
Error::new(ErrorKind::Serialize(msg.to_string()))
}
}
fn error_scalar_outside_struct<T: fmt::Display>(name: T) -> Error {
Error::custom(format!(
"cannot serialize {} scalar outside struct \
when writing headers from structs",
name
))
}
fn error_container_inside_struct<T: fmt::Display>(name: T) -> Error {
Error::custom(format!(
"cannot serialize {} container inside struct \
when writing headers from structs",
name
))
}
/// Write header names corresponding to the field names of the value (if the
/// value has field names).
///
/// If the type to be serialized has field names (e.g. it's a struct), then
/// header names are written, and the `Ok` return value is `true`.
///
/// If the type to be serialized doesn't have field names, then nothing is
/// written, and the `Ok` return value is `false`.
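///
/// A sketch (the struct and its field names are illustrative only):
///
/// ```ignore
/// #[derive(serde::Serialize)]
/// struct Row { city: String, pop: u64 }
///
/// let mut wtr = Writer::from_writer(vec![]);
/// let wrote = serialize_header(&mut wtr, &Row { city: "Boston".into(), pop: 4_628_910 })?;
/// assert!(wrote); // the header fields "city,pop" were written
/// ```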
pub fn serialize_header<S: Serialize, W: io::Write>(
wtr: &mut Writer<W>,
value: S,
) -> Result<bool, Error> {
let mut ser = SeHeader::new(wtr);
value.serialize(&mut ser).map(|_| ser.wrote_header())
}
/// State machine for `SeHeader`.
///
/// This is a diagram of the transitions in the state machine. Note that only
/// some serialization events cause a state transition, and only for certain
/// states. For example, encountering a scalar causes a transition if the state
/// is `Write` or `EncounteredStructField`, but not if the state is
/// `ErrorIfWrite(err)` or `InStructField`.
///
/// ```text
/// +-----+
/// |Write|
/// +-----+
/// |
/// /------------------+------------------\
/// | | |
/// encounter finish encounter
/// scalar | struct field
/// | | |
/// v v v
/// +-----------------+ Ok(()) +-------------+
/// |ErrorIfWrite(err)| |InStructField|<--------\
/// +-----------------+ +-------------+ |
/// | | |
/// /------+------\ /-----------------+ |
/// | | | | |
/// encounter finish encounter finish encounter
/// struct field | container field struct field
/// | | | | |
/// v v v v |
/// Err(err) Ok(()) Err(_) +----------------------+ |
/// |EncounteredStructField| |
/// +----------------------+ |
/// | |
/// /----------+----------------/
/// | |
/// encounter finish
/// scalar |
/// | |
/// v v
/// Err(_) Ok(())
/// ```
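///
/// For example, serializing a bare scalar such as `5` moves from `Write` to
/// `ErrorIfWrite(..)` and finishes with `Ok(())` (no header written), while
/// serializing a struct with two fields passes through `InStructField` for
/// each field and finishes in `EncounteredStructField` (header written).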
enum HeaderState {
/// Start here. Headers need to be written if the type has field names.
Write,
/// The serializer still has not encountered a struct field. If one is
/// encountered (headers need to be written), return the enclosed error.
ErrorIfWrite(Error),
/// The serializer encountered one or more struct fields (and wrote their
/// names).
EncounteredStructField,
/// The serializer is currently in a struct field value.
InStructField,
}
struct SeHeader<'w, W: 'w + io::Write> {
wtr: &'w mut Writer<W>,
state: HeaderState,
}
impl<'w, W: io::Write> SeHeader<'w, W> {
fn new(wtr: &'w mut Writer<W>) -> Self {
SeHeader { wtr, state: HeaderState::Write }
}
fn wrote_header(&self) -> bool {
use self::HeaderState::*;
match self.state {
Write | ErrorIfWrite(_) => false,
EncounteredStructField | InStructField => true,
}
}
fn handle_scalar<T: fmt::Display>(
&mut self,
name: T,
) -> Result<(), Error> {
use self::HeaderState::*;
match self.state {
Write => {
self.state = ErrorIfWrite(error_scalar_outside_struct(name));
Ok(())
}
ErrorIfWrite(_) | InStructField => Ok(()),
EncounteredStructField => Err(error_scalar_outside_struct(name)),
}
}
fn handle_container<T: fmt::Display>(
&mut self,
name: T,
) -> Result<&mut Self, Error> {
if let HeaderState::InStructField = self.state {
Err(error_container_inside_struct(name))
} else {
Ok(self)
}
}
}
impl<'a, 'w, W: io::Write> Serializer for &'a mut SeHeader<'w, W> {
type Ok = ();
type Error = Error;
type SerializeSeq = Self;
type SerializeTuple = Self;
type SerializeTupleStruct = Self;
type SerializeTupleVariant = Self;
type SerializeMap = Self;
type SerializeStruct = Self;
type SerializeStructVariant = Self;
fn serialize_bool(self, v: bool) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
fn serialize_i8(self, v: i8) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
fn serialize_i16(self, v: i16) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
fn serialize_i32(self, v: i32) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
fn serialize_i64(self, v: i64) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
serde_if_integer128! {
fn serialize_i128(self, v: i128) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
}
fn serialize_u8(self, v: u8) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
fn serialize_u16(self, v: u16) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
fn serialize_u32(self, v: u32) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
fn serialize_u64(self, v: u64) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
serde_if_integer128! {
fn serialize_u128(self, v: u128) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
}
fn serialize_f32(self, v: f32) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
fn serialize_f64(self, v: f64) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
fn serialize_char(self, v: char) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(v)
}
fn serialize_str(self, value: &str) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(value)
}
fn serialize_bytes(self, _value: &[u8]) -> Result<Self::Ok, Self::Error> {
self.handle_scalar("&[u8]")
}
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
self.handle_scalar("None")
}
fn serialize_some<T: ?Sized + Serialize>(
self,
_value: &T,
) -> Result<Self::Ok, Self::Error> {
self.handle_scalar("Some(_)")
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
self.handle_scalar("()")
}
fn serialize_unit_struct(
self,
name: &'static str,
) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(name)
}
fn serialize_unit_variant(
self,
name: &'static str,
_variant_index: u32,
variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(format!("{}::{}", name, variant))
}
fn serialize_newtype_struct<T: ?Sized + Serialize>(
self,
name: &'static str,
_value: &T,
) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(format!("{}(_)", name))
}
fn serialize_newtype_variant<T: ?Sized + Serialize>(
self,
name: &'static str,
_variant_index: u32,
variant: &'static str,
_value: &T,
) -> Result<Self::Ok, Self::Error> {
self.handle_scalar(format!("{}::{}(_)", name, variant))
}
fn serialize_seq(
self,
_len: Option<usize>,
) -> Result<Self::SerializeSeq, Self::Error> {
self.handle_container("sequence")
}
fn serialize_tuple(
self,
_len: usize,
) -> Result<Self::SerializeTuple, Self::Error> {
self.handle_container("tuple")
}
fn serialize_tuple_struct(
self,
name: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
self.handle_container(name)
}
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
Err(Error::custom("serializing enum tuple variants is not supported"))
}
fn serialize_map(
self,
_len: Option<usize>,
) -> Result<Self::SerializeMap, Self::Error> {
// The right behavior for serializing maps isn't clear.
Err(Error::custom(
"serializing maps is not supported, \
if you have a use case, please file an issue at \
https://github.com/BurntSushi/rust-csv",
))
}
fn serialize_struct(
self,
name: &'static str,
_len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
self.handle_container(name)
}
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
Err(Error::custom("serializing enum struct variants is not supported"))
}
}
impl<'a, 'w, W: io::Write> SerializeSeq for &'a mut SeHeader<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_element<T: ?Sized + Serialize>(
&mut self,
value: &T,
) -> Result<(), Self::Error> {
value.serialize(&mut **self)
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(())
}
}
impl<'a, 'w, W: io::Write> SerializeTuple for &'a mut SeHeader<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_element<T: ?Sized + Serialize>(
&mut self,
value: &T,
) -> Result<(), Self::Error> {
value.serialize(&mut **self)
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(())
}
}
impl<'a, 'w, W: io::Write> SerializeTupleStruct for &'a mut SeHeader<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_field<T: ?Sized + Serialize>(
&mut self,
value: &T,
) -> Result<(), Self::Error> {
value.serialize(&mut **self)
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(())
}
}
impl<'a, 'w, W: io::Write> SerializeTupleVariant for &'a mut SeHeader<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_field<T: ?Sized + Serialize>(
&mut self,
_value: &T,
) -> Result<(), Self::Error> {
unreachable!()
}
fn end(self) -> Result<Self::Ok, Self::Error> {
unreachable!()
}
}
impl<'a, 'w, W: io::Write> SerializeMap for &'a mut SeHeader<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_key<T: ?Sized + Serialize>(
&mut self,
_key: &T,
) -> Result<(), Self::Error> {
unreachable!()
}
fn serialize_value<T: ?Sized + Serialize>(
&mut self,
_value: &T,
) -> Result<(), Self::Error> {
unreachable!()
}
fn end(self) -> Result<Self::Ok, Self::Error> {
unreachable!()
}
}
impl<'a, 'w, W: io::Write> SerializeStruct for &'a mut SeHeader<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_field<T: ?Sized + Serialize>(
&mut self,
key: &'static str,
value: &T,
) -> Result<(), Self::Error> {
// Grab old state and update state to `EncounteredStructField`.
let old_state =
mem::replace(&mut self.state, HeaderState::EncounteredStructField);
if let HeaderState::ErrorIfWrite(err) = old_state {
return Err(err);
}
self.wtr.write_field(key)?;
// Check that there aren't any containers in the value.
self.state = HeaderState::InStructField;
value.serialize(&mut **self)?;
self.state = HeaderState::EncounteredStructField;
Ok(())
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(())
}
}
impl<'a, 'w, W: io::Write> SerializeStructVariant for &'a mut SeHeader<'w, W> {
type Ok = ();
type Error = Error;
fn serialize_field<T: ?Sized + Serialize>(
&mut self,
_key: &'static str,
_value: &T,
) -> Result<(), Self::Error> {
unreachable!()
}
fn end(self) -> Result<Self::Ok, Self::Error> {
unreachable!()
}
}
#[cfg(test)]
mod tests {
use bstr::ByteSlice;
use serde::{serde_if_integer128, Serialize};
use crate::error::{Error, ErrorKind};
use crate::writer::Writer;
use super::{SeHeader, SeRecord};
fn serialize<S: Serialize>(s: S) -> String {
let mut wtr = Writer::from_writer(vec![]);
s.serialize(&mut SeRecord { wtr: &mut wtr }).unwrap();
wtr.write_record(None::<&[u8]>).unwrap();
String::from_utf8(wtr.into_inner().unwrap()).unwrap()
}
/// Serialize using `SeHeader`. Returns whether a header was written and
/// the output of the writer.
fn serialize_header<S: Serialize>(s: S) -> (bool, String) {
let mut wtr = Writer::from_writer(vec![]);
let wrote = {
let mut ser = SeHeader::new(&mut wtr);
s.serialize(&mut ser).unwrap();
ser.wrote_header()
};
(wrote, String::from_utf8(wtr.into_inner().unwrap()).unwrap())
}
fn serialize_err<S: Serialize>(s: S) -> Error {
let mut wtr = Writer::from_writer(vec![]);
s.serialize(&mut SeRecord { wtr: &mut wtr }).unwrap_err()
}
fn serialize_header_err<S: Serialize>(s: S) -> Error {
let mut wtr = Writer::from_writer(vec![]);
s.serialize(&mut SeHeader::new(&mut wtr)).unwrap_err()
}
#[test]
fn bool() {
let got = serialize(true);
assert_eq!(got, "true\n");
let (wrote, got) = serialize_header(true);
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn integer() {
let got = serialize(12345);
assert_eq!(got, "12345\n");
let (wrote, got) = serialize_header(12345);
assert!(!wrote);
assert_eq!(got, "");
}
serde_if_integer128! {
#[test]
fn integer_u128() {
let got = serialize(i128::max_value() as u128 + 1);
assert_eq!(got, "170141183460469231731687303715884105728\n");
let (wrote, got) = serialize_header(12345);
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn integer_i128() {
let got = serialize(i128::max_value());
assert_eq!(got, "170141183460469231731687303715884105727\n");
let (wrote, got) = serialize_header(12345);
assert!(!wrote);
assert_eq!(got, "");
}
}
#[test]
fn float() {
let got = serialize(1.23);
assert_eq!(got, "1.23\n");
let (wrote, got) = serialize_header(1.23);
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn float_nan() {
let got = serialize(::std::f64::NAN);
assert_eq!(got, "NaN\n");
let (wrote, got) = serialize_header(::std::f64::NAN);
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn char() {
let got = serialize('☃');
assert_eq!(got, "☃\n");
let (wrote, got) = serialize_header('☃');
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn str() {
let got = serialize("how\nare\n\"you\"?");
assert_eq!(got, "\"how\nare\n\"\"you\"\"?\"\n");
let (wrote, got) = serialize_header("how\nare\n\"you\"?");
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn bytes() {
let got = serialize(b"how\nare\n\"you\"?".as_bstr());
assert_eq!(got, "\"how\nare\n\"\"you\"\"?\"\n");
let (wrote, got) = serialize_header(&b"how\nare\n\"you\"?"[..]);
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn option() {
let got = serialize(None::<()>);
assert_eq!(got, "\"\"\n");
let (wrote, got) = serialize_header(None::<()>);
assert!(!wrote);
assert_eq!(got, "");
let got = serialize(Some(5));
assert_eq!(got, "5\n");
let (wrote, got) = serialize_header(Some(5));
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn unit() {
let got = serialize(());
assert_eq!(got, "\"\"\n");
let (wrote, got) = serialize_header(());
assert!(!wrote);
assert_eq!(got, "");
let got = serialize((5, ()));
assert_eq!(got, "5,\n");
let (wrote, got) = serialize_header(());
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn struct_unit() {
#[derive(Serialize)]
struct Foo;
let got = serialize(Foo);
assert_eq!(got, "Foo\n");
let (wrote, got) = serialize_header(Foo);
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn struct_newtype() {
#[derive(Serialize)]
struct Foo(f64);
let got = serialize(Foo(1.5));
assert_eq!(got, "1.5\n");
let (wrote, got) = serialize_header(Foo(1.5));
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn enum_units() {
#[derive(Serialize)]
enum Wat {
Foo,
Bar,
Baz,
}
let got = serialize(Wat::Foo);
assert_eq!(got, "Foo\n");
let (wrote, got) = serialize_header(Wat::Foo);
assert!(!wrote);
assert_eq!(got, "");
let got = serialize(Wat::Bar);
assert_eq!(got, "Bar\n");
let (wrote, got) = serialize_header(Wat::Bar);
assert!(!wrote);
assert_eq!(got, "");
let got = serialize(Wat::Baz);
assert_eq!(got, "Baz\n");
let (wrote, got) = serialize_header(Wat::Baz);
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn enum_newtypes() {
#[derive(Serialize)]
enum Wat {
Foo(i32),
Bar(f32),
Baz(bool),
}
let got = serialize(Wat::Foo(5));
assert_eq!(got, "5\n");
let (wrote, got) = serialize_header(Wat::Foo(5));
assert!(!wrote);
assert_eq!(got, "");
let got = serialize(Wat::Bar(1.5));
assert_eq!(got, "1.5\n");
let (wrote, got) = serialize_header(Wat::Bar(1.5));
assert!(!wrote);
assert_eq!(got, "");
let got = serialize(Wat::Baz(true));
assert_eq!(got, "true\n");
let (wrote, got) = serialize_header(Wat::Baz(true));
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn seq() {
let got = serialize(vec![1, 2, 3]);
assert_eq!(got, "1,2,3\n");
let (wrote, got) = serialize_header(vec![1, 2, 3]);
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn tuple() {
let row = (true, 1.5, "hi");
let got = serialize(row);
assert_eq!(got, "true,1.5,hi\n");
let (wrote, got) = serialize_header(row);
assert!(!wrote);
assert_eq!(got, "");
let row = (true, 1.5, vec![1, 2, 3]);
let got = serialize(row.clone());
assert_eq!(got, "true,1.5,1,2,3\n");
let (wrote, got) = serialize_header(row);
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn tuple_struct() {
#[derive(Clone, Serialize)]
struct Foo(bool, i32, String);
let row = Foo(false, 42, "hi".to_string());
let got = serialize(row.clone());
assert_eq!(got, "false,42,hi\n");
let (wrote, got) = serialize_header(row);
assert!(!wrote);
assert_eq!(got, "");
}
#[test]
fn tuple_variant() {
#[derive(Clone, Serialize)]
enum Foo {
X(bool, i32, String),
}
let row = Foo::X(false, 42, "hi".to_string());
let err = serialize_err(row.clone());
match *err.kind() {
ErrorKind::Serialize(_) => {}
ref x => panic!("expected ErrorKind::Serialize but got '{:?}'", x),
}
let err = serialize_header_err(row);
match *err.kind() {
ErrorKind::Serialize(_) => {}
ref x => panic!("expected ErrorKind::Serialize but got '{:?}'", x),
}
}
#[test]
fn enum_struct_variant() {
#[derive(Clone, Serialize)]
enum Foo {
X { a: bool, b: i32, c: String },
}
let row = Foo::X { a: false, b: 1, c: "hi".into() };
let err = serialize_err(row.clone());
match *err.kind() {
ErrorKind::Serialize(_) => {}
ref x => panic!("expected ErrorKind::Serialize but got '{:?}'", x),
}
let err = serialize_header_err(row);
match *err.kind() {
ErrorKind::Serialize(_) => {}
ref x => panic!("expected ErrorKind::Serialize but got '{:?}'", x),
}
}
#[test]
fn struct_no_headers() {
#[derive(Serialize)]
struct Foo {
x: bool,
y: i32,
z: String,
}
let got = serialize(Foo { x: true, y: 5, z: "hi".into() });
assert_eq!(got, "true,5,hi\n");
}
serde_if_integer128! {
#[test]
fn struct_no_headers_128() {
#[derive(Serialize)]
struct Foo {
x: i128,
y: u128,
}
let got =
serialize(Foo { x: i128::max_value(), y: u128::max_value() });
assert_eq!(
got,
"170141183460469231731687303715884105727,\
340282366920938463463374607431768211455\n"
);
}
}
#[test]
fn struct_headers() {
#[derive(Clone, Serialize)]
struct Foo {
x: bool,
y: i32,
z: String,
}
let row = Foo { x: true, y: 5, z: "hi".into() };
let (wrote, got) = serialize_header(row.clone());
assert!(wrote);
assert_eq!(got, "x,y,z");
let got = serialize(row);
assert_eq!(got, "true,5,hi\n");
}
#[test]
fn struct_headers_nested() {
#[derive(Clone, Serialize)]
struct Foo {
label: String,
nest: Nested,
}
#[derive(Clone, Serialize)]
struct Nested {
label2: String,
value: i32,
}
let row = Foo {
label: "foo".into(),
nest: Nested { label2: "bar".into(), value: 5 },
};
let got = serialize(row.clone());
assert_eq!(got, "foo,bar,5\n");
let err = serialize_header_err(row);
match *err.kind() {
ErrorKind::Serialize(_) => {}
ref x => panic!("expected ErrorKind::Serialize but got '{:?}'", x),
}
}
#[test]
fn struct_headers_nested_seq() {
#[derive(Clone, Serialize)]
struct Foo {
label: String,
values: Vec<i32>,
}
let row = Foo { label: "foo".into(), values: vec![1, 2, 3] };
let got = serialize(row.clone());
assert_eq!(got, "foo,1,2,3\n");
let err = serialize_header_err(row);
match *err.kind() {
ErrorKind::Serialize(_) => {}
ref x => panic!("expected ErrorKind::Serialize but got '{:?}'", x),
}
}
#[test]
fn struct_headers_inside_tuple() {
#[derive(Clone, Serialize)]
struct Foo {
label: String,
num: f64,
}
#[derive(Clone, Serialize)]
struct Bar {
label2: bool,
value: i32,
empty: (),
}
let row = (
Foo { label: "hi".to_string(), num: 5.0 },
Bar { label2: true, value: 3, empty: () },
Foo { label: "baz".to_string(), num: 2.3 },
);
let got = serialize(row.clone());
assert_eq!(got, "hi,5.0,true,3,,baz,2.3\n");
let (wrote, got) = serialize_header(row);
assert!(wrote);
assert_eq!(got, "label,num,label2,value,empty,label,num");
}
#[test]
fn struct_headers_inside_tuple_scalar_before() {
#[derive(Clone, Serialize)]
struct Foo {
label: String,
num: f64,
}
let row = (3.14, Foo { label: "hi".to_string(), num: 5.0 });
let got = serialize(row.clone());
assert_eq!(got, "3.14,hi,5.0\n");
let err = serialize_header_err(row);
match *err.kind() {
ErrorKind::Serialize(_) => {}
ref x => panic!("expected ErrorKind::Serialize but got '{:?}'", x),
}
}
#[test]
fn struct_headers_inside_tuple_scalar_after() {
#[derive(Clone, Serialize)]
struct Foo {
label: String,
num: f64,
}
let row = (Foo { label: "hi".to_string(), num: 5.0 }, 3.14);
let got = serialize(row.clone());
assert_eq!(got, "hi,5.0,3.14\n");
let err = serialize_header_err(row);
match *err.kind() {
ErrorKind::Serialize(_) => {}
ref x => panic!("expected ErrorKind::Serialize but got '{:?}'", x),
}
}
#[test]
fn struct_headers_inside_seq() {
#[derive(Clone, Serialize)]
struct Foo {
label: String,
num: f64,
}
let row = vec![
Foo { label: "hi".to_string(), num: 5.0 },
Foo { label: "baz".to_string(), num: 2.3 },
];
let got = serialize(row.clone());
assert_eq!(got, "hi,5.0,baz,2.3\n");
let (wrote, got) = serialize_header(row);
assert!(wrote);
assert_eq!(got, "label,num,label,num");
}
#[test]
fn struct_headers_inside_nested_tuple_seq() {
#[derive(Clone, Serialize)]
struct Foo {
label: String,
num: f64,
}
#[derive(Clone, Serialize)]
struct Bar {
label2: Baz,
value: i32,
empty: (),
}
#[derive(Clone, Serialize)]
struct Baz(bool);
let row = (
(
Foo { label: "hi".to_string(), num: 5.0 },
Bar { label2: Baz(true), value: 3, empty: () },
),
vec![(Foo { label: "baz".to_string(), num: 2.3 },)],
);
let got = serialize(row.clone());
assert_eq!(got, "hi,5.0,true,3,,baz,2.3\n");
let (wrote, got) = serialize_header(row);
assert!(wrote);
assert_eq!(got, "label,num,label2,value,empty,label,num");
}
}
| 27.651267 | 79 | 0.507195 |
16f87833e7019eecc9b57f053988fca8a1982b82 | 636 | //! Tests for espeakng::Speaker::text_to_phonemes
mod base;
use base::init;
#[test]
fn espeak() -> Result<(), espeakng::Error> {
assert_eq!(
init().text_to_phonemes("Hello world", espeakng::PhonemeGenOptions::Standard)?.unwrap(),
include_str!("../test_data/hello_world.pho")
);
Ok(())
}
#[test]
fn mbrola() -> Result<(), espeakng::Error> {
let mut speaker = init();
speaker.set_voice_raw("mb/mb-en1")?;
assert_eq!(
speaker.text_to_phonemes("Hello world", espeakng::PhonemeGenOptions::Mbrola)?.unwrap(),
include_str!("../test_data/hello_world_mbrola.pho")
);
Ok(())
}
| 23.555556 | 96 | 0.627358 |
bf1359c054c31e54099fc6928e6945baad3535a0 | 172 | pub const RG_DEFAULT_LISTEN_ADDR: &str = "127.0.0.1";
pub const RG_HTTP_PORT: u16 = 80;
pub const RG_HTTPS_PORT: u16 = 443;
pub const RG_DEFAULT_PORT: u16 = RG_HTTP_PORT;
| 28.666667 | 53 | 0.75 |
11911bf37e8a06b870e15fd9ff8b07e564247cf4 | 22,098 | use crate::global::binary;
use crate::rpc::RpcClient;
use crate::utils::{find_available_port, temp_path, wait_until};
use crate::SYSTEM_CELL_ALWAYS_SUCCESS_INDEX;
use ckb_app_config::CKBAppConfig;
use ckb_chain_spec::consensus::Consensus;
use ckb_chain_spec::ChainSpec;
use ckb_jsonrpc_types::TxPoolInfo;
use ckb_logger::{debug, error};
use ckb_resource::Resource;
use ckb_types::{
bytes,
core::{
self, capacity_bytes, BlockBuilder, BlockNumber, BlockView, Capacity, HeaderView,
ScriptHashType, TransactionView,
},
packed::{Block, Byte32, CellDep, CellInput, CellOutput, CellOutputBuilder, OutPoint, Script},
prelude::*,
};
use std::borrow::Borrow;
use std::collections::HashSet;
use std::convert::Into;
use std::fs;
use std::path::PathBuf;
use std::process::{self, Child, Command, Stdio};
use std::thread::sleep;
use std::time::{Duration, Instant};
struct ProcessGuard {
pub child: Child,
pub killed: bool,
}
impl Drop for ProcessGuard {
fn drop(&mut self) {
if !self.killed {
match self.child.kill() {
Err(e) => error!("Could not kill ckb process: {}", e),
Ok(_) => debug!("Successfully killed ckb process"),
}
let _ = self.child.wait();
}
}
}
pub struct Node {
working_dir: PathBuf,
consensus: Consensus,
p2p_listen: String,
rpc_client: RpcClient,
node_id: Option<String>, // initialize when starts node
guard: Option<ProcessGuard>, // initialize when starts node
}
impl Node {
pub fn new(spec_name: &str, node_name: &str) -> Self {
let working_dir = temp_path(spec_name, node_name);
// Copy node template into node's working directory
let cells_dir = working_dir.join("specs").join("cells");
ckb_logger::info!("working_dir {:?}", working_dir);
fs::create_dir_all(cells_dir).expect("create node's dir");
for file in &[
"ckb.toml",
"specs/integration.toml",
"specs/cells/always_success",
] {
let src = PathBuf::from("template").join(file);
let dest = working_dir.join(file);
fs::copy(&src, &dest)
.unwrap_or_else(|_| panic!("cp {:?} {}", src.display(), dest.display()));
}
// Allocate rpc port and p2p port, and fill into app config
let mut node = Self::init(working_dir);
node.modify_app_config(|app_config| {
let rpc_port = find_available_port();
let p2p_port = find_available_port();
app_config.rpc.listen_address = format!("127.0.0.1:{}", rpc_port);
app_config.network.listen_addresses =
vec![format!("/ip4/127.0.0.1/tcp/{}", p2p_port).parse().unwrap()];
});
node
}
pub fn modify_app_config<M>(&mut self, modifier: M)
where
M: Fn(&mut CKBAppConfig),
{
let app_config_path = self.working_dir().join("ckb.toml");
let mut app_config: CKBAppConfig = {
let toml = fs::read(&app_config_path).unwrap();
CKBAppConfig::load_from_slice(&toml).unwrap()
};
modifier(&mut app_config);
fs::write(&app_config_path, toml::to_string(&app_config).unwrap()).unwrap();
*self = Self::init(self.working_dir());
}
pub fn modify_chain_spec<M>(&mut self, modifier: M)
where
M: Fn(&mut ChainSpec),
{
let chain_spec_path = self.working_dir().join("specs/integration.toml");
let chain_spec_res = Resource::file_system(chain_spec_path.clone());
let mut chain_spec = ChainSpec::load_from(&chain_spec_res).unwrap();
modifier(&mut chain_spec);
fs::write(&chain_spec_path, toml::to_string(&chain_spec).unwrap()).unwrap();
*self = Self::init(self.working_dir());
}
// Initialize Node instance based on working directory
fn init(working_dir: PathBuf) -> Self {
let app_config = {
let app_config_path = working_dir.join("ckb.toml");
let toml = fs::read(app_config_path).unwrap();
CKBAppConfig::load_from_slice(&toml).unwrap()
};
let mut chain_spec: ChainSpec = {
let chain_spec_path = working_dir.join("specs/integration.toml");
let chain_spec_res = Resource::file_system(chain_spec_path);
ChainSpec::load_from(&chain_spec_res).unwrap()
};
let p2p_listen = app_config.network.listen_addresses[0].to_string();
let rpc_address = app_config.rpc.listen_address;
let rpc_client = RpcClient::new(&format!("http://{}/", rpc_address));
let consensus = {
// Ensure the data path is available because chain_spec.build_consensus() needs to access the
// system-cell data.
chain_spec
.genesis
.system_cells
.iter_mut()
.for_each(|system_cell| {
system_cell.file.absolutize(&working_dir.join("specs"));
});
chain_spec.build_consensus().unwrap()
};
Self {
working_dir,
consensus,
p2p_listen,
rpc_client,
node_id: None,
guard: None,
}
}
pub fn rpc_client(&self) -> &RpcClient {
&self.rpc_client
}
pub fn working_dir(&self) -> PathBuf {
self.working_dir.clone()
}
pub fn log_path(&self) -> PathBuf {
self.working_dir().join("data/logs/run.log")
}
pub fn node_id(&self) -> &str {
// peer_id.to_base58()
self.node_id.as_ref().expect("uninitialized node_id")
}
pub fn consensus(&self) -> &Consensus {
&self.consensus
}
pub fn p2p_listen(&self) -> String {
self.p2p_listen.clone()
}
pub fn p2p_address(&self) -> String {
format!("{}/p2p/{}", self.p2p_listen(), self.node_id())
}
pub fn dep_group_tx_hash(&self) -> Byte32 {
self.consensus().genesis_block().transactions()[1].hash()
}
pub fn always_success_raw_data(&self) -> bytes::Bytes {
self.consensus().genesis_block().transactions()[0]
.outputs_data()
.get(SYSTEM_CELL_ALWAYS_SUCCESS_INDEX as usize)
.unwrap()
.raw_data()
}
pub fn always_success_script(&self) -> Script {
let always_success_raw = self.always_success_raw_data();
let always_success_code_hash = CellOutput::calc_data_hash(&always_success_raw);
Script::new_builder()
.code_hash(always_success_code_hash)
.hash_type(ScriptHashType::Data.into())
.build()
}
pub fn always_success_cell_dep(&self) -> CellDep {
let genesis_cellbase_hash = self.consensus().genesis_block().transactions()[0].hash();
let always_success_out_point =
OutPoint::new(genesis_cellbase_hash, SYSTEM_CELL_ALWAYS_SUCCESS_INDEX);
CellDep::new_builder()
.out_point(always_success_out_point)
.build()
}
pub fn connect(&self, peer: &Self) {
self.rpc_client()
.add_node(peer.node_id().to_string(), peer.p2p_listen());
let connected = wait_until(5, || {
self.rpc_client()
.get_peers()
.iter()
.any(|p| p.node_id == peer.node_id())
});
if !connected {
panic!("Connect outbound peer timeout, node id: {}", peer.node_id());
}
}
pub fn connect_uncheck(&self, peer: &Self) {
self.rpc_client()
.add_node(peer.node_id().to_string(), peer.p2p_listen());
}
// workaround for banned address checking (because we are using loopback address)
// 1. checking banned addresses is empty
// 2. connecting outbound peer and checking banned addresses is not empty
// 3. clear banned addresses
pub fn connect_and_wait_ban(&self, peer: &Self) {
let rpc_client = self.rpc_client();
assert!(
rpc_client.get_banned_addresses().is_empty(),
"banned addresses should be empty"
);
rpc_client.add_node(peer.node_id().to_string(), peer.p2p_listen());
let result = wait_until(10, || {
let banned_addresses = rpc_client.get_banned_addresses();
let result = !banned_addresses.is_empty();
banned_addresses.into_iter().for_each(|ban_address| {
rpc_client.set_ban(ban_address.address, "delete".to_owned(), None, None, None)
});
result
});
if !result {
panic!(
"Connect and wait ban outbound peer timeout, node id: {}",
peer.node_id()
);
}
}
    // TODO: it will be removed from here later, in another PR
pub fn disconnect(&self, peer: &Self) {
self.rpc_client().remove_node(peer.node_id().to_string());
let disconnected = wait_until(5, || {
self.rpc_client()
.get_peers()
.iter()
.all(|p| p.node_id != peer.node_id())
&& peer
.rpc_client()
.get_peers()
.iter()
.all(|p| p.node_id != self.node_id())
});
if !disconnected {
panic!("Disconnect timeout, node {}", peer.node_id());
}
}
pub fn submit_block(&self, block: &BlockView) -> Byte32 {
let hash = self
.rpc_client()
.submit_block("".to_owned(), block.data().into())
.unwrap();
self.wait_for_tx_pool();
hash
}
pub fn process_block_without_verify(&self, block: &BlockView, broadcast: bool) -> Byte32 {
self.rpc_client()
.process_block_without_verify(block.data().into(), broadcast)
.unwrap()
}
// Convenient way to construct an uncle block
pub fn construct_uncle(&self) -> BlockView {
let block = self.new_block(None, None, None);
        // Make sure the uncle block timestamp is different from
        // the next block timestamp in the main fork.
        // First construct an uncle block whose timestamp
        // is less than the current time, and then generate
        // the new block in the main fork whose timestamp is greater than
        // or equal to the current time.
let timestamp = block.timestamp();
loop {
let timestamp_next: u64 = self
.rpc_client()
.get_block_template(None, None, None)
.current_time
.into();
if timestamp_next > timestamp {
break;
}
}
block
.as_advanced_builder()
.timestamp(timestamp.pack())
.build()
}
    // Generate a transaction which spends the tip block's cellbase and send it to the pool through RPC.
pub fn generate_transaction(&self) -> Byte32 {
self.submit_transaction(&self.new_transaction_spend_tip_cellbase())
}
    // Generate a transaction which spends the tip block's cellbase
pub fn new_transaction_spend_tip_cellbase(&self) -> TransactionView {
let block = self.get_tip_block();
let cellbase = &block.transactions()[0];
self.new_transaction(cellbase.hash())
}
pub fn submit_transaction(&self, transaction: &TransactionView) -> Byte32 {
self.rpc_client()
.send_transaction(transaction.data().into())
}
pub fn remove_transaction(&self, tx_hash: Byte32) -> bool {
self.rpc_client().remove_transaction(tx_hash)
}
pub fn get_tip_block(&self) -> BlockView {
let rpc_client = self.rpc_client();
let tip_number = rpc_client.get_tip_block_number();
rpc_client
.get_block_by_number(tip_number)
.expect("tip block exists")
.into()
}
pub fn get_tip_block_number(&self) -> BlockNumber {
self.rpc_client().get_tip_block_number()
}
pub fn get_block(&self, hash: Byte32) -> BlockView {
self.rpc_client()
.get_block(hash)
.expect("block exists")
.into()
}
pub fn get_block_by_number(&self, number: BlockNumber) -> BlockView {
self.rpc_client()
.get_block_by_number(number)
.expect("block exists")
.into()
}
pub fn get_header_by_number(&self, number: BlockNumber) -> HeaderView {
self.rpc_client()
.get_header_by_number(number)
.expect("header exists")
.into()
}
    /// The states of the chain and the tx-pool are updated asynchronously, which means the chain may
    /// have advanced to the newest tip while the tx-pool has not yet caught up.
    /// `get_tip_tx_pool_info` waits to ensure the tx-pool has updated to the newest tip as well.
pub fn get_tip_tx_pool_info(&self) -> TxPoolInfo {
let tip_header = self.rpc_client().get_tip_header();
let tip_hash = &tip_header.hash;
let instant = Instant::now();
let mut recent = TxPoolInfo::default();
while instant.elapsed() < Duration::from_secs(10) {
let tx_pool_info = self.rpc_client().tx_pool_info();
if &tx_pool_info.tip_hash == tip_hash {
return tx_pool_info;
}
recent = tx_pool_info;
}
panic!(
"timeout to get_tip_tx_pool_info, tip_header={:?}, tx_pool_info: {:?}",
tip_header, recent
);
}
pub fn wait_for_tx_pool(&self) {
let rpc_client = self.rpc_client();
let mut chain_tip = rpc_client.get_tip_header();
let mut tx_pool_tip = rpc_client.tx_pool_info();
if chain_tip.hash == tx_pool_tip.tip_hash {
return;
}
let mut instant = Instant::now();
while instant.elapsed() < Duration::from_secs(10) {
sleep(std::time::Duration::from_secs(1));
chain_tip = rpc_client.get_tip_header();
let prev_tx_pool_tip = tx_pool_tip;
tx_pool_tip = rpc_client.tx_pool_info();
if chain_tip.hash == tx_pool_tip.tip_hash {
return;
} else if prev_tx_pool_tip.tip_hash != tx_pool_tip.tip_hash
&& tx_pool_tip.tip_number.value() < chain_tip.inner.number.value()
{
instant = Instant::now();
}
}
panic!(
"timeout to wait for tx pool,\n\tchain tip: {:?}, {:#x},\n\ttx-pool tip: {}, {:#x}",
chain_tip.inner.number.value(),
chain_tip.hash,
tx_pool_tip.tip_number.value(),
tx_pool_tip.tip_hash,
);
}
pub fn wait_tx_pool_ready(&self) {
let rpc_client = self.rpc_client();
while !rpc_client.tx_pool_ready() {
sleep(std::time::Duration::from_millis(200));
}
}
pub fn new_block(
&self,
bytes_limit: Option<u64>,
proposals_limit: Option<u64>,
max_version: Option<u32>,
) -> BlockView {
self.new_block_builder(bytes_limit, proposals_limit, max_version)
.build()
}
pub fn new_block_builder(
&self,
bytes_limit: Option<u64>,
proposals_limit: Option<u64>,
max_version: Option<u32>,
) -> BlockBuilder {
let template =
self.rpc_client()
.get_block_template(bytes_limit, proposals_limit, max_version);
Block::from(template).as_advanced_builder()
}
pub fn new_transaction(&self, hash: Byte32) -> TransactionView {
self.new_transaction_with_since(hash, 0)
}
pub fn new_transaction_with_since(&self, hash: Byte32, since: u64) -> TransactionView {
self.new_transaction_with_since_capacity(hash, since, capacity_bytes!(100))
}
pub fn new_transaction_with_since_capacity(
&self,
hash: Byte32,
since: u64,
capacity: Capacity,
) -> TransactionView {
let always_success_cell_dep = self.always_success_cell_dep();
let always_success_script = self.always_success_script();
core::TransactionBuilder::default()
.cell_dep(always_success_cell_dep)
.output(
CellOutputBuilder::default()
.capacity(capacity.pack())
.lock(always_success_script)
.build(),
)
.output_data(Default::default())
.input(CellInput::new(OutPoint::new(hash, 0), since))
.build()
}
pub fn assert_tx_pool_size(&self, pending_size: u64, proposed_size: u64) {
let tx_pool_info = self.get_tip_tx_pool_info();
assert_eq!(tx_pool_info.pending.value(), pending_size);
assert_eq!(tx_pool_info.proposed.value(), proposed_size);
}
pub fn assert_tx_pool_statics(&self, total_tx_size: u64, total_tx_cycles: u64) {
let tx_pool_info = self.get_tip_tx_pool_info();
assert_eq!(tx_pool_info.total_tx_size.value(), total_tx_size);
assert_eq!(tx_pool_info.total_tx_cycles.value(), total_tx_cycles);
}
pub fn assert_tx_pool_cycles(&self, total_tx_cycles: u64) {
let tx_pool_info = self.get_tip_tx_pool_info();
assert_eq!(tx_pool_info.total_tx_cycles.value(), total_tx_cycles);
}
pub fn assert_tx_pool_serialized_size(&self, total_tx_size: u64) {
let tx_pool_info = self.get_tip_tx_pool_info();
assert_eq!(tx_pool_info.total_tx_size.value(), total_tx_size);
}
pub fn start(&mut self) {
let mut child_process = Command::new(binary())
.env("RUST_BACKTRACE", "full")
.args(&[
"-C",
&self.working_dir().to_string_lossy().to_string(),
"run",
"--ba-advanced",
])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::inherit())
.spawn()
.expect("failed to run binary");
        // Wait until the node's RPC service is up
let node_info = loop {
if let Ok(local_node_info) = self.rpc_client().inner().local_node_info() {
let _ = self.rpc_client().tx_pool_info();
break local_node_info;
}
match child_process.try_wait() {
Ok(None) => sleep(std::time::Duration::from_secs(1)),
Ok(Some(status)) => {
error!(
"Error: node crashed: {}, log_path: {}",
status,
self.log_path().display()
);
process::exit(status.code().unwrap());
}
Err(error) => {
error!(
"Error: node crashed with reason: {}, log_path: {}",
error,
self.log_path().display()
);
process::exit(255);
}
}
};
self.wait_tx_pool_ready();
self.guard = Some(ProcessGuard {
child: child_process,
killed: false,
});
self.node_id = Some(node_info.node_id);
}
pub fn stop(&mut self) {
drop(self.guard.take())
}
#[cfg(not(target_os = "windows"))]
pub fn stop_gracefully(&mut self) {
if let Some(mut guard) = self.guard.take() {
if !guard.killed {
// send SIGINT to the child
nix::sys::signal::kill(
nix::unistd::Pid::from_raw(guard.child.id() as i32),
nix::sys::signal::Signal::SIGINT,
)
.expect("cannot send ctrl-c");
let _ = guard.child.wait();
guard.killed = true;
}
}
}
pub fn export(&self, target: String) {
Command::new(binary())
.args(&[
"export",
"-C",
&self.working_dir().to_string_lossy().to_string(),
&target,
])
.stdin(Stdio::null())
.stdout(Stdio::inherit())
.stderr(Stdio::inherit())
.output()
.expect("failed to execute process");
}
pub fn import(&self, target: String) {
Command::new(binary())
.args(&[
"import",
"-C",
&self.working_dir().to_string_lossy().to_string(),
&target,
])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::inherit())
.output()
.expect("failed to execute process");
}
}
// TODO: it will be removed from here later, in another PR
pub fn connect_all(nodes: &[Node]) {
for node_a in nodes.iter() {
for node_b in nodes.iter() {
if node_a.p2p_address() != node_b.p2p_address() {
node_a.connect(node_b);
}
}
}
}
// TODO: it will be removed from here later, in another PR
pub fn disconnect_all(nodes: &[Node]) {
for node_a in nodes.iter() {
for node_b in nodes.iter() {
if node_a.p2p_address() != node_b.p2p_address() {
node_a.disconnect(node_b);
}
}
}
}
// TODO: it will be removed from here later, in another PR
pub fn waiting_for_sync<N: Borrow<Node>>(nodes: &[N]) {
let mut tip_headers: HashSet<ckb_jsonrpc_types::HeaderView> =
HashSet::with_capacity(nodes.len());
    // 60 seconds is a reasonable timeout to sync, even for a slow CI server
let synced = wait_until(60, || {
tip_headers = nodes
.as_ref()
.iter()
.map(|node| node.borrow().rpc_client().get_tip_header())
.collect();
tip_headers.len() == 1
});
if !synced {
panic!("timeout to wait for sync, tip_headers: {:?}", tip_headers);
}
for node in nodes {
node.borrow().wait_for_tx_pool();
}
}
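// Typical flow inside an integration spec (sketch only, kept as a comment because this
// harness spawns the real `ckb` binary resolved by `binary()` and drives it over RPC,
// so it cannot run as an offline unit test):
//
//     let mut node = Node::new("spec_name", "node0");
//     node.start();
//     let block = node.new_block(None, None, None);
//     node.submit_block(&block);
//     assert_eq!(node.get_tip_block_number(), 1);
//     node.stop();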
| 33.840735 | 105 | 0.56358 |
d92c6a83c341713f9a6c070c8613943c046c45af | 644 | // Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
use super::dispatch_json::{JsonOp, Value};
use crate::ops::json_op;
use crate::resources::lock_resource_table;
use crate::state::ThreadSafeState;
use deno::*;
pub fn init(i: &mut Isolate, s: &ThreadSafeState) {
i.register_op("resources", s.core_op(json_op(s.stateful_op(op_resources))));
}
fn op_resources(
_state: &ThreadSafeState,
_args: Value,
_zero_copy: Option<PinnedBuf>,
) -> Result<JsonOp, ErrBox> {
let resource_table = lock_resource_table();
let serialized_resources = resource_table.entries();
Ok(JsonOp::Sync(json!(serialized_resources)))
}
| 30.666667 | 78 | 0.743789 |
2258a4ec87200ad611b16c7eb273233df3e660da | 2,055 | use std::io::prelude::*;
use std::io::{Error, ErrorKind, Result};
use std::str;
const EXPECTED_CONNECT_START: &'static str = "OK MPD ";
#[cfg(windows)]
const LINE_ENDING: &'static str = "\r\n";
#[cfg(not(windows))]
const LINE_ENDING: &'static str = "\n";
fn main() {
println!("Hello, world!");
}
fn connect<R: BufRead, W: Write>(input: &mut R, output: &mut W) -> Result<()> {
let mut buf = String::new();
let num_bytes = input.read_line(&mut buf)?;
if num_bytes > EXPECTED_CONNECT_START.len() && buf.starts_with(EXPECTED_CONNECT_START) {
write!(output, "Successfully connected to ")?;
write!(output, "{}{}", &buf.trim()[3..], LINE_ENDING)?;
Ok(())
} else {
let message = format!("Unexpected welcome message from server: '{}'", buf);
write!(output, "{}", message)?;
Err(Error::new(ErrorKind::Other, message))
}
}
#[cfg(test)]
mod test {
use super::*;
mod connect_tests {
use super::*;
        fn connect_to_server_with_response(response: &[u8]) -> String {
use std::io::Cursor;
let mut input_buffer = Cursor::new(&response);
let mut output_buffer = Cursor::new(vec![]);
            let _ = connect(&mut input_buffer, &mut output_buffer);
String::from_utf8(output_buffer.get_ref().to_vec()).unwrap()
}
#[test]
fn connect_reads_version() {
let server_response = b"OK MPD 0.21.11\n";
let expected_output = format!("Successfully connected to MPD 0.21.11{}", LINE_ENDING);
assert_eq!(
                connect_to_server_with_response(server_response),
expected_output
);
}
#[test]
fn connect_handles_unexpected_response() {
let server_response = b"some unexpected bytes";
let expected_output = "Unexpected";
assert_eq!(
                &connect_to_server_with_response(server_response)[0..expected_output.len()],
expected_output
);
}
}
}
| 31.615385 | 98 | 0.576156 |
162f90e6bb25c15e00670da4d2fafdaebcab5c1c | 3,077 | //! `μfmt` utilities
//!
//! # Minimum Supported Rust Version (MSRV)
//!
//! This crate is guaranteed to compile on stable Rust 1.36 and up. It *might* compile on older
//! versions but that may change in any new patch release.
#![deny(missing_docs)]
#![deny(rust_2018_compatibility)]
#![deny(rust_2018_idioms)]
#![deny(warnings)]
#![no_std]
use core::{convert::Infallible, str};
pub use heapless::consts;
use heapless::{ArrayLength, String};
use ufmt_write::uWrite;
macro_rules! assume_unreachable {
() => {
if cfg!(debug_assertions) {
panic!()
} else {
core::hint::unreachable_unchecked()
}
};
}
/// A write adapter that ignores all errors
pub struct Ignore<W>
where
W: uWrite,
{
writer: W,
}
impl<W> Ignore<W>
where
W: uWrite,
{
/// Creates a new `Ignore` adapter
pub fn new(writer: W) -> Self {
Self { writer }
}
/// Destroys the adapter and returns the underlying writer
pub fn free(self) -> W {
self.writer
}
}
impl<W> uWrite for Ignore<W>
where
W: uWrite,
{
type Error = Infallible;
fn write_str(&mut self, s: &str) -> Result<(), Infallible> {
let _ = self.writer.write_str(s);
Ok(())
}
}
/// A write adapter that buffers writes and automatically flushes on newlines
pub struct LineBuffered<W, N>
where
N: ArrayLength<u8>,
W: uWrite,
{
buffer: String<N>,
writer: W,
}
impl<W, N> LineBuffered<W, N>
where
N: ArrayLength<u8>,
W: uWrite,
{
/// Creates a new `LineBuffered` adapter
pub fn new(writer: W) -> Self {
Self {
buffer: String::new(),
writer,
}
}
/// Flushes the contents of the buffer
pub fn flush(&mut self) -> Result<(), W::Error> {
let ret = self.writer.write_str(&self.buffer);
self.buffer.clear();
ret
}
/// Destroys the adapter and returns the underlying writer
pub fn free(self) -> W {
self.writer
}
fn push_str(&mut self, s: &str) -> Result<(), W::Error> {
let len = s.as_bytes().len();
if self.buffer.len() + len > self.buffer.capacity() {
self.flush()?;
}
if len > self.buffer.capacity() {
self.writer.write_str(s)?;
} else {
self.buffer
.push_str(s)
.unwrap_or_else(|_| unsafe { assume_unreachable!() })
}
Ok(())
}
}
impl<W, N> uWrite for LineBuffered<W, N>
where
N: ArrayLength<u8>,
W: uWrite,
{
type Error = W::Error;
fn write_str(&mut self, mut s: &str) -> Result<(), W::Error> {
while let Some(pos) = s.as_bytes().iter().position(|b| *b == b'\n') {
let line = s
.get(..pos + 1)
.unwrap_or_else(|| unsafe { assume_unreachable!() });
self.push_str(line)?;
self.flush()?;
s = s
.get(pos + 1..)
.unwrap_or_else(|| unsafe { assume_unreachable!() });
}
self.push_str(s)
}
}
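// Minimal usage sketch (not part of the original crate's API surface): `Sink` is a
// throw-away writer that appends into a fixed-capacity `heapless::String`, used to show
// that `LineBuffered` holds back a partial line until a newline (or an explicit flush).
#[cfg(test)]
mod line_buffered_sketch {
    use super::*;

    struct Sink {
        out: String<consts::U64>,
    }

    impl uWrite for Sink {
        type Error = Infallible;

        fn write_str(&mut self, s: &str) -> Result<(), Infallible> {
            // Capacity overflow is silently dropped; acceptable for this sketch.
            let _ = self.out.push_str(s);
            Ok(())
        }
    }

    #[test]
    fn buffers_until_newline() {
        let mut lb: LineBuffered<Sink, consts::U64> =
            LineBuffered::new(Sink { out: String::new() });
        lb.write_str("no newline yet").unwrap();
        lb.write_str(" ... now\nleftover").unwrap();
        // Only the completed line reached the sink; "leftover" is still buffered.
        let sink = lb.free();
        assert_eq!(&sink.out[..], "no newline yet ... now\n");
    }
}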
| 21.669014 | 95 | 0.544686 |
0a3351669da4e9baa7c372c0dcaddd331db0dff3 | 100 | use ant_simulator_rust::create_simulation;
fn main() {
let simulation = create_simulation();
}
| 16.666667 | 42 | 0.74 |
694e6ad262d4b72bb89b226db539bb1aea5e6a3e | 1,242 | extern crate random_access_memory as ram;
extern crate random_access_storage;
use random_access_storage::RandomAccess;
#[test]
fn can_call_new() {
let _file = ram::RandomAccessMemory::default();
}
#[test]
fn can_open_buffer() {
let mut file = ram::RandomAccessMemory::default();
file.write(0, b"hello").unwrap();
}
#[test]
fn can_write() {
let mut file = ram::RandomAccessMemory::default();
file.write(0, b"hello").unwrap();
file.write(5, b" world").unwrap();
}
#[test]
fn can_read() {
let mut file = ram::RandomAccessMemory::default();
file.write(0, b"hello").unwrap();
file.write(5, b" world").unwrap();
let text = file.read(0, 11).unwrap();
let text = String::from_utf8(text.to_vec()).unwrap();
assert_eq!(text, "hello world");
}
#[test]
fn can_len() {
let mut file = ram::RandomAccessMemory::default();
assert_eq!(file.len().unwrap(), 0);
file.write(0, b"hello").unwrap();
assert_eq!(file.len().unwrap(), 5);
file.write(5, b" world").unwrap();
assert_eq!(file.len().unwrap(), 11);
}
#[test]
fn can_is_empty() {
let mut file = ram::RandomAccessMemory::default();
assert_eq!(file.is_empty().unwrap(), true);
file.write(0, b"hello").unwrap();
assert_eq!(file.is_empty().unwrap(), false);
}
| 24.352941 | 55 | 0.662641 |
f42a18489f0f7020d0db20626828ed4522da4221 | 2,122 | use ray::Ray3;
use randomutil::{
seedable_unit_distribution,
random_in_unit_disk,
};
use cgmath::{
Point3,
Vector3,
EuclideanSpace,
InnerSpace,
};
use std::f32;
use std::time::{
Duration,
Instant,
};
use timeutil::TimeUtil;
#[derive(Copy, Clone)]
pub struct Camera {
origin: Point3<f32>,
lower_left_corner: Vector3<f32>,
horizontal: Vector3<f32>,
vertical: Vector3<f32>,
lens_radius: f32,
u: Vector3<f32>,
v: Vector3<f32>,
aperture_open_time: Instant,
aperture_duration: Duration,
}
impl Camera {
pub fn new(look_from: Point3<f32>,
look_at: Vector3<f32>,
up: Vector3<f32>,
vfov: f32,
aspect: f32,
aperture: f32,
focus_dist: f32,
aperture_open_time: Instant,
aperture_duration: Duration) -> Self {
let lens_radius = aperture / 2.0;
let theta = vfov * f32::consts::PI / 180.0;
let half_height = (theta / 2.0).tan();
let half_width = aspect * half_height;
let w = (look_from.to_vec() - look_at).normalize();
let u = up.cross(w);
let v = w.cross(u);
Camera {
lower_left_corner: look_from.to_vec() - half_width * focus_dist * u - half_height * focus_dist * v - focus_dist * w,
horizontal: 2.0 * half_width * focus_dist * u,
vertical: 2.0 * half_height * focus_dist * v,
origin: look_from,
lens_radius,
u,
v,
aperture_open_time,
aperture_duration,
}
}
pub fn get_ray(&self, s: f32, t: f32) -> Ray3<f32> {
let rd: Vector3<f32> = self.lens_radius * random_in_unit_disk();
let offset: Vector3<f32> = self.u * rd.x + self.v * rd.y;
let time = self.aperture_open_time + self.aperture_duration.mul_decimal(seedable_unit_distribution());
Ray3::new(self.origin + offset,
self.lower_left_corner + s * self.horizontal + t * self.vertical - self.origin.to_vec() - offset,
time)
}
} | 29.068493 | 128 | 0.567861 |
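// Smoke-test sketch (it assumes the crate-local `randomutil`/`timeutil` helpers imported
// above behave as their names suggest): the derived viewport axes should be orthogonal,
// and sampling the centre of the image plane should not panic.
#[cfg(test)]
mod camera_sketch {
    use super::*;

    #[test]
    fn viewport_axes_are_orthogonal() {
        let camera = Camera::new(
            Point3::new(0.0, 0.0, 5.0),  // look_from
            Vector3::new(0.0, 0.0, 0.0), // look_at (the origin)
            Vector3::new(0.0, 1.0, 0.0), // up
            90.0,                        // vertical field of view in degrees
            16.0 / 9.0,                  // aspect ratio
            0.0,                         // aperture: 0 disables depth of field
            5.0,                         // focus distance
            Instant::now(),
            Duration::from_millis(1),
        );
        // horizontal is built from u and vertical from v = w x u, so they must be orthogonal.
        assert!(camera.horizontal.dot(camera.vertical).abs() < 1e-4);
        // s = t = 0.5 samples the centre of the image plane.
        let _ray = camera.get_ray(0.5, 0.5);
    }
}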
edce30389445d571f749c4e30ec94a306367571a | 5,660 | // Copyright (c) 2017 Maxime “pep” Buquet <[email protected]>
// Copyright (c) 2017 Emmanuel Gil Peyrot <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
use crate::date::DateTime;
use crate::presence::PresencePayload;
generate_element!(
/// Represents the query for messages before our join.
#[derive(Default)]
History, "history", MUC,
attributes: [
/// How many characters of history to send, in XML characters.
maxchars: Option<u32> = "maxchars",
/// How many messages to send.
maxstanzas: Option<u32> = "maxstanzas",
/// Only send messages received in these last seconds.
seconds: Option<u32> = "seconds",
/// Only send messages after this date.
since: Option<DateTime> = "since",
]
);
impl History {
/// Create a new empty history element.
pub fn new() -> Self {
History::default()
}
/// Set how many characters of history to send.
pub fn with_maxchars(mut self, maxchars: u32) -> Self {
self.maxchars = Some(maxchars);
self
}
/// Set how many messages to send.
pub fn with_maxstanzas(mut self, maxstanzas: u32) -> Self {
self.maxstanzas = Some(maxstanzas);
self
}
/// Only send messages received in these last seconds.
pub fn with_seconds(mut self, seconds: u32) -> Self {
self.seconds = Some(seconds);
self
}
/// Only send messages received since this date.
pub fn with_since(mut self, since: DateTime) -> Self {
self.since = Some(since);
self
}
}
generate_element!(
/// Represents a room join request.
#[derive(Default)]
Muc, "x", MUC, children: [
/// Password to use when the room is protected by a password.
password: Option<String> = ("password", MUC) => String,
/// Controls how much and how old we want to receive history on join.
history: Option<History> = ("history", MUC) => History
]
);
impl PresencePayload for Muc {}
impl Muc {
/// Create a new MUC join element.
pub fn new() -> Self {
Muc::default()
}
/// Join a room with this password.
pub fn with_password(mut self, password: String) -> Self {
self.password = Some(password);
self
}
/// Join a room with only that much history.
pub fn with_history(mut self, history: History) -> Self {
self.history = Some(history);
self
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::util::error::Error;
use crate::Element;
use std::convert::TryFrom;
use std::str::FromStr;
#[test]
fn test_muc_simple() {
let elem: Element = "<x xmlns='http://jabber.org/protocol/muc'/>"
.parse()
.unwrap();
Muc::try_from(elem).unwrap();
}
#[test]
fn test_muc_invalid_child() {
let elem: Element = "<x xmlns='http://jabber.org/protocol/muc'><coucou/></x>"
.parse()
.unwrap();
let error = Muc::try_from(elem).unwrap_err();
let message = match error {
Error::ParseError(string) => string,
_ => panic!(),
};
assert_eq!(message, "Unknown child in x element.");
}
#[test]
fn test_muc_serialise() {
let elem: Element = "<x xmlns='http://jabber.org/protocol/muc'/>"
.parse()
.unwrap();
let muc = Muc {
password: None,
history: None,
};
let elem2 = muc.into();
assert_eq!(elem, elem2);
}
#[cfg(not(feature = "disable-validation"))]
#[test]
fn test_muc_invalid_attribute() {
let elem: Element = "<x xmlns='http://jabber.org/protocol/muc' coucou=''/>"
.parse()
.unwrap();
let error = Muc::try_from(elem).unwrap_err();
let message = match error {
Error::ParseError(string) => string,
_ => panic!(),
};
assert_eq!(message, "Unknown attribute in x element.");
}
#[test]
fn test_muc_simple_password() {
let elem: Element =
"<x xmlns='http://jabber.org/protocol/muc'><password>coucou</password></x>"
.parse()
.unwrap();
let elem1 = elem.clone();
let muc = Muc::try_from(elem).unwrap();
assert_eq!(muc.password, Some("coucou".to_owned()));
let elem2 = Element::from(muc);
assert_eq!(elem1, elem2);
}
#[test]
fn history() {
let elem: Element = "
<x xmlns='http://jabber.org/protocol/muc'>
<history maxstanzas='0'/>
</x>"
.parse()
.unwrap();
let muc = Muc::try_from(elem).unwrap();
let muc2 = Muc::new().with_history(History::new().with_maxstanzas(0));
assert_eq!(muc, muc2);
let history = muc.history.unwrap();
assert_eq!(history.maxstanzas, Some(0));
assert_eq!(history.maxchars, None);
assert_eq!(history.seconds, None);
assert_eq!(history.since, None);
let elem: Element = "
<x xmlns='http://jabber.org/protocol/muc'>
<history since='1970-01-01T00:00:00Z'/>
</x>"
.parse()
.unwrap();
let muc = Muc::try_from(elem).unwrap();
assert_eq!(
muc.history.unwrap().since.unwrap(),
DateTime::from_str("1970-01-01T00:00:00+00:00").unwrap()
);
}
}
| 29.025641 | 87 | 0.557067 |
7ab5b12bb46414a266e8e85f3edb61e0b794d8c5 | 2,000 | use swc_common::util::take::Take;
use swc_ecma_ast::*;
use swc_ecma_visit::{noop_visit_mut_type, VisitMut, VisitMutWith};
use crate::{option::CompressOptions, DISABLE_BUGGY_PASSES};
pub fn postcompress_optimizer(options: &CompressOptions) -> impl '_ + VisitMut {
PostcompressOptimizer { options }
}
struct PostcompressOptimizer<'a> {
options: &'a CompressOptions,
}
impl PostcompressOptimizer<'_> {
fn optimize_in_bool_ctx(&mut self, e: &mut Expr) {
if !self.options.bools {
return;
}
// This is buggy
if DISABLE_BUGGY_PASSES {
return;
}
// Note: `||` is not handled because of precedence.
if let Expr::Bin(BinExpr {
op: op @ op!("&&"),
right,
left,
..
}) = e
{
if let Expr::Bin(BinExpr { op: op!("&&"), .. }) = &**left {
return;
}
match &mut **right {
Expr::Unary(UnaryExpr {
op: op!("!"), arg, ..
}) if arg.is_ident() => {
let new_op = if *op == op!("&&") {
op!("||")
} else {
op!("&&")
};
tracing::debug!(
"bools: `(a {} !b)` => `(a {} b)` (in bool context)",
*op,
new_op
);
*op = new_op;
*right = arg.take();
}
_ => {}
}
}
}
}
impl VisitMut for PostcompressOptimizer<'_> {
noop_visit_mut_type!();
fn visit_mut_cond_expr(&mut self, e: &mut CondExpr) {
e.visit_mut_children_with(self);
self.optimize_in_bool_ctx(&mut *e.test);
}
fn visit_mut_if_stmt(&mut self, s: &mut IfStmt) {
s.visit_mut_children_with(self);
self.optimize_in_bool_ctx(&mut *s.test);
}
}
| 25.974026 | 80 | 0.452 |
487e4513602c0c2b2c12fb253ed20743ad2d53bb | 861 | // option2.rs
// Make me compile! Execute `rustlings hint option2` for hints
fn main() {
let optional_value = Some(String::from("rustlings"));
// TODO: Make this an if let statement whose value is "Some" type
if let Some(value) = optional_value {
println!("the value of optional value is: {}", value);
} else {
println!("The optional value doesn't contain anything!");
}
let mut optional_values_vec: Vec<Option<i8>> = Vec::new();
for x in 1..10 {
optional_values_vec.push(Some(x));
}
// TODO: make this a while let statement - remember that vector.pop also adds another layer of Option<T>
// You can stack `Option<T>`'s into while let and if let
    while let Some(Some(value)) = optional_values_vec.pop() {
println!("current value: {}", value);
}
}
| 33.115385 | 108 | 0.641115 |
ab6baa5025a8383b1887f147f93e0a164799b8aa | 3,210 | use crate::raw_alaya_tx::RawTransaction;
use crate::{AlayaSigner, SignerError};
use parity_crypto::publickey::sign;
use zkdpos_types::tx::{PackedAtpSignature, TxAtpSignature};
use zkdpos_types::{Address, H256};
#[derive(Clone)]
pub struct PrivateKeySigner {
private_key: H256,
}
impl std::fmt::Debug for PrivateKeySigner {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "PrivateKeySigner")
}
}
impl PrivateKeySigner {
pub fn new(private_key: H256) -> Self {
Self { private_key }
}
}
#[async_trait::async_trait]
impl AlayaSigner for PrivateKeySigner {
/// Get Alaya address that matches the private key.
async fn get_address(&self) -> Result<Address, SignerError> {
PackedAtpSignature::address_from_private_key(&self.private_key)
.map_err(|_| SignerError::DefineAddress)
}
/// The sign method calculates an Alaya specific signature with:
    /// sign(keccak256("\x19Alaya Signed Message:\n" + len(message) + message)).
async fn sign_message(&self, message: &[u8]) -> Result<TxAtpSignature, SignerError> {
let pack = PackedAtpSignature::sign(&self.private_key, &message)
.map_err(|err| SignerError::SigningFailed(err.to_string()))?;
Ok(TxAtpSignature::AlayaSignature(pack))
}
/// Signs and returns the RLP-encoded transaction.
async fn sign_transaction(&self, raw_tx: RawTransaction) -> Result<Vec<u8>, SignerError> {
let sig = sign(&self.private_key.into(), &raw_tx.hash().into())
.map_err(|_| SignerError::NoSigningKey)?;
Ok(raw_tx.rlp_encode_tx(sig))
}
}
// #[cfg(test)]
// mod test {
// use super::PrivateKeySigner;
// use super::RawTransaction;
// use crate::AlayaSigner;
// use zkdpos_types::{H160, H256, U256};
// #[tokio::test]
// async fn test_generating_signature() {
// let private_key = H256::from([5; 32]);
// let signer = PrivateKeySigner::new(private_key);
// let raw_transaction = RawTransaction {
// chain_id: 201018,
// nonce: U256::from(1),
// to: Some(H160::zero()),
// value: U256::from(10),
// gas_price: U256::from(1),
// gas: U256::from(2),
// data: vec![1, 2, 3],
// };
// let signature = signer
// .sign_transaction(raw_transaction.clone())
// .await
// .unwrap();
// assert_ne!(signature.len(), 1);
// // precalculated signature with right algorithm implementation
// let precalculated_signature: Vec<u8> = vec![
// 248, 96, 1, 1, 2, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10,
// 131, 1, 2, 3, 37, 160, 152, 202, 15, 174, 50, 167, 190, 239, 206, 183, 109, 215, 135,
// 60, 43, 71, 11, 74, 252, 97, 83, 86, 66, 249, 237, 111, 118, 121, 105, 214, 130, 249,
// 160, 106, 110, 143, 138, 113, 12, 177, 239, 121, 188, 247, 21, 236, 236, 163, 254, 28,
// 48, 250, 5, 20, 234, 54, 58, 162, 103, 252, 20, 243, 121, 7, 19,
// ];
// assert_eq!(signature, precalculated_signature);
// }
// }
| 37.325581 | 101 | 0.586293 |
ebf0c48e2c6f9528a083b70e332b33a01e8a6116 | 368 | #[doc = "Reader of register re_en"]
pub type R = crate::R<u32, super::RE_EN>;
#[doc = "Writer for register re_en"]
pub type W = crate::W<u32, super::RE_EN>;
#[doc = "Register re_en `reset()`'s with value 0"]
impl crate::ResetValue for super::RE_EN {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
impl R {}
impl W {}
| 24.533333 | 50 | 0.603261 |
d5416286f359330d38a25dda2fbae430886e7688 | 3,523 | // Copyright 2018 Palantir Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use serde_bytes::ByteBuf;
use std::f64;
use std::fmt::Debug;
fn serialize<T>(value: &T) -> String
where
T: Serialize,
{
let mut buf = vec![];
value
.serialize(&mut crate::json::Serializer::new(&mut buf))
.unwrap();
String::from_utf8(buf).unwrap()
}
fn deserialize_client<T>(json: &str) -> T
where
T: DeserializeOwned,
{
let mut de = crate::json::ClientDeserializer::from_str(json);
let v = T::deserialize(&mut de).unwrap();
de.end().unwrap();
v
}
fn deserialize_server<T>(json: &str) -> T
where
T: DeserializeOwned,
{
let mut de = crate::json::ServerDeserializer::from_str(json);
let v = T::deserialize(&mut de).unwrap();
de.end().unwrap();
v
}
fn test_ser<T>(ty: &T, expected_json: &str)
where
T: Serialize,
{
let actual_json = serialize(ty);
let expected_value = serde_json::from_str::<serde_json::Value>(expected_json).unwrap();
let actual_value = serde_json::from_str::<serde_json::Value>(&actual_json).unwrap();
assert_eq!(expected_value, actual_value);
}
fn test_de<T>(ty: &T, json: &str)
where
T: DeserializeOwned + PartialEq + Debug,
{
let deserialized = deserialize_client(json);
assert_eq!(*ty, deserialized);
    let deserialized = deserialize_server(json);
assert_eq!(*ty, deserialized);
}
fn test_serde<T>(ty: &T, expected_json: &str)
where
T: Serialize + DeserializeOwned + PartialEq + Debug,
{
test_ser(ty, expected_json);
test_de(ty, expected_json);
}
#[test]
fn binary_serde() {
test_serde(&ByteBuf::from(b"foobar".to_vec()), r#""Zm9vYmFy""#);
}
#[allow(clippy::float_cmp)]
fn test_doubles(value: f64, string: &str) {
let json = format!(r#""{}""#, string);
test_ser(&value, &json);
let deserialized = deserialize_client::<f64>(&json);
assert!((value.is_nan() && deserialized.is_nan()) || value == deserialized);
let deserialized = deserialize_server::<f64>(&json);
assert!((value.is_nan() && deserialized.is_nan()) || value == deserialized);
}
#[test]
fn nonfinite_doubles() {
test_doubles(f64::INFINITY, "Infinity");
test_doubles(f64::NEG_INFINITY, "-Infinity");
test_doubles(f64::NAN, "NaN");
}
#[derive(Deserialize, Debug, PartialEq)]
struct Foo {
foo: i32,
}
#[test]
fn client_unknown_fields() {
let deserialized = deserialize_client::<Foo>(
r#"
{
"foo": 1,
"bogus": "hello"
}
"#,
);
assert_eq!(Foo { foo: 1 }, deserialized);
}
#[test]
fn server_unknown_fields() {
let json = r#"
{
"foo": 1,
"bogus": "hello"
}
"#;
let e = Foo::deserialize(&mut crate::json::ServerDeserializer::from_str(json))
.err()
.unwrap();
assert!(e.is_data());
assert!(e.to_string().contains("foo"));
assert!(e.to_string().contains("bogus"));
}
| 25.164286 | 91 | 0.644337 |
90a49e2e347292818cca83d1821b309525098ddb | 4,653 | mod tower_generator;
mod tree_generator;
use common::block::*;
use rand::prelude::*;
pub use tower_generator::TowerGenerator;
pub use tree_generator::TreeGenerator;
pub trait ObjectGenerator {
/// Pre-generate an object to be placed in the world
fn generate(&mut self) -> PregeneratedObject;
}
/// Container for a pregenerated object, with some helper functions
#[derive(Clone)]
pub struct PregeneratedObject {
pub anchor_x: usize,
pub anchor_y: usize,
pub anchor_z: usize,
pub size_x: usize,
pub size_y: usize,
pub size_z: usize,
pub foundation_block: Option<Block>,
pub place_on_soil: bool,
pub overwrite_non_empty: bool,
pub blocks: Vec<Block>,
}
impl PregeneratedObject {
pub fn new(size_x: usize, size_y: usize, size_z: usize) -> Self {
PregeneratedObject {
anchor_x: size_x / 2,
anchor_y: size_y / 2,
anchor_z: 0,
size_x,
size_y,
size_z,
foundation_block: None,
place_on_soil: false,
overwrite_non_empty: false,
blocks: vec![Block::empty_block(); size_x * size_y * size_z],
}
}
pub fn solid(
size_x: usize,
size_y: usize,
size_z: usize,
block: Block,
foundation_block: Block,
) -> Self {
let mut object = PregeneratedObject::new(size_x, size_y, size_z);
for x in 0..size_x {
for y in 0..size_y {
for z in 0..size_z {
object.set(x, y, z, block);
}
}
}
object.foundation_block = Some(foundation_block);
object
}
pub fn get(&self, x: usize, y: usize, z: usize) -> Block {
self.blocks[z + y * self.size_z + x * self.size_z * self.size_y]
}
pub fn set(&mut self, x: usize, y: usize, z: usize, block: Block) {
self.blocks[z + y * self.size_z + x * self.size_z * self.size_y] = block;
}
pub fn set_rectangle(
&mut self,
x1: usize,
y1: usize,
z1: usize,
x2: usize,
y2: usize,
z2: usize,
block: Block,
checkerboard: bool,
) {
for rz in z1..z2 {
for rx in x1..x2 {
if !checkerboard || rx % 2 != 0 {
self.set(rx, y1, rz, block);
self.set(rx, y2 - 1, rz, block);
}
}
for ry in y1..y2 {
if !checkerboard || ry % 2 != 0 {
self.set(x1, ry, rz, block);
self.set(x2 - 1, ry, rz, block);
}
}
}
}
pub fn set_filled_rectangle(
&mut self,
x1: usize,
y1: usize,
z1: usize,
x2: usize,
y2: usize,
z2: usize,
block: Block,
) {
for rx in x1..x2 {
for ry in y1..y2 {
for rz in z1..z2 {
self.set(rx, ry, rz, block);
}
}
}
}
pub fn fill_sphere(&mut self, x: usize, y: usize, z: usize, range: usize, block: Block) {
let range_sq = (range * range) as isize;
for rx in x - range..x + range {
for ry in y - range..y + range {
for rz in z - range..z + range {
let dx = rx as isize - x as isize;
let dy = ry as isize - y as isize;
let dz = rz as isize - z as isize;
let dist_sq = dx * dx + dy * dy + dz * dz;
if dist_sq <= range_sq {
self.set(rx, ry, rz, block);
}
}
}
}
}
    /// Place a sphere where roughly 70% of the blocks are placed at random
pub fn spray_sphere(
&mut self,
x: usize,
y: usize,
z: usize,
range: usize,
block: Block,
random: &mut StdRng,
) {
let range_sq = (range * range) as isize;
for rx in x - range..x + range {
for ry in y - range..y + range {
for rz in z - range..z + range {
let dx = rx as isize - x as isize;
let dy = ry as isize - y as isize;
let dz = rz as isize - z as isize;
let dist_sq = dx * dx + dy * dy + dz * dz;
if dist_sq <= range_sq {
if random.next_u32() % 10 > 2 {
self.set(rx, ry, rz, block);
}
}
}
}
}
}
}
| 28.546012 | 93 | 0.460348 |
14660b2160c70f7047b8596b4c03372dd42d4e8e | 19,689 | use {ErrorCode, IndyHandle, IndyError};
use std::ffi::CString;
use std::ptr::null;
use futures::Future;
use ffi::payments;
use ffi::{ResponseStringCB,
ResponseStringStringCB};
use utils::callbacks::{ClosureHandler, ResultHandler};
/// Create the payment address for the specified payment method
///
/// This method generates the private part of a payment address
/// and stores it in a secure place. Ideally it should be kept
/// secret in the libindy wallet (see the crypto module).
///
/// Note that the payment method should be able to resolve this
/// secret from the fully resolvable payment address format.
///
/// # Arguments
/// * `wallet_handle` - wallet handle where to save new address
/// * `payment_method` - payment method to use (for example, 'sov')
/// * `config` - payment address config as json
///
/// # Example
/// config
/// {
/// seed: <str>, // allows deterministic creation of payment address
/// }
///
/// # Returns
/// * `payment_address` - public identifier of payment address in fully resolvable payment address format
pub fn create_payment_address(wallet_handle: IndyHandle, payment_method: &str, config: &str) -> Box<Future<Item=String, Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string();
let err = _create_payment_address(command_handle, wallet_handle, payment_method, config, cb);
ResultHandler::str(command_handle, err, receiver)
}
fn _create_payment_address(command_handle: IndyHandle, wallet_handle: IndyHandle, payment_method: &str, config: &str, cb: Option<ResponseStringCB>) -> ErrorCode {
let payment_method = c_str!(payment_method);
let config = c_str!(config);
ErrorCode::from(unsafe { payments::indy_create_payment_address(command_handle, wallet_handle, payment_method.as_ptr(), config.as_ptr(), cb) })
}
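// Usage sketch (commented out: it needs an opened wallet handle and an installed payment
// plugin — e.g. libsovtoken registering the hypothetical "sov" method — neither of which
// this wrapper provides). Called from a consumer crate as `indy::payments::create_payment_address`:
//
//     let payment_address = payments::create_payment_address(
//         wallet_handle,
//         "sov",
//         r#"{"seed": "000000000000000000000000000Seed1"}"#,
//     ).wait()?;
//     // The address comes back in the fully resolvable format, e.g. "pay:sov:<identifier>".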
/// Lists all payment addresses that are stored in the wallet
///
/// # Arguments
/// * `wallet_handle` - wallet to search for payment_addresses
///
/// # Returns
/// * `payment_addresses_json` - json array of string with json addresses
pub fn list_payment_addresses(wallet_handle: IndyHandle) -> Box<Future<Item=String, Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string();
let err = _list_payment_addresses(command_handle, wallet_handle, cb);
ResultHandler::str(command_handle, err, receiver)
}
fn _list_payment_addresses(command_handle: IndyHandle, wallet_handle: IndyHandle, cb: Option<ResponseStringCB>) -> ErrorCode {
ErrorCode::from(unsafe { payments::indy_list_payment_addresses(command_handle, wallet_handle, cb) })
}
/// Modifies an Indy request by adding information on how to pay fees for this transaction
/// according to the selected payment method.
///
/// Payment selection is performed by looking to o
///
/// This method consumes a set of UTXO inputs and outputs. The difference between the inputs balance
/// and the outputs balance is the fee for this transaction.
///
/// Note that this method also produces correct fee signatures.
///
/// The format of inputs is specific to the payment method. Usually it should reference a payment transaction
/// with at least one output that corresponds to a payment address that the user owns.
///
/// # Arguments
/// * `wallet_handle` - wallet handle
/// * `submitter_did` - DID of request sender
/// * `req_json` - initial transaction request as json
/// * `inputs_json` - the list of UTXO inputs as json array
///
/// # Examples
/// inputs_json:
/// ["input1", ...]
/// Notes:
/// - each input should reference paymentAddress
/// - this param will be used to determine payment_method
/// outputs_json: The list of UTXO outputs as json array:
/// [{
/// paymentAddress: <str>, // payment address used as output
/// amount: <int>, // amount of tokens to transfer to this payment address
/// extra: <str>, // optional data
/// }]
///
/// # Returns
/// * `req_with_fees_json` - modified Indy request with added fees info
/// * `payment_method`
pub fn add_request_fees(wallet_handle: IndyHandle,
submitter_did: Option<&str>,
req_json: &str,
inputs_json: &str,
outputs_json: &str,
extra: Option<&str>) -> Box<Future<Item=(String, String), Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string_string();
let err = _add_request_fees(command_handle, wallet_handle, submitter_did, req_json, inputs_json, outputs_json, extra, cb);
ResultHandler::str_str(command_handle, err, receiver)
}
fn _add_request_fees(command_handle: IndyHandle,
wallet_handle: IndyHandle,
submitter_did: Option<&str>,
req_json: &str,
inputs_json: &str,
outputs_json: &str,
extra: Option<&str>,
cb: Option<ResponseStringStringCB>) -> ErrorCode {
let submitter_did_str = opt_c_str!(submitter_did);
let req_json = c_str!(req_json);
let inputs_json = c_str!(inputs_json);
let outputs_json = c_str!(outputs_json);
let extra_str = opt_c_str!(extra);
ErrorCode::from(unsafe {
payments::indy_add_request_fees(command_handle,
wallet_handle,
opt_c_ptr!(submitter_did, submitter_did_str),
req_json.as_ptr(),
inputs_json.as_ptr(),
outputs_json.as_ptr(),
opt_c_ptr!(extra, extra_str),
cb)
})
}
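// Typical flow (illustrative sketch only; identifiers are placeholders): build
// the ledger request first, let this call attach fee inputs/outputs, then parse
// the submitted response with the returned payment method:
//
//     let (req_with_fees, method) =
//         add_request_fees(wallet_handle, Some(did), &req, &inputs, &outputs, None).wait()?;
//     let receipts = parse_response_with_fees(&method, &submit_response).wait()?;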
/// Parses response for Indy request with fees.
///
/// # Arguments
/// * `payment_method`
/// * `resp_json`: response for Indy request with fees
/// Note: this param will be used to determine payment_method
///
/// # Returns
/// * `utxo_json` - parsed (payment method and node version agnostic) utxo info as json
///
/// # Example
/// utxo_json
/// [{
/// input: <str>, // UTXO input
/// amount: <int>, // amount of tokens in this input
/// extra: <str>, // optional data from payment transaction
/// }]
pub fn parse_response_with_fees(payment_method: &str, resp_json: &str) -> Box<Future<Item=String, Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string();
let err = _parse_response_with_fees(command_handle, payment_method, resp_json, cb);
ResultHandler::str(command_handle, err, receiver)
}
fn _parse_response_with_fees(command_handle: IndyHandle, payment_method: &str, resp_json: &str, cb: Option<ResponseStringCB>) -> ErrorCode {
let payment_method = c_str!(payment_method);
let resp_json = c_str!(resp_json);
ErrorCode::from(unsafe { payments::indy_parse_response_with_fees(command_handle, payment_method.as_ptr(), resp_json.as_ptr(), cb) })
}
/// Builds Indy request for getting UTXO list for payment address
/// according to this payment method.
///
/// # Arguments
/// * `wallet_handle` - wallet handle
/// * `submitter_did` - DID of request sender
/// * `payment_address` - target payment address
///
/// # Returns
/// * `get_utxo_txn_json` - Indy request for getting UTXO list for payment address
/// * `payment_method`
pub fn build_get_payment_sources_request(wallet_handle: IndyHandle, submitter_did: Option<&str>, payment_address: &str) -> Box<Future<Item=(String, String), Error=IndyError>> {
    let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string_string();
let err = _build_get_payment_sources_request(command_handle, wallet_handle, submitter_did, payment_address, cb);
ResultHandler::str_str(command_handle, err, receiver)
}
fn _build_get_payment_sources_request(command_handle: IndyHandle, wallet_handle: IndyHandle, submitter_did: Option<&str>, payment_address: &str, cb: Option<ResponseStringStringCB>) -> ErrorCode {
let submitter_did_str = opt_c_str!(submitter_did);
let payment_address = c_str!(payment_address);
ErrorCode::from(unsafe { payments::indy_build_get_payment_sources_request(command_handle, wallet_handle, opt_c_ptr!(submitter_did, submitter_did_str), payment_address.as_ptr(), cb) })
}
/// Parses response for Indy request for getting UTXO list.
///
/// # Arguments
/// * `payment_method`
/// * `resp_json` - response for Indy request for getting UTXO list
/// Note: this param will be used to determine payment_method
///
/// # Returns
/// * `utxo_json` - parsed (payment method and node version agnostic) utxo info as json:
/// # Examples:
/// [{
/// input: <str>, // UTXO input
/// amount: <int>, // amount of tokens in this input
/// extra: <str>, // optional data from payment transaction
/// }]
pub fn parse_get_payment_sources_response(payment_method: &str, resp_json: &str) -> Box<Future<Item=String, Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string();
let err = _parse_get_payment_sources_response(command_handle, payment_method, resp_json, cb);
ResultHandler::str(command_handle, err, receiver)
}
fn _parse_get_payment_sources_response(command_handle: IndyHandle, payment_method: &str, resp_json: &str, cb: Option<ResponseStringCB>) -> ErrorCode {
let payment_method = c_str!(payment_method);
let resp_json = c_str!(resp_json);
ErrorCode::from(unsafe { payments::indy_parse_get_payment_sources_response(command_handle, payment_method.as_ptr(), resp_json.as_ptr(), cb) })
}
/// Builds Indy request for doing tokens payment
/// according to this payment method.
///
/// This method consumes a set of UTXO inputs and outputs.
///
/// The format of inputs is specific to the payment method. Usually it should reference a payment transaction
/// with at least one output that corresponds to a payment address that the user owns.
///
/// # Arguments
/// * `wallet_handle` - wallet handle
/// * `submitter_did` - DID of request sender
/// * `inputs_json` - The list of UTXO inputs as json array:
/// ["input1", ...]
/// Note that each input should reference paymentAddress
/// * `outputs_json` - The list of UTXO outputs as json array:
/// [{
/// paymentAddress: <str>, // payment address used as output
/// amount: <int>, // amount of tokens to transfer to this payment address
/// extra: <str>, // optional data
/// }]
///
/// # Returns
/// * `payment_req_json` - Indy request for doing tokens payment
/// * `payment_method`
pub fn build_payment_req(wallet_handle: IndyHandle, submitter_did: Option<&str>, inputs: &str, outputs: &str, extra: Option<&str>) -> Box<Future<Item=(String, String), Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string_string();
let err = _build_payment_req(command_handle, wallet_handle, submitter_did, inputs, outputs, extra, cb);
ResultHandler::str_str(command_handle, err, receiver)
}
fn _build_payment_req(command_handle: IndyHandle, wallet_handle: IndyHandle, submitter_did: Option<&str>, inputs: &str, outputs: &str, extra: Option<&str>, cb: Option<ResponseStringStringCB>) -> ErrorCode {
let submitter_did_str = opt_c_str!(submitter_did);
let inputs = c_str!(inputs);
let outputs = c_str!(outputs);
let extra_str = opt_c_str!(extra);
ErrorCode::from(unsafe {
payments::indy_build_payment_req(command_handle,
wallet_handle,
opt_c_ptr!(submitter_did, submitter_did_str),
inputs.as_ptr(),
outputs.as_ptr(),
opt_c_ptr!(extra, extra_str),
cb)
})
}
/// Parses response for Indy request for payment txn.
///
/// # Arguments
/// * `command_handle`
/// * `payment_method`
/// * `resp_json` - response for Indy request for payment txn
/// Note: this param will be used to determine payment_method
///
/// # Returns
/// * `utxo_json` - parsed (payment method and node version agnostic) utxo info as json
/// [{
/// input: <str>, // UTXO input
/// amount: <int>, // amount of tokens in this input
/// extra: <str>, // optional data from payment transaction
/// }]
pub fn parse_payment_response(payment_method: &str, resp_json: &str) -> Box<Future<Item=String, Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string();
let err = _parse_payment_response(command_handle, payment_method, resp_json, cb);
ResultHandler::str(command_handle, err, receiver)
}
fn _parse_payment_response(command_handle: IndyHandle, payment_method: &str, resp_json: &str, cb: Option<ResponseStringCB>) -> ErrorCode {
let payment_method = c_str!(payment_method);
let resp_json = c_str!(resp_json);
ErrorCode::from(unsafe { payments::indy_parse_payment_response(command_handle, payment_method.as_ptr(), resp_json.as_ptr(), cb) })
}
/// Builds Indy request for doing tokens minting
/// according to this payment method.
///
/// # Arguments
/// * `wallet_handle` - wallet handle
/// * `submitter_did` - DID of request sender
/// * `outputs_json` - The list of UTXO outputs as json array:
/// [{
/// paymentAddress: <str>, // payment address used as output
/// amount: <int>, // amount of tokens to transfer to this payment address
/// extra: <str>, // optional data
/// }]
///
/// # Returns
/// * `mint_req_json` - Indy request for doing tokens minting
/// * `payment_method`
pub fn build_mint_req(wallet_handle: IndyHandle, submitter_did: Option<&str>, outputs_json: &str, extra: Option<&str>) -> Box<Future<Item=(String, String), Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string_string();
let err = _build_mint_req(command_handle, wallet_handle, submitter_did, outputs_json, extra, cb);
ResultHandler::str_str(command_handle, err, receiver)
}
fn _build_mint_req(command_handle: IndyHandle, wallet_handle: IndyHandle, submitter_did: Option<&str>, outputs_json: &str, extra: Option<&str>, cb: Option<ResponseStringStringCB>) -> ErrorCode {
let submitter_did_str = opt_c_str!(submitter_did);
let outputs_json = c_str!(outputs_json);
let extra_str = opt_c_str!(extra);
ErrorCode::from(unsafe { payments::indy_build_mint_req(command_handle, wallet_handle, opt_c_ptr!(submitter_did, submitter_did_str), outputs_json.as_ptr(), opt_c_ptr!(extra, extra_str), cb) })
}
/// Builds Indy request for setting fees for transactions in the ledger
///
/// # Arguments
/// * `wallet_handle` - wallet handle
/// * `submitter_did` - DID of request sender
/// * `payment_method`
/// * `fees_json` - {
/// txnType1: amount1,
/// txnType2: amount2,
/// .................
/// txnTypeN: amountN,
/// }
///
/// # Returns
/// * `set_txn_fees_json` - Indy request for setting fees for transactions in the ledger
pub fn build_set_txn_fees_req(wallet_handle: IndyHandle, submitter_did: Option<&str>, payment_method: &str, fees_json: &str) -> Box<Future<Item=String, Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string();
let err = _build_set_txn_fees_req(command_handle, wallet_handle, submitter_did, payment_method, fees_json, cb);
ResultHandler::str(command_handle, err, receiver)
}
fn _build_set_txn_fees_req(command_handle: IndyHandle, wallet_handle: IndyHandle, submitter_did: Option<&str>, payment_method: &str, fees_json: &str, cb: Option<ResponseStringCB>) -> ErrorCode {
let submitter_did_str = opt_c_str!(submitter_did);
let payment_method = c_str!(payment_method);
let fees_json = c_str!(fees_json);
ErrorCode::from(unsafe { payments::indy_build_set_txn_fees_req(command_handle, wallet_handle, opt_c_ptr!(submitter_did, submitter_did_str), payment_method.as_ptr(), fees_json.as_ptr(), cb) })
}
/// Builds Indy get request for getting fees for transactions in the ledger
///
/// # Arguments
/// * `command_handle`
/// * `wallet_handle` - wallet handle
/// * `submitter_did` - DID of request sender
/// * `payment_method`
///
/// # Returns
/// * `get_txn_fees_json` - Indy request for getting fees for transactions in the ledger
pub fn build_get_txn_fees_req(wallet_handle: IndyHandle, submitter_did: Option<&str>, payment_method: &str) -> Box<Future<Item=String, Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string();
let err = _build_get_txn_fees_req(command_handle, wallet_handle, submitter_did, payment_method, cb);
ResultHandler::str(command_handle, err, receiver)
}
fn _build_get_txn_fees_req(command_handle: IndyHandle, wallet_handle: IndyHandle, submitter_did: Option<&str>, payment_method: &str, cb: Option<ResponseStringCB>) -> ErrorCode {
let submitter_did_str = opt_c_str!(submitter_did);
let payment_method = c_str!(payment_method);
ErrorCode::from(unsafe { payments::indy_build_get_txn_fees_req(command_handle, wallet_handle, opt_c_ptr!(submitter_did, submitter_did_str), payment_method.as_ptr(), cb) })
}
/// Parses response for Indy request for getting fees
///
/// # Arguments
/// * `command_handle`
/// * `payment_method`
/// * `resp_json` - response for Indy request for getting fees
///
/// # Returns
/// * `fees_json` {
/// txnType1: amount1,
/// txnType2: amount2,
/// .................
/// txnTypeN: amountN,
/// }
pub fn parse_get_txn_fees_response(payment_method: &str, resp_json: &str) -> Box<Future<Item=String, Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string();
let err = _parse_get_txn_fees_response(command_handle, payment_method, resp_json, cb);
ResultHandler::str(command_handle, err, receiver)
}
fn _parse_get_txn_fees_response(command_handle: IndyHandle, payment_method: &str, resp_json: &str, cb: Option<ResponseStringCB>) -> ErrorCode {
let payment_method = c_str!(payment_method);
let resp_json = c_str!(resp_json);
ErrorCode::from(unsafe { payments::indy_parse_get_txn_fees_response(command_handle, payment_method.as_ptr(), resp_json.as_ptr(), cb) })
}
pub fn build_verify_payment_req(wallet_handle: IndyHandle, submitter_did: Option<&str>, receipt: &str) -> Box<Future<Item=(String, String), Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string_string();
let err = _build_verify_req(command_handle, wallet_handle, submitter_did, receipt, cb);
ResultHandler::str_str(command_handle, err, receiver)
}
fn _build_verify_req(command_handle: IndyHandle, wallet_handle: IndyHandle, submitter_did: Option<&str>, receipt: &str, cb: Option<ResponseStringStringCB>) -> ErrorCode {
let submitter_did_str = opt_c_str!(submitter_did);
let receipt = c_str!(receipt);
ErrorCode::from(unsafe {
payments::indy_build_verify_payment_req(command_handle, wallet_handle, opt_c_ptr!(submitter_did, submitter_did_str), receipt.as_ptr(), cb)
})
}
pub fn parse_verify_payment_response(payment_method: &str, resp_json: &str) -> Box<Future<Item=String, Error=IndyError>> {
let (receiver, command_handle, cb) = ClosureHandler::cb_ec_string();
let err = _parse_verify_response(command_handle, payment_method, resp_json, cb);
ResultHandler::str(command_handle, err, receiver)
}
fn _parse_verify_response(command_handle: IndyHandle, payment_method: &str, resp_json: &str, cb: Option<ResponseStringCB>) -> ErrorCode {
let payment_method = c_str!(payment_method);
let resp_json = c_str!(resp_json);
ErrorCode::from(unsafe {
payments::indy_parse_verify_payment_response(command_handle, payment_method.as_ptr(), resp_json.as_ptr(), cb)
})
}
| 42.524838 | 206 | 0.694195 |
71f00cf89818626ea7d4a64668767595eb09110b | 4,443 | use crate::{
component_declaration::FieldDefinition,
storage_context::{ParseContext, ParseWithContext},
};
use core::marker::PhantomData;
use proc_macro2::{Span, TokenStream};
use quote::{quote, quote_spanned, ToTokens};
use syn::{
braced, parenthesized,
parse::{ParseStream, Result},
punctuated, AttrStyle, Attribute, Error, Ident, Path, Token, Type,
};
use take_mut::take;
pub struct CaptureDefinition<C> {
access: TokenStream,
_phantom: PhantomData<C>,
}
pub mod kw {
syn::custom_keyword!(pin);
}
impl<C> ParseWithContext for CaptureDefinition<C> {
type Output = Option<Self>;
fn parse_with_context(input: ParseStream<'_>, cx: &mut ParseContext) -> Result<Self::Output> {
let mut attributes = input.call(Attribute::parse_outer)?;
let pin: Option<kw::pin> = input.parse()?;
input.parse::<Token![|]>()?;
attributes.append(
&mut input
.call(Attribute::parse_inner)?
.into_iter()
.map(|inner| Attribute {
style: AttrStyle::Outer,
..inner
})
.collect(),
);
attributes.append(&mut input.call(Attribute::parse_outer)?);
let visibility = input.parse()?;
let name: Ident = input.parse()?;
let field_type;
let initial_value;
let shorthand_lookahead = input.lookahead1();
if shorthand_lookahead.peek(Token![:]) {
// Long form
input.parse::<Token![:]>()?;
field_type = {
let field_type = input.call(Type::without_plus)?;
quote!(#field_type)
};
let initial_value_buffer;
input.parse::<Token![=]>()?;
let brace = braced!(initial_value_buffer in input);
let initial_value_tokens: TokenStream = initial_value_buffer.parse()?;
let brace_span = proc_macro::Span::mixed_site()
.located_at(brace.span.unstable())
.into();
initial_value = quote_spanned! (brace_span=> {#initial_value_tokens});
} else if shorthand_lookahead.peek(Token![=]) {
// Shorthand
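			// e.g. `|field = some::path::Type::new(args)?|`: the last path
			// segment is treated as the constructor name and the preceding
			// segments as the field type, so both can be rebuilt below.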
input.parse::<Token![=]>()?;
// Supporting ExprPath here would be better, but considerably more complicated.
let path: Path = input.parse()?;
if path.segments.len() < 2 {
return Err(Error::new_spanned(
path,
"Expected qualified path to constructor.",
));
}
let type_path_colon = path.leading_colon;
let segments: Vec<_> = path.segments.into_pairs().collect();
let (constructor_name, type_path_segments) = segments.split_last().unwrap();
let mut type_path_segments: Vec<_> = type_path_segments.to_vec();
let mut constructor_punct = Default::default();
{
let last_i = type_path_segments.len() - 1;
take(
&mut type_path_segments[last_i],
|last_in_type| match last_in_type {
punctuated::Pair::Punctuated(a_type, punct) => {
constructor_punct = punct;
punctuated::Pair::End(a_type)
}
_ => unreachable!(),
},
);
}
if let punctuated::Pair::Punctuated(_, trailing) = constructor_name {
return Err(Error::new_spanned(
trailing,
"Expected path ending with constructor name.",
));
}
field_type = quote! {
#type_path_colon#(#type_path_segments)*
};
//TODO: This is a bit hacky with regards to chaining and error escalation.
let parameters;
let paren = parenthesized!(parameters in input);
let parameters: TokenStream = parameters.parse()?;
let question: Option<Token![?]> = input.parse().ok();
initial_value = quote_spanned! {paren.span=>
#field_type#constructor_punct#constructor_name(#parameters)#question
}
} else {
return Err(shorthand_lookahead.error());
}
input.parse::<Token![|]>()?;
let field_definition = FieldDefinition {
attributes,
visibility,
name: name.clone(),
field_type,
initial_value,
structurally_pinned: pin.is_some(),
};
cx.storage_context.push(field_definition);
let access = {
if input.peek(Token![;]) {
input.parse::<Token![;]>()?;
None
} else {
Some(if let Some(pin) = pin {
let pinned_name = Ident::new(&format!("{}_pinned", name), name.span());
let pin_parens = quote_spanned!(pin.span=> ());
quote_spanned!(pinned_name.span().resolved_at(Span::mixed_site())=> this.#pinned_name#pin_parens)
} else {
quote_spanned!(name.span().resolved_at(Span::mixed_site())=> this.#name)
})
}
};
Ok(access.map(|access| Self {
access,
_phantom: PhantomData,
}))
}
}
impl<C> ToTokens for CaptureDefinition<C> {
fn to_tokens(&self, output: &mut TokenStream) {
self.access.to_tokens(output);
}
}
| 26.927273 | 102 | 0.665766 |
39c9baeef457f21ca39844621d41b1c58d660710 | 4,685 | use std::path::{Path, PathBuf};
use anyhow::{bail, Context, Result};
use clap::{Args, Subcommand};
use super::{configure::Configuration, ChildBinary};
#[derive(Debug, Args)]
pub(super) struct DeviceArgs {
#[clap(subcommand)]
command: DeviceSubcommand,
#[clap(long)]
device_credential_location: PathBuf,
}
#[derive(Debug, Subcommand)]
enum DeviceSubcommand {
Manufacture(DeviceManufactureArgs),
Analyze,
Run(DeviceRunArgs),
}
#[derive(Debug, Args)]
struct DeviceManufactureArgs {
#[clap(long)]
device_info: String,
}
#[derive(Debug, Args)]
struct DeviceRunArgs {
#[clap(long)]
allow_noninteroperable_kdf: bool,
}
async fn run_device(
_aio_dir: PathBuf,
binary_path: PathBuf,
_configuration: &Configuration,
args: &DeviceArgs,
run_args: &DeviceRunArgs,
) -> Result<()> {
let marker_location = format!(
"{}.marker",
args.device_credential_location.to_string_lossy()
);
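    // The marker file path is derived from the credential location;
    // client-linuxapp receives it via DEVICE_ONBOARDING_EXECUTED_MARKER_FILE_PATH
    // to record that onboarding has already been executed on this device.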
println!("========== STARTING CLIENT ==========");
let mut command =
tokio::process::Command::new(binary_path.join(ChildBinary::ClientLinuxapp.binary_name()));
command
.env("LOG_LEVEL", "trace")
.env("DEVICE_CREDENTIAL", &args.device_credential_location)
.env(
"DEVICE_ONBOARDING_EXECUTED_MARKER_FILE_PATH",
&marker_location,
)
.kill_on_drop(true);
if run_args.allow_noninteroperable_kdf {
command.env("ALLOW_NONINTEROPERABLE_KDF", "true");
}
let status = command
.status()
.await
.context("Error starting the client")?;
println!(
"========== CLIENT ENDED WITH STATUS: {:?} ==========",
status
);
if status.success() {
log::info!("Device onboarding completed");
Ok(())
} else {
bail!("Client failed with status: {:?}", status);
}
}
async fn manufacture_device(
aio_dir: PathBuf,
binary_path: PathBuf,
configuration: &Configuration,
args: &DeviceArgs,
mfg_args: &DeviceManufactureArgs,
) -> Result<()> {
println!("========== STARTING MANUFACTURING CLIENT ==========");
let status = tokio::process::Command::new(
binary_path.join(ChildBinary::ManufacturingClient.binary_name()),
)
.env("LOG_LEVEL", "trace")
.env(
"DIUN_PUB_KEY_ROOTCERTS",
aio_dir.join("keys").join("diun_cert.pem"),
)
.env(
"MANUFACTURING_SERVER_URL",
format!(
"http://localhost:{}", //DevSkim: ignore DS137138
configuration.listen_port_manufacturing_server
),
)
.env("DI_MFG_STRING_TYPE", "serialnumber")
.env("MANUFACTURING_INFO", &mfg_args.device_info)
.env(
"DEVICE_CREDENTIAL_FILENAME",
&args.device_credential_location,
)
.kill_on_drop(true)
.status()
.await
.context("Error running manufacturing client")?;
println!(
"========== MANUFACTURING CLIENT ENDED WITH STATUS: {:?} ==========",
status
);
if status.success() {
log::info!("Device manufacturing completed");
print_device_credential(binary_path, &args.device_credential_location).await
} else {
bail!("Manufacturing client failed with status: {:?}", status);
}
}
async fn print_device_credential(
binary_path: PathBuf,
device_credential_path: &Path,
) -> Result<()> {
let status =
tokio::process::Command::new(binary_path.join(ChildBinary::OwnerTool.binary_name()))
.arg("dump-device-credential")
.args(device_credential_path)
.status()
.await
.context("Error running owner-tool to dump device credential")?;
if status.success() {
Ok(())
} else {
bail!("Owner-tool failed with status: {:?}", status)
}
}
pub(super) async fn run_device_subcommand(
aio_dir: PathBuf,
binary_path: PathBuf,
configuration: &Configuration,
args: &DeviceArgs,
) -> Result<()> {
match &args.command {
DeviceSubcommand::Manufacture(mfg_args) => {
manufacture_device(aio_dir, binary_path, configuration, args, mfg_args)
.await
.context("Error manufacturing device")
}
DeviceSubcommand::Analyze => {
print_device_credential(binary_path, &args.device_credential_location)
.await
.context("Error analyzing device credential")
}
DeviceSubcommand::Run(run_args) => {
run_device(aio_dir, binary_path, configuration, args, run_args)
.await
.context("Error running device client")
}
}
}
| 27.558824 | 98 | 0.60683 |
69b0b90bfafdbcec5c837ee3b7aa0ae7d7aa999b | 336 | use alloc::alloc::Layout;
use core::ptr::null_mut;
use crate::kernel::kernel::*;
use crate::include::asm::hlt;
#[alloc_error_handler]
pub unsafe fn alloc_error(_layout: Layout) -> ! {
x86_printk!("\nAlloc memory error");
x86_printk!("Memory layout:");
x86_printk!(" {:?}", _layout);
panic!("Alloc memory error");
}
| 24 | 49 | 0.651786 |
212aeb0406ebadaae897c2b8689bcfadfffe5c75 | 4,446 | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Unix-specific extensions to primitives in the `std::process` module.
#![stable(feature = "rust1", since = "1.0.0")]
use os::unix::raw::{uid_t, gid_t};
use os::unix::io::{FromRawFd, RawFd, AsRawFd, IntoRawFd};
use process;
use sys;
use sys_common::{AsInnerMut, AsInner, FromInner, IntoInner};
/// Unix-specific extensions to the `std::process::Command` builder
#[stable(feature = "rust1", since = "1.0.0")]
pub trait CommandExt {
/// Sets the child process's user id. This translates to a
/// `setuid` call in the child process. Failure in the `setuid`
/// call will cause the spawn to fail.
#[stable(feature = "rust1", since = "1.0.0")]
fn uid(&mut self, id: uid_t) -> &mut process::Command;
/// Similar to `uid`, but sets the group id of the child process. This has
/// the same semantics as the `uid` field.
#[stable(feature = "rust1", since = "1.0.0")]
fn gid(&mut self, id: gid_t) -> &mut process::Command;
/// Create a new session (cf. `setsid(2)`) for the child process. This means
/// that the child is the leader of a new process group. The parent process
/// remains the child reaper of the new process.
///
/// This is not enough to create a daemon process. The *init* process should
    /// be the child reaper of a daemon. This can be achieved if the parent
    /// process exits. Moreover, a daemon should not have a controlling terminal.
/// To achieve this, a session leader (the child) must spawn another process
/// (the daemon) in the same session.
#[unstable(feature = "process_session_leader", reason = "recently added",
issue = "27811")]
fn session_leader(&mut self, on: bool) -> &mut process::Command;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl CommandExt for process::Command {
fn uid(&mut self, id: uid_t) -> &mut process::Command {
self.as_inner_mut().uid(id);
self
}
fn gid(&mut self, id: gid_t) -> &mut process::Command {
self.as_inner_mut().gid(id);
self
}
fn session_leader(&mut self, on: bool) -> &mut process::Command {
self.as_inner_mut().session_leader(on);
self
}
}
/// Unix-specific extensions to `std::process::ExitStatus`
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ExitStatusExt {
/// If the process was terminated by a signal, returns that signal.
#[stable(feature = "rust1", since = "1.0.0")]
fn signal(&self) -> Option<i32>;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ExitStatusExt for process::ExitStatus {
fn signal(&self) -> Option<i32> {
self.as_inner().signal()
}
}
#[stable(feature = "process_extensions", since = "1.2.0")]
impl FromRawFd for process::Stdio {
unsafe fn from_raw_fd(fd: RawFd) -> process::Stdio {
process::Stdio::from_inner(sys::fd::FileDesc::new(fd))
}
}
#[stable(feature = "process_extensions", since = "1.2.0")]
impl AsRawFd for process::ChildStdin {
fn as_raw_fd(&self) -> RawFd {
self.as_inner().fd().raw()
}
}
#[stable(feature = "process_extensions", since = "1.2.0")]
impl AsRawFd for process::ChildStdout {
fn as_raw_fd(&self) -> RawFd {
self.as_inner().fd().raw()
}
}
#[stable(feature = "process_extensions", since = "1.2.0")]
impl AsRawFd for process::ChildStderr {
fn as_raw_fd(&self) -> RawFd {
self.as_inner().fd().raw()
}
}
#[stable(feature = "process_extensions", since = "1.2.0")]
impl IntoRawFd for process::ChildStdin {
fn into_raw_fd(self) -> RawFd {
self.into_inner().into_fd().into_raw()
}
}
#[stable(feature = "process_extensions", since = "1.2.0")]
impl IntoRawFd for process::ChildStdout {
fn into_raw_fd(self) -> RawFd {
self.into_inner().into_fd().into_raw()
}
}
#[stable(feature = "process_extensions", since = "1.2.0")]
impl IntoRawFd for process::ChildStderr {
fn into_raw_fd(self) -> RawFd {
self.into_inner().into_fd().into_raw()
}
}
| 34.2 | 80 | 0.646199 |
62867afaeec9556964c85a94d35cde5719251941 | 4,575 | use futures::future::join_all;
use futures::future::try_join;
use futures::FutureExt;
use std::error::Error;
use std::net::{IpAddr, SocketAddr};
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::thread;
use tokio;
use tokio::io;
use tokio::net;
use crate::resolver;
use realm::RelayConfig;
// Initialize DNS resolver
// Set up channel between listener and resolver
pub async fn start_relay(configs: Vec<RelayConfig>) {
    let default_ip: IpAddr = "0.0.0.0".parse().unwrap();
let remote_addrs: Vec<String> = configs.iter().map(|x| x.remote_address.clone()).collect();
let mut remote_ips: Vec<Arc<RwLock<std::net::IpAddr>>> = Vec::new();
for _ in 0..remote_addrs.len() {
remote_ips.push(Arc::new(RwLock::new(default_ip.clone())))
}
let cloned_remote_ips = remote_ips.clone();
thread::spawn(move || resolver::resolve(remote_addrs, cloned_remote_ips));
resolver::print_ips(&remote_ips);
let mut iter = configs.into_iter().zip(remote_ips);
let mut runners = vec![];
while let Some((config, remote_ip)) = iter.next() {
runners.push(tokio::spawn(run(config, remote_ip)));
}
join_all(runners).await;
}
pub async fn run(config: RelayConfig, remote_ip: Arc<RwLock<IpAddr>>) {
let client_socket: SocketAddr =
format!("{}:{}", config.listening_address, config.listening_port)
.parse()
.unwrap();
let mut tcp_listener = net::TcpListener::bind(&client_socket).await.unwrap();
let mut remote_socket: SocketAddr =
format!("{}:{}", remote_ip.read().unwrap(), config.remote_port)
.parse()
.unwrap();
// Start UDP connection
let udp_remote_ip = remote_ip.clone();
thread::spawn(move || udp_transfer(client_socket.clone(), remote_socket.port(), udp_remote_ip));
// Start TCP connection
while let Ok((inbound, _)) = tcp_listener.accept().await {
remote_socket = format!("{}:{}", &(remote_ip.read().unwrap()), config.remote_port)
.parse()
.unwrap();
let transfer = transfer_tcp(inbound, remote_socket.clone()).map(|r| {
if let Err(_) = r {
return;
}
});
tokio::spawn(transfer);
}
}
// Two thread here
// 1. Receive packets and justify the forward destination. Then send packets to the second thread
// 2. Send all packets instructed by the first thread
fn udp_transfer(
local_socket: SocketAddr,
remote_port: u16,
remote_ip: Arc<RwLock<IpAddr>>,
) -> Result<(), io::Error> {
let sender = std::net::UdpSocket::bind(&local_socket).unwrap();
let receiver = sender.try_clone().unwrap();
let mut sender_vec = Vec::new();
let (packet_sender, packet_receiver) = mpsc::channel::<([u8; 2048], usize, SocketAddr)>();
// Start a new thread to send out packets
thread::spawn(move || loop {
if let Ok((data, size, client)) = packet_receiver.recv() {
if let Err(e) = sender.send_to(&data[..size], client) {
println!("failed to send UDP packet to {}, {}", client, e);
}
}
});
// Receive packets
    // Store each client's source address in a FIFO queue so replies can be routed back
    // Send instructions to the above thread
loop {
let mut buf = [0u8; 2048];
let (size, from) = receiver.recv_from(&mut buf).unwrap();
let remote_socket: SocketAddr = format!("{}:{}", remote_ip.read().unwrap(), remote_port)
.parse()
.unwrap();
match from != remote_socket {
true => {
// forward
sender_vec.push(from);
packet_sender
.send((buf, size, remote_socket.clone()))
.unwrap();
}
false => {
// backward
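                // Replies are matched to waiting clients purely by FIFO
                // order, which assumes the remote end answers packets in the
                // same order they were forwarded.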
                if sender_vec.is_empty() {
continue;
}
let client_socket = sender_vec.remove(0);
packet_sender.send((buf, size, client_socket)).unwrap();
}
}
}
}
async fn transfer_tcp(
mut inbound: net::TcpStream,
remote_socket: SocketAddr,
) -> Result<(), Box<dyn Error>> {
let mut outbound = net::TcpStream::connect(remote_socket).await?;
let (mut ri, mut wi) = inbound.split();
let (mut ro, mut wo) = outbound.split();
let client_to_server = io::copy(&mut ri, &mut wo);
let server_to_client = io::copy(&mut ro, &mut wi);
try_join(client_to_server, server_to_client).await?;
Ok(())
}
| 32.446809 | 100 | 0.59541 |
0ac899d5e3158335da137adc16937f5bcb1cfaca | 547 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// -*- rust -*-
fn dont_call_me() { fail!(); debug!(1); }
pub fn main() { }
| 30.388889 | 68 | 0.703839 |
3a1736c9fa3eb48a9b2f6e4bd569fbcf4152b014 | 35,080 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! This crate contains code generated from the Ballista Protocol Buffer Definition as well
//! as convenience code for interacting with the generated code.
use prost::bytes::{Buf, BufMut};
use std::fmt::Debug;
use std::marker::PhantomData;
use std::sync::Arc;
use std::{convert::TryInto, io::Cursor};
use datafusion::arrow::datatypes::{IntervalUnit, UnionMode};
use datafusion::logical_plan::{
JoinConstraint, JoinType, LogicalPlan, Operator, UserDefinedLogicalNode,
};
use datafusion::physical_plan::aggregates::AggregateFunction;
use datafusion::physical_plan::window_functions::BuiltInWindowFunction;
use crate::{error::BallistaError, serde::scheduler::Action as BallistaAction};
use datafusion::logical_plan::plan::Extension;
use datafusion::physical_plan::ExecutionPlan;
use datafusion::prelude::ExecutionContext;
use prost::Message;
// include the generated protobuf source as a submodule
#[allow(clippy::all)]
pub mod protobuf {
include!(concat!(env!("OUT_DIR"), "/ballista.protobuf.rs"));
}
pub mod logical_plan;
pub mod physical_plan;
pub mod scheduler;
pub fn decode_protobuf(bytes: &[u8]) -> Result<BallistaAction, BallistaError> {
let mut buf = Cursor::new(bytes);
protobuf::Action::decode(&mut buf)
.map_err(|e| BallistaError::Internal(format!("{:?}", e)))
.and_then(|node| node.try_into())
}
pub(crate) fn proto_error<S: Into<String>>(message: S) -> BallistaError {
BallistaError::General(message.into())
}
pub trait AsLogicalPlan: Debug + Send + Sync + Clone {
fn try_decode(buf: &[u8]) -> Result<Self, BallistaError>
where
Self: Sized;
fn try_encode<B>(&self, buf: &mut B) -> Result<(), BallistaError>
where
B: BufMut,
Self: Sized;
fn try_into_logical_plan(
&self,
ctx: &ExecutionContext,
extension_codec: &dyn LogicalExtensionCodec,
) -> Result<LogicalPlan, BallistaError>;
fn try_from_logical_plan(
plan: &LogicalPlan,
extension_codec: &dyn LogicalExtensionCodec,
) -> Result<Self, BallistaError>
where
Self: Sized;
}
pub trait LogicalExtensionCodec: Debug + Send + Sync {
fn try_decode(
&self,
buf: &[u8],
inputs: &[LogicalPlan],
) -> Result<Extension, BallistaError>;
fn try_encode(
&self,
node: &Extension,
buf: &mut Vec<u8>,
) -> Result<(), BallistaError>;
}
#[derive(Debug, Clone)]
pub struct DefaultLogicalExtensionCodec {}
impl LogicalExtensionCodec for DefaultLogicalExtensionCodec {
fn try_decode(
&self,
_buf: &[u8],
_inputs: &[LogicalPlan],
) -> Result<Extension, BallistaError> {
Err(BallistaError::NotImplemented(
"LogicalExtensionCodec is not provided".to_string(),
))
}
fn try_encode(
&self,
_node: &Extension,
_buf: &mut Vec<u8>,
) -> Result<(), BallistaError> {
Err(BallistaError::NotImplemented(
"LogicalExtensionCodec is not provided".to_string(),
))
}
}
pub trait AsExecutionPlan: Debug + Send + Sync + Clone {
fn try_decode(buf: &[u8]) -> Result<Self, BallistaError>
where
Self: Sized;
fn try_encode<B>(&self, buf: &mut B) -> Result<(), BallistaError>
where
B: BufMut,
Self: Sized;
fn try_into_physical_plan(
&self,
ctx: &ExecutionContext,
extension_codec: &dyn PhysicalExtensionCodec,
) -> Result<Arc<dyn ExecutionPlan>, BallistaError>;
fn try_from_physical_plan(
plan: Arc<dyn ExecutionPlan>,
extension_codec: &dyn PhysicalExtensionCodec,
) -> Result<Self, BallistaError>
where
Self: Sized;
}
pub trait PhysicalExtensionCodec: Debug + Send + Sync {
fn try_decode(
&self,
buf: &[u8],
inputs: &[Arc<dyn ExecutionPlan>],
) -> Result<Arc<dyn ExecutionPlan>, BallistaError>;
fn try_encode(
&self,
node: Arc<dyn ExecutionPlan>,
buf: &mut Vec<u8>,
) -> Result<(), BallistaError>;
}
#[derive(Debug, Clone)]
pub struct DefaultPhysicalExtensionCodec {}
impl PhysicalExtensionCodec for DefaultPhysicalExtensionCodec {
fn try_decode(
&self,
_buf: &[u8],
_inputs: &[Arc<dyn ExecutionPlan>],
) -> Result<Arc<dyn ExecutionPlan>, BallistaError> {
Err(BallistaError::NotImplemented(
"PhysicalExtensionCodec is not provided".to_string(),
))
}
fn try_encode(
&self,
_node: Arc<dyn ExecutionPlan>,
_buf: &mut Vec<u8>,
) -> Result<(), BallistaError> {
Err(BallistaError::NotImplemented(
"PhysicalExtensionCodec is not provided".to_string(),
))
}
}
#[derive(Clone, Debug)]
pub struct BallistaCodec<T: 'static + AsLogicalPlan, U: 'static + AsExecutionPlan> {
logical_extension_codec: Arc<dyn LogicalExtensionCodec>,
physical_extension_codec: Arc<dyn PhysicalExtensionCodec>,
logical_plan_repr: PhantomData<T>,
physical_plan_repr: PhantomData<U>,
}
impl<T: 'static + AsLogicalPlan, U: 'static + AsExecutionPlan> Default
for BallistaCodec<T, U>
{
fn default() -> Self {
Self {
logical_extension_codec: Arc::new(DefaultLogicalExtensionCodec {}),
physical_extension_codec: Arc::new(DefaultPhysicalExtensionCodec {}),
logical_plan_repr: PhantomData,
physical_plan_repr: PhantomData,
}
}
}
impl<T: 'static + AsLogicalPlan, U: 'static + AsExecutionPlan> BallistaCodec<T, U> {
pub fn new(
logical_extension_codec: Arc<dyn LogicalExtensionCodec>,
physical_extension_codec: Arc<dyn PhysicalExtensionCodec>,
) -> Self {
Self {
logical_extension_codec,
physical_extension_codec,
logical_plan_repr: PhantomData,
physical_plan_repr: PhantomData,
}
}
pub fn logical_extension_codec(&self) -> &dyn LogicalExtensionCodec {
self.logical_extension_codec.as_ref()
}
pub fn physical_extension_codec(&self) -> &dyn PhysicalExtensionCodec {
self.physical_extension_codec.as_ref()
}
}
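// A plain `BallistaCodec::<LogicalPlanNode, PhysicalPlanNode>::default()` is
// sufficient when plans contain no user-defined `Extension` nodes; custom
// logical/physical extension codecs (as exercised in the tests below) are only
// needed for such nodes.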
#[macro_export]
macro_rules! convert_required {
($PB:expr) => {{
if let Some(field) = $PB.as_ref() {
field.try_into()
} else {
Err(proto_error("Missing required field in protobuf"))
}
}};
}
#[macro_export]
macro_rules! into_required {
($PB:expr) => {{
if let Some(field) = $PB.as_ref() {
Ok(field.into())
} else {
Err(proto_error("Missing required field in protobuf"))
}
}};
}
#[macro_export]
macro_rules! convert_box_required {
($PB:expr) => {{
if let Some(field) = $PB.as_ref() {
field.as_ref().try_into()
} else {
Err(proto_error("Missing required field in protobuf"))
}
}};
}
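// Usage sketch (illustrative): given an optional protobuf field `pb.schema`,
// `let schema: Schema = convert_required!(pb.schema)?;` either converts the
// present value with `try_into` or returns a "Missing required field" error.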
pub(crate) fn from_proto_binary_op(op: &str) -> Result<Operator, BallistaError> {
match op {
"And" => Ok(Operator::And),
"Or" => Ok(Operator::Or),
"Eq" => Ok(Operator::Eq),
"NotEq" => Ok(Operator::NotEq),
"LtEq" => Ok(Operator::LtEq),
"Lt" => Ok(Operator::Lt),
"Gt" => Ok(Operator::Gt),
"GtEq" => Ok(Operator::GtEq),
"Plus" => Ok(Operator::Plus),
"Minus" => Ok(Operator::Minus),
"Multiply" => Ok(Operator::Multiply),
"Divide" => Ok(Operator::Divide),
"Modulo" => Ok(Operator::Modulo),
"Like" => Ok(Operator::Like),
"NotLike" => Ok(Operator::NotLike),
other => Err(proto_error(format!(
"Unsupported binary operator '{:?}'",
other
))),
}
}
impl From<protobuf::AggregateFunction> for AggregateFunction {
fn from(agg_fun: protobuf::AggregateFunction) -> AggregateFunction {
match agg_fun {
protobuf::AggregateFunction::Min => AggregateFunction::Min,
protobuf::AggregateFunction::Max => AggregateFunction::Max,
protobuf::AggregateFunction::Sum => AggregateFunction::Sum,
protobuf::AggregateFunction::Avg => AggregateFunction::Avg,
protobuf::AggregateFunction::Count => AggregateFunction::Count,
protobuf::AggregateFunction::ApproxDistinct => {
AggregateFunction::ApproxDistinct
}
protobuf::AggregateFunction::ArrayAgg => AggregateFunction::ArrayAgg,
protobuf::AggregateFunction::Variance => AggregateFunction::Variance,
protobuf::AggregateFunction::VariancePop => AggregateFunction::VariancePop,
protobuf::AggregateFunction::Covariance => AggregateFunction::Covariance,
protobuf::AggregateFunction::CovariancePop => {
AggregateFunction::CovariancePop
}
protobuf::AggregateFunction::Stddev => AggregateFunction::Stddev,
protobuf::AggregateFunction::StddevPop => AggregateFunction::StddevPop,
protobuf::AggregateFunction::Correlation => AggregateFunction::Correlation,
protobuf::AggregateFunction::ApproxPercentileCont => {
AggregateFunction::ApproxPercentileCont
}
protobuf::AggregateFunction::ApproxMedian => AggregateFunction::ApproxMedian,
}
}
}
impl From<protobuf::BuiltInWindowFunction> for BuiltInWindowFunction {
fn from(built_in_function: protobuf::BuiltInWindowFunction) -> Self {
match built_in_function {
protobuf::BuiltInWindowFunction::RowNumber => {
BuiltInWindowFunction::RowNumber
}
protobuf::BuiltInWindowFunction::Rank => BuiltInWindowFunction::Rank,
protobuf::BuiltInWindowFunction::PercentRank => {
BuiltInWindowFunction::PercentRank
}
protobuf::BuiltInWindowFunction::DenseRank => {
BuiltInWindowFunction::DenseRank
}
protobuf::BuiltInWindowFunction::Lag => BuiltInWindowFunction::Lag,
protobuf::BuiltInWindowFunction::Lead => BuiltInWindowFunction::Lead,
protobuf::BuiltInWindowFunction::FirstValue => {
BuiltInWindowFunction::FirstValue
}
protobuf::BuiltInWindowFunction::CumeDist => BuiltInWindowFunction::CumeDist,
protobuf::BuiltInWindowFunction::Ntile => BuiltInWindowFunction::Ntile,
protobuf::BuiltInWindowFunction::NthValue => BuiltInWindowFunction::NthValue,
protobuf::BuiltInWindowFunction::LastValue => {
BuiltInWindowFunction::LastValue
}
}
}
}
impl TryInto<datafusion::arrow::datatypes::DataType>
for &protobuf::arrow_type::ArrowTypeEnum
{
type Error = BallistaError;
fn try_into(self) -> Result<datafusion::arrow::datatypes::DataType, Self::Error> {
use datafusion::arrow::datatypes::DataType;
use protobuf::arrow_type;
Ok(match self {
arrow_type::ArrowTypeEnum::None(_) => DataType::Null,
arrow_type::ArrowTypeEnum::Bool(_) => DataType::Boolean,
arrow_type::ArrowTypeEnum::Uint8(_) => DataType::UInt8,
arrow_type::ArrowTypeEnum::Int8(_) => DataType::Int8,
arrow_type::ArrowTypeEnum::Uint16(_) => DataType::UInt16,
arrow_type::ArrowTypeEnum::Int16(_) => DataType::Int16,
arrow_type::ArrowTypeEnum::Uint32(_) => DataType::UInt32,
arrow_type::ArrowTypeEnum::Int32(_) => DataType::Int32,
arrow_type::ArrowTypeEnum::Uint64(_) => DataType::UInt64,
arrow_type::ArrowTypeEnum::Int64(_) => DataType::Int64,
arrow_type::ArrowTypeEnum::Float16(_) => DataType::Float16,
arrow_type::ArrowTypeEnum::Float32(_) => DataType::Float32,
arrow_type::ArrowTypeEnum::Float64(_) => DataType::Float64,
arrow_type::ArrowTypeEnum::Utf8(_) => DataType::Utf8,
arrow_type::ArrowTypeEnum::LargeUtf8(_) => DataType::LargeUtf8,
arrow_type::ArrowTypeEnum::Binary(_) => DataType::Binary,
arrow_type::ArrowTypeEnum::FixedSizeBinary(size) => {
DataType::FixedSizeBinary(*size)
}
arrow_type::ArrowTypeEnum::LargeBinary(_) => DataType::LargeBinary,
arrow_type::ArrowTypeEnum::Date32(_) => DataType::Date32,
arrow_type::ArrowTypeEnum::Date64(_) => DataType::Date64,
arrow_type::ArrowTypeEnum::Duration(time_unit) => {
DataType::Duration(protobuf::TimeUnit::from_i32_to_arrow(*time_unit)?)
}
arrow_type::ArrowTypeEnum::Timestamp(protobuf::Timestamp {
time_unit,
timezone,
}) => DataType::Timestamp(
protobuf::TimeUnit::from_i32_to_arrow(*time_unit)?,
match timezone.len() {
0 => None,
_ => Some(timezone.to_owned()),
},
),
arrow_type::ArrowTypeEnum::Time32(time_unit) => {
DataType::Time32(protobuf::TimeUnit::from_i32_to_arrow(*time_unit)?)
}
arrow_type::ArrowTypeEnum::Time64(time_unit) => {
DataType::Time64(protobuf::TimeUnit::from_i32_to_arrow(*time_unit)?)
}
arrow_type::ArrowTypeEnum::Interval(interval_unit) => DataType::Interval(
protobuf::IntervalUnit::from_i32_to_arrow(*interval_unit)?,
),
arrow_type::ArrowTypeEnum::Decimal(protobuf::Decimal {
whole,
fractional,
}) => DataType::Decimal(*whole as usize, *fractional as usize),
arrow_type::ArrowTypeEnum::List(list) => {
let list_type: &protobuf::Field = list
.as_ref()
.field_type
.as_ref()
.ok_or_else(|| proto_error("Protobuf deserialization error: List message missing required field 'field_type'"))?
.as_ref();
DataType::List(Box::new(list_type.try_into()?))
}
arrow_type::ArrowTypeEnum::LargeList(list) => {
let list_type: &protobuf::Field = list
.as_ref()
.field_type
.as_ref()
.ok_or_else(|| proto_error("Protobuf deserialization error: List message missing required field 'field_type'"))?
.as_ref();
DataType::LargeList(Box::new(list_type.try_into()?))
}
arrow_type::ArrowTypeEnum::FixedSizeList(list) => {
let list_type: &protobuf::Field = list
.as_ref()
.field_type
.as_ref()
.ok_or_else(|| proto_error("Protobuf deserialization error: List message missing required field 'field_type'"))?
.as_ref();
let list_size = list.list_size;
DataType::FixedSizeList(Box::new(list_type.try_into()?), list_size)
}
arrow_type::ArrowTypeEnum::Struct(strct) => DataType::Struct(
strct
.sub_field_types
.iter()
.map(|field| field.try_into())
.collect::<Result<Vec<_>, _>>()?,
),
arrow_type::ArrowTypeEnum::Union(union) => {
let union_mode = protobuf::UnionMode::from_i32(union.union_mode)
.ok_or_else(|| {
proto_error(
"Protobuf deserialization error: Unknown union mode type",
)
})?;
let union_mode = match union_mode {
protobuf::UnionMode::Dense => UnionMode::Dense,
protobuf::UnionMode::Sparse => UnionMode::Sparse,
};
let union_types = union
.union_types
.iter()
.map(|field| field.try_into())
.collect::<Result<Vec<_>, _>>()?;
DataType::Union(union_types, union_mode)
}
arrow_type::ArrowTypeEnum::Dictionary(dict) => {
let pb_key_datatype = dict
.as_ref()
.key
.as_ref()
.ok_or_else(|| proto_error("Protobuf deserialization error: Dictionary message missing required field 'key'"))?;
let pb_value_datatype = dict
.as_ref()
.value
.as_ref()
.ok_or_else(|| proto_error("Protobuf deserialization error: Dictionary message missing required field 'key'"))?;
let key_datatype: DataType = pb_key_datatype.as_ref().try_into()?;
let value_datatype: DataType = pb_value_datatype.as_ref().try_into()?;
DataType::Dictionary(Box::new(key_datatype), Box::new(value_datatype))
}
})
}
}
#[allow(clippy::from_over_into)]
impl Into<datafusion::arrow::datatypes::DataType> for protobuf::PrimitiveScalarType {
fn into(self) -> datafusion::arrow::datatypes::DataType {
use datafusion::arrow::datatypes::{DataType, TimeUnit};
match self {
protobuf::PrimitiveScalarType::Bool => DataType::Boolean,
protobuf::PrimitiveScalarType::Uint8 => DataType::UInt8,
protobuf::PrimitiveScalarType::Int8 => DataType::Int8,
protobuf::PrimitiveScalarType::Uint16 => DataType::UInt16,
protobuf::PrimitiveScalarType::Int16 => DataType::Int16,
protobuf::PrimitiveScalarType::Uint32 => DataType::UInt32,
protobuf::PrimitiveScalarType::Int32 => DataType::Int32,
protobuf::PrimitiveScalarType::Uint64 => DataType::UInt64,
protobuf::PrimitiveScalarType::Int64 => DataType::Int64,
protobuf::PrimitiveScalarType::Float32 => DataType::Float32,
protobuf::PrimitiveScalarType::Float64 => DataType::Float64,
protobuf::PrimitiveScalarType::Utf8 => DataType::Utf8,
protobuf::PrimitiveScalarType::LargeUtf8 => DataType::LargeUtf8,
protobuf::PrimitiveScalarType::Date32 => DataType::Date32,
protobuf::PrimitiveScalarType::TimeMicrosecond => {
DataType::Time64(TimeUnit::Microsecond)
}
protobuf::PrimitiveScalarType::TimeNanosecond => {
DataType::Time64(TimeUnit::Nanosecond)
}
protobuf::PrimitiveScalarType::Null => DataType::Null,
protobuf::PrimitiveScalarType::Decimal128 => DataType::Decimal(0, 0),
protobuf::PrimitiveScalarType::Date64 => DataType::Date64,
protobuf::PrimitiveScalarType::TimeSecond => {
DataType::Timestamp(TimeUnit::Second, None)
}
protobuf::PrimitiveScalarType::TimeMillisecond => {
DataType::Timestamp(TimeUnit::Millisecond, None)
}
protobuf::PrimitiveScalarType::IntervalYearmonth => {
DataType::Interval(IntervalUnit::YearMonth)
}
protobuf::PrimitiveScalarType::IntervalDaytime => {
DataType::Interval(IntervalUnit::DayTime)
}
}
}
}
impl From<protobuf::JoinType> for JoinType {
fn from(t: protobuf::JoinType) -> Self {
match t {
protobuf::JoinType::Inner => JoinType::Inner,
protobuf::JoinType::Left => JoinType::Left,
protobuf::JoinType::Right => JoinType::Right,
protobuf::JoinType::Full => JoinType::Full,
protobuf::JoinType::Semi => JoinType::Semi,
protobuf::JoinType::Anti => JoinType::Anti,
}
}
}
impl From<JoinType> for protobuf::JoinType {
fn from(t: JoinType) -> Self {
match t {
JoinType::Inner => protobuf::JoinType::Inner,
JoinType::Left => protobuf::JoinType::Left,
JoinType::Right => protobuf::JoinType::Right,
JoinType::Full => protobuf::JoinType::Full,
JoinType::Semi => protobuf::JoinType::Semi,
JoinType::Anti => protobuf::JoinType::Anti,
}
}
}
impl From<protobuf::JoinConstraint> for JoinConstraint {
fn from(t: protobuf::JoinConstraint) -> Self {
match t {
protobuf::JoinConstraint::On => JoinConstraint::On,
protobuf::JoinConstraint::Using => JoinConstraint::Using,
}
}
}
impl From<JoinConstraint> for protobuf::JoinConstraint {
fn from(t: JoinConstraint) -> Self {
match t {
JoinConstraint::On => protobuf::JoinConstraint::On,
JoinConstraint::Using => protobuf::JoinConstraint::Using,
}
}
}
fn byte_to_string(b: u8) -> Result<String, BallistaError> {
let b = &[b];
let b = std::str::from_utf8(b)
.map_err(|_| BallistaError::General("Invalid CSV delimiter".to_owned()))?;
Ok(b.to_owned())
}
fn str_to_byte(s: &str) -> Result<u8, BallistaError> {
if s.len() != 1 {
return Err(BallistaError::General("Invalid CSV delimiter".to_owned()));
}
Ok(s.as_bytes()[0])
}
fn vec_to_array<T, const N: usize>(v: Vec<T>) -> [T; N] {
v.try_into().unwrap_or_else(|v: Vec<T>| {
panic!("Expected a Vec of length {} but it was {}", N, v.len())
})
}
#[cfg(test)]
mod tests {
use async_trait::async_trait;
use datafusion::arrow::datatypes::SchemaRef;
use datafusion::datasource::object_store::local::LocalFileSystem;
use datafusion::error::DataFusionError;
use datafusion::execution::context::{ExecutionContextState, QueryPlanner};
use datafusion::execution::runtime_env::RuntimeEnv;
use datafusion::logical_plan::plan::Extension;
use datafusion::logical_plan::{
col, DFSchemaRef, Expr, LogicalPlan, LogicalPlanBuilder, UserDefinedLogicalNode,
};
use datafusion::physical_plan::planner::{DefaultPhysicalPlanner, ExtensionPlanner};
use datafusion::physical_plan::{
DisplayFormatType, Distribution, ExecutionPlan, Partitioning, PhysicalPlanner,
SendableRecordBatchStream, Statistics,
};
use datafusion::prelude::{CsvReadOptions, ExecutionConfig, ExecutionContext};
use prost::Message;
use std::any::Any;
use std::collections::BTreeMap;
use std::convert::TryInto;
use std::fmt;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
pub mod proto {
use crate::serde::protobuf;
use prost::Message;
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TopKPlanProto {
#[prost(uint64, tag = "1")]
pub k: u64,
#[prost(message, optional, tag = "2")]
pub expr: ::core::option::Option<protobuf::LogicalExprNode>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TopKExecProto {
#[prost(uint64, tag = "1")]
pub k: u64,
}
}
use crate::error::BallistaError;
use crate::serde::protobuf::{LogicalPlanNode, PhysicalPlanNode};
use crate::serde::{
AsExecutionPlan, AsLogicalPlan, BallistaCodec, LogicalExtensionCodec,
PhysicalExtensionCodec,
};
use proto::{TopKExecProto, TopKPlanProto};
struct TopKPlanNode {
k: usize,
input: LogicalPlan,
/// The sort expression (this example only supports a single sort
/// expr)
expr: Expr,
}
impl TopKPlanNode {
pub fn new(k: usize, input: LogicalPlan, expr: Expr) -> Self {
Self { k, input, expr }
}
}
impl Debug for TopKPlanNode {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
self.fmt_for_explain(f)
}
}
impl UserDefinedLogicalNode for TopKPlanNode {
fn as_any(&self) -> &dyn Any {
self
}
fn inputs(&self) -> Vec<&LogicalPlan> {
vec![&self.input]
}
/// Schema for TopK is the same as the input
fn schema(&self) -> &DFSchemaRef {
self.input.schema()
}
fn expressions(&self) -> Vec<Expr> {
vec![self.expr.clone()]
}
/// For example: `TopK: k=10`
fn fmt_for_explain(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TopK: k={}", self.k)
}
fn from_template(
&self,
exprs: &[Expr],
inputs: &[LogicalPlan],
) -> Arc<dyn UserDefinedLogicalNode + Send + Sync> {
assert_eq!(inputs.len(), 1, "input size inconsistent");
assert_eq!(exprs.len(), 1, "expression size inconsistent");
Arc::new(TopKPlanNode {
k: self.k,
input: inputs[0].clone(),
expr: exprs[0].clone(),
})
}
}
struct TopKExec {
input: Arc<dyn ExecutionPlan>,
        /// The maximum number of values
k: usize,
}
impl TopKExec {
pub fn new(k: usize, input: Arc<dyn ExecutionPlan>) -> Self {
Self { input, k }
}
}
impl Debug for TopKExec {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "TopKExec")
}
}
#[async_trait]
impl ExecutionPlan for TopKExec {
/// Return a reference to Any that can be used for downcasting
fn as_any(&self) -> &dyn Any {
self
}
fn schema(&self) -> SchemaRef {
self.input.schema()
}
fn output_partitioning(&self) -> Partitioning {
Partitioning::UnknownPartitioning(1)
}
fn required_child_distribution(&self) -> Distribution {
Distribution::SinglePartition
}
fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
vec![self.input.clone()]
}
fn with_new_children(
&self,
children: Vec<Arc<dyn ExecutionPlan>>,
) -> datafusion::error::Result<Arc<dyn ExecutionPlan>> {
match children.len() {
1 => Ok(Arc::new(TopKExec {
input: children[0].clone(),
k: self.k,
})),
_ => Err(DataFusionError::Internal(
"TopKExec wrong number of children".to_string(),
)),
}
}
/// Execute one partition and return an iterator over RecordBatch
async fn execute(
&self,
_partition: usize,
_runtime: Arc<RuntimeEnv>,
) -> datafusion::error::Result<SendableRecordBatchStream> {
Err(DataFusionError::NotImplemented(
"not implemented".to_string(),
))
}
fn fmt_as(
&self,
t: DisplayFormatType,
f: &mut std::fmt::Formatter,
) -> std::fmt::Result {
match t {
DisplayFormatType::Default => {
write!(f, "TopKExec: k={}", self.k)
}
}
}
fn statistics(&self) -> Statistics {
// to improve the optimizability of this plan
// better statistics inference could be provided
Statistics::default()
}
}
struct TopKPlanner {}
impl ExtensionPlanner for TopKPlanner {
/// Create a physical plan for an extension node
fn plan_extension(
&self,
_planner: &dyn PhysicalPlanner,
node: &dyn UserDefinedLogicalNode,
logical_inputs: &[&LogicalPlan],
physical_inputs: &[Arc<dyn ExecutionPlan>],
_ctx_state: &ExecutionContextState,
) -> datafusion::error::Result<Option<Arc<dyn ExecutionPlan>>> {
Ok(
if let Some(topk_node) = node.as_any().downcast_ref::<TopKPlanNode>() {
assert_eq!(logical_inputs.len(), 1, "Inconsistent number of inputs");
assert_eq!(physical_inputs.len(), 1, "Inconsistent number of inputs");
// figure out input name
Some(Arc::new(TopKExec {
input: physical_inputs[0].clone(),
k: topk_node.k,
}))
} else {
None
},
)
}
}
struct TopKQueryPlanner {}
#[async_trait]
impl QueryPlanner for TopKQueryPlanner {
/// Given a `LogicalPlan` created from above, create an
/// `ExecutionPlan` suitable for execution
async fn create_physical_plan(
&self,
logical_plan: &LogicalPlan,
ctx_state: &ExecutionContextState,
) -> datafusion::error::Result<Arc<dyn ExecutionPlan>> {
// Teach the default physical planner how to plan TopK nodes.
let physical_planner =
DefaultPhysicalPlanner::with_extension_planners(vec![Arc::new(
TopKPlanner {},
)]);
// Delegate most work of physical planning to the default physical planner
physical_planner
.create_physical_plan(logical_plan, ctx_state)
.await
}
}
#[derive(Debug)]
pub struct TopKExtensionCodec {}
impl LogicalExtensionCodec for TopKExtensionCodec {
fn try_decode(
&self,
buf: &[u8],
inputs: &[LogicalPlan],
) -> Result<Extension, BallistaError> {
if let Some((input, _)) = inputs.split_first() {
let proto = TopKPlanProto::decode(buf).map_err(|e| {
BallistaError::Internal(format!(
"failed to decode logical plan: {:?}",
e
))
})?;
if let Some(expr) = proto.expr.as_ref() {
let node = TopKPlanNode::new(
proto.k as usize,
input.clone(),
expr.try_into()?,
);
Ok(Extension {
node: Arc::new(node),
})
} else {
Err(BallistaError::from("invalid plan, no expr".to_string()))
}
} else {
Err(BallistaError::from("invalid plan, no input".to_string()))
}
}
fn try_encode(
&self,
node: &Extension,
buf: &mut Vec<u8>,
) -> Result<(), BallistaError> {
if let Some(exec) = node.node.as_any().downcast_ref::<TopKPlanNode>() {
let proto = TopKPlanProto {
k: exec.k as u64,
expr: Some((&exec.expr).try_into()?),
};
proto.encode(buf).map_err(|e| {
BallistaError::Internal(format!(
"failed to encode logical plan: {:?}",
e
))
})?;
Ok(())
} else {
Err(BallistaError::from("unsupported plan type".to_string()))
}
}
}
impl PhysicalExtensionCodec for TopKExtensionCodec {
fn try_decode(
&self,
buf: &[u8],
inputs: &[Arc<dyn ExecutionPlan>],
) -> Result<Arc<dyn ExecutionPlan>, BallistaError> {
if let Some((input, _)) = inputs.split_first() {
let proto = TopKExecProto::decode(buf).map_err(|e| {
BallistaError::Internal(format!(
"failed to decode execution plan: {:?}",
e
))
})?;
Ok(Arc::new(TopKExec::new(proto.k as usize, input.clone())))
} else {
Err(BallistaError::from("invalid plan, no input".to_string()))
}
}
fn try_encode(
&self,
node: Arc<dyn ExecutionPlan>,
buf: &mut Vec<u8>,
) -> Result<(), BallistaError> {
if let Some(exec) = node.as_any().downcast_ref::<TopKExec>() {
let proto = TopKExecProto { k: exec.k as u64 };
proto.encode(buf).map_err(|e| {
BallistaError::Internal(format!(
"failed to encode execution plan: {:?}",
e
))
})?;
Ok(())
} else {
Err(BallistaError::from("unsupported plan type".to_string()))
}
}
}
#[tokio::test]
async fn test_extension_plan() -> crate::error::Result<()> {
let store = Arc::new(LocalFileSystem {});
let config =
ExecutionConfig::new().with_query_planner(Arc::new(TopKQueryPlanner {}));
let ctx = ExecutionContext::with_config(config);
let scan = LogicalPlanBuilder::scan_csv(
store,
"../../../datafusion/tests/customer.csv",
CsvReadOptions::default(),
None,
1,
)
.await?
.build()?;
let topk_plan = LogicalPlan::Extension(Extension {
node: Arc::new(TopKPlanNode::new(3, scan, col("revenue"))),
});
let topk_exec = ctx.create_physical_plan(&topk_plan).await?;
let extension_codec = TopKExtensionCodec {};
let proto = LogicalPlanNode::try_from_logical_plan(&topk_plan, &extension_codec)?;
let logical_round_trip = proto.try_into_logical_plan(&ctx, &extension_codec)?;
assert_eq!(
format!("{:?}", topk_plan),
format!("{:?}", logical_round_trip)
);
let proto = PhysicalPlanNode::try_from_physical_plan(
topk_exec.clone(),
&extension_codec,
)?;
let physical_round_trip = proto.try_into_physical_plan(&ctx, &extension_codec)?;
assert_eq!(
format!("{:?}", topk_exec),
format!("{:?}", physical_round_trip)
);
Ok(())
}
}
| 35.795918 | 132 | 0.567645 |
167cd232693c732155d9121296f73d485bf4f32e | 3,661 | // Generated from definition io.k8s.api.core.v1.TopologySelectorTerm
/// A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct TopologySelectorTerm {
/// A list of topology selector requirements by labels.
pub match_label_expressions: Option<Vec<crate::v1_16::api::core::v1::TopologySelectorLabelRequirement>>,
}
impl<'de> serde::Deserialize<'de> for TopologySelectorTerm {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_match_label_expressions,
Other,
}
impl<'de> serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
Ok(match v {
"matchLabelExpressions" => Field::Key_match_label_expressions,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = TopologySelectorTerm;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "struct TopologySelectorTerm")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
let mut value_match_label_expressions: Option<Vec<crate::v1_16::api::core::v1::TopologySelectorLabelRequirement>> = None;
while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_match_label_expressions => value_match_label_expressions = serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(TopologySelectorTerm {
match_label_expressions: value_match_label_expressions,
})
}
}
deserializer.deserialize_struct(
"TopologySelectorTerm",
&[
"matchLabelExpressions",
],
Visitor,
)
}
}
impl serde::Serialize for TopologySelectorTerm {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
let mut state = serializer.serialize_struct(
"TopologySelectorTerm",
self.match_label_expressions.as_ref().map_or(0, |_| 1),
)?;
if let Some(value) = &self.match_label_expressions {
serde::ser::SerializeStruct::serialize_field(&mut state, "matchLabelExpressions", value)?;
}
serde::ser::SerializeStruct::end(state)
}
}
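// A minimal sketch (an addition, not part of the generated code) of the JSON shape the
// hand-rolled Deserialize/Serialize impls above accept and produce. It assumes `serde_json`
// is available to this crate, which the k8s-openapi dependencies normally provide.
#[cfg(test)]
mod topology_selector_term_sketch {
    #[test]
    fn round_trips_match_label_expressions() {
        let json = r#"{"matchLabelExpressions":[]}"#;
        // Known keys land in the struct; unknown keys are ignored by the Field::Other arm.
        let term: super::TopologySelectorTerm = serde_json::from_str(json).unwrap();
        assert_eq!(term.match_label_expressions.as_ref().map(|v| v.len()), Some(0));
        // The single optional field is skipped when None and emitted under its JSON name otherwise.
        assert_eq!(serde_json::to_string(&term).unwrap(), json);
    }
}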
| 41.602273 | 277 | 0.567878 |
1e4c19b4f9b1c4eb06a66634b38573496ce98be5 | 241 | use serde::{Deserialize, Serialize};
#[repr(u32)]
#[derive(Deserialize, Serialize, Debug)]
pub enum RequestKind {
Default = 0,
Telemetry = 1,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Request {
kind: RequestKind,
}
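// A minimal sketch (an addition, not original code) showing that the explicit discriminants
// line up with `#[repr(u32)]`, and that `Request` can be built from a child of the defining
// module even though `kind` is private.
#[cfg(test)]
mod request_sketch {
    use super::*;
    #[test]
    fn request_kind_discriminants() {
        assert_eq!(RequestKind::Default as u32, 0);
        assert_eq!(RequestKind::Telemetry as u32, 1);
        let _request = Request { kind: RequestKind::Telemetry };
    }
}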
| 17.214286 | 40 | 0.680498 |
1ca80bbba27fab2562fff5eb4d549d28b606798e | 1,561 | #![allow(clippy::module_inception)]
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::large_enum_variant)]
#![allow(clippy::wrong_self_convention)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::blacklisted_name)]
//! <p>Catalog API actions allow you to manage your entities through list, describe, and update
//! capabilities. An entity can be a product or an offer on AWS Marketplace. </p>
//! <p>You can automate your entity update process by integrating the AWS Marketplace Catalog
//! API with your AWS Marketplace product build or deployment pipelines. You can also create
//! your own applications on top of the Catalog API to manage your products on AWS
//! Marketplace.</p>
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use error_meta::Error;
pub use config::Config;
mod aws_endpoint;
#[cfg(feature = "client")]
pub mod client;
pub mod config;
pub mod error;
mod error_meta;
pub mod input;
mod json_deser;
mod json_errors;
mod json_ser;
pub mod model;
pub mod operation;
mod operation_deser;
mod operation_ser;
pub mod output;
pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
pub use smithy_http::byte_stream::ByteStream;
pub use smithy_http::result::SdkError;
pub use smithy_types::Blob;
static API_METADATA: aws_http::user_agent::ApiMetadata =
aws_http::user_agent::ApiMetadata::new("marketplacecatalog", PKG_VERSION);
pub use aws_auth::Credentials;
pub use aws_types::region::Region;
#[cfg(feature = "client")]
pub use client::Client;
pub use smithy_http::endpoint::Endpoint;
| 34.688889 | 95 | 0.768097 |
1401adf1d57d40e1a1821248ee6229557bfae891 | 76,642 | use bls::*;
use errors::ErrorCode;
use errors::ToErrorCode;
use std::os::raw::c_void;
use std::slice;
/// Creates and returns a random generator point that satisfies BLS algorithm requirements.
///
/// The BLS algorithm requires choosing a generator point that must be known to all parties.
/// Most of the BLS methods require the generator to be provided.
///
/// Note: Generator instance deallocation must be performed by calling indy_crypto_bls_generator_free
///
/// # Arguments
/// * `gen_p` - Reference that will contain generator instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_generator_new(gen_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_generator_new: >>> gen_p: {:?}", gen_p);
check_useful_c_ptr!(gen_p, ErrorCode::CommonInvalidParam1);
let res = match Generator::new() {
Ok(gen) => {
trace!("indy_crypto_bls_generator_new: gen: {:?}", gen);
unsafe {
*gen_p = Box::into_raw(Box::new(gen)) as *const c_void;
trace!("indy_crypto_bls_generator_new: *gen_p: {:?}", *gen_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_generator_new: <<< res: {:?}", res);
res
}
/// Creates and returns generator point from bytes representation.
///
/// Note: Generator instance deallocation must be performed by calling indy_crypto_bls_generator_free
///
/// # Arguments
/// * `bytes` - Bytes buffer pointer
/// * `bytes_len` - Bytes buffer len
/// * `gen_p` - Reference that will contain generator instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_generator_from_bytes(bytes: *const u8, bytes_len: usize,
gen_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_generator_from_bytes: >>> bytes: {:?}, bytes_len: {:?}, gen_p: {:?}", bytes, bytes_len, gen_p);
check_useful_c_byte_array!(bytes, bytes_len,
ErrorCode::CommonInvalidParam1, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(gen_p, ErrorCode::CommonInvalidParam1);
trace!("indy_crypto_bls_generator_from_bytes: bytes: {:?}", bytes);
let res = match Generator::from_bytes(bytes) {
Ok(gen) => {
trace!("indy_crypto_bls_generator_from_bytes: gen: {:?}", gen);
unsafe {
*gen_p = Box::into_raw(Box::new(gen)) as *const c_void;
trace!("indy_crypto_bls_generator_from_bytes: *gen_p: {:?}", *gen_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_generator_from_bytes: <<< res: {:?}", res);
res
}
/// Returns bytes representation of generator point.
///
/// Note: Returned buffer lifetime is the same as generator instance.
///
/// # Arguments
/// * `gen` - Generator instance pointer
/// * `bytes_p` - Pointer that will contains bytes buffer
/// * `bytes_len_p` - Pointer that will contains bytes buffer len
#[no_mangle]
pub extern fn indy_crypto_bls_generator_as_bytes(gen: *const c_void,
bytes_p: *mut *const u8, bytes_len_p: *mut usize) -> ErrorCode {
trace!("indy_crypto_bls_generator_as_bytes: >>> gen: {:?}, bytes_p: {:?}, bytes_len_p: {:?}", gen, bytes_p, bytes_len_p);
check_useful_c_reference!(gen, Generator, ErrorCode::CommonInvalidParam1);
check_useful_c_ptr!(bytes_p, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(bytes_len_p, ErrorCode::CommonInvalidParam3);
trace!("indy_crypto_bls_generator_as_bytes: >>> gen: {:?}", gen);
unsafe {
*bytes_p = gen.as_bytes().as_ptr();
*bytes_len_p = gen.as_bytes().len();
};
let res = ErrorCode::Success;
trace!("indy_crypto_bls_generator_as_bytes: <<< res: {:?}", res);
res
}
/// Deallocates generator instance.
///
/// # Arguments
/// * `gen` - Generator instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_generator_free(gen: *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_generator_free: >>> gen: {:?}", gen);
check_useful_c_ptr!(gen, ErrorCode::CommonInvalidParam1);
unsafe { Box::from_raw(gen as *mut Generator); }
let res = ErrorCode::Success;
trace!("indy_crypto_bls_generator_free: <<< res: {:?}", res);
res
}
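// A minimal sketch (an addition, not part of the original FFI surface) of the generator
// lifecycle at the safe `bls` layer these wrappers delegate to: create, export as bytes,
// and restore. The `&[u8]` argument and a Debug-implementing error type are assumed from
// the calls made above.
#[cfg(test)]
mod generator_bytes_sketch {
    use super::*;
    #[test]
    fn generator_round_trips_through_bytes() {
        let gen = Generator::new().unwrap();
        // The byte form is what a C caller would copy out of indy_crypto_bls_generator_as_bytes.
        let gen2 = Generator::from_bytes(gen.as_bytes()).unwrap();
        assert_eq!(gen.as_bytes(), gen2.as_bytes());
    }
}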
/// Creates and returns a random (or seeded) BLS sign key that satisfies BLS algorithm requirements.
///
/// Note: Sign Key instance deallocation must be performed by calling indy_crypto_bls_sign_key_free.
///
/// # Arguments
/// * `seed` - Seed buffer pointer. For random generation null must be passed.
/// * `seed_len` - Seed buffer len.
/// * `sign_key_p` - Reference that will contain sign key instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_sign_key_new(seed: *const u8,
seed_len: usize,
sign_key_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_sign_key_new: >>> seed: {:?}, seed_len: {:?}, sign_key_p: {:?}", seed, seed_len, sign_key_p);
check_useful_opt_c_byte_array!(seed, seed_len,
ErrorCode::CommonInvalidParam1, ErrorCode::CommonInvalidParam2);
trace!("indy_crypto_bls_sign_key_new: seed: {:?}", secret!(&seed));
let res = match SignKey::new(seed) {
Ok(sign_key) => {
trace!("indy_crypto_bls_generator_new: gen: {:?}", secret!(&sign_key));
unsafe {
*sign_key_p = Box::into_raw(Box::new(sign_key)) as *const c_void;
trace!("indy_crypto_bls_sign_key_new: *sign_key_p: {:?}", *sign_key_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_sign_key_new: <<< res: {:?}", res);
res
}
/// Creates and returns sign key from bytes representation.
///
/// Note: Sign key instance deallocation must be performed by calling indy_crypto_bls_sign_key_free
///
/// # Arguments
/// * `bytes` - Bytes buffer pointer
/// * `bytes_len` - Bytes buffer len
/// * `sign_key_p` - Reference that will contain sign key instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_sign_key_from_bytes(bytes: *const u8, bytes_len: usize,
sign_key_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_sign_key_from_bytes: >>> bytes: {:?}, bytes_len: {:?}, gen_p: {:?}", bytes, bytes_len, sign_key_p);
check_useful_c_byte_array!(bytes, bytes_len,
ErrorCode::CommonInvalidParam1, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(sign_key_p, ErrorCode::CommonInvalidParam1);
trace!("indy_crypto_bls_sign_key_from_bytes: bytes: {:?}", secret!(&bytes));
let res = match SignKey::from_bytes(bytes) {
Ok(sign_key) => {
trace!("indy_crypto_bls_sign_key_from_bytes: sign_key: {:?}", secret!(&sign_key));
unsafe {
*sign_key_p = Box::into_raw(Box::new(sign_key)) as *const c_void;
trace!("indy_crypto_bls_sign_key_from_bytes: *sign_key_p: {:?}", *sign_key_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_sign_key_from_bytes: <<< res: {:?}", res);
res
}
/// Returns bytes representation of sign key.
///
/// Note: Returned buffer lifetime is the same as sign key instance.
///
/// # Arguments
/// * `sign_key` - Sign key instance pointer
/// * `bytes_p` - Pointer that will contains bytes buffer
/// * `bytes_len_p` - Pointer that will contains bytes buffer len
#[no_mangle]
pub extern fn indy_crypto_bls_sign_key_as_bytes(sign_key: *const c_void,
bytes_p: *mut *const u8, bytes_len_p: *mut usize) -> ErrorCode {
trace!("indy_crypto_bls_sign_key_as_bytes: >>> sign_key: {:?}, bytes_p: {:?}, bytes_len_p: {:?}", sign_key, bytes_p, bytes_len_p);
check_useful_c_reference!(sign_key, SignKey, ErrorCode::CommonInvalidParam1);
check_useful_c_ptr!(bytes_p, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(bytes_len_p, ErrorCode::CommonInvalidParam3);
trace!("indy_crypto_bls_sign_key_as_bytes: sign_key: {:?}", secret!(sign_key));
unsafe {
*bytes_p = sign_key.as_bytes().as_ptr();
*bytes_len_p = sign_key.as_bytes().len();
};
let res = ErrorCode::Success;
trace!("indy_crypto_bls_sign_key_as_bytes: <<< res: {:?}", res);
res
}
/// Deallocates sign key instance.
///
/// # Arguments
/// * `sign_key` - Sign key instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_sign_key_free(sign_key: *const c_void) -> ErrorCode {
check_useful_c_ptr!(sign_key, ErrorCode::CommonInvalidParam1);
trace!("indy_crypto_bls_sign_key_free: >>> sign_key: {:?}", secret!(sign_key));
unsafe { Box::from_raw(sign_key as *mut SignKey); }
let res = ErrorCode::Success;
trace!("indy_crypto_bls_sign_key_free: <<< res: {:?}", res);
res
}
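// A minimal sketch (an addition) of seeded sign key creation and the bytes round trip at
// the safe `bls` layer. A 32-byte seed is assumed, matching the seed length used by the
// FFI tests at the bottom of this file.
#[cfg(test)]
mod sign_key_bytes_sketch {
    use super::*;
    #[test]
    fn sign_key_round_trips_through_bytes() {
        let seed = [7u8; 32];
        let sign_key = SignKey::new(Some(&seed[..])).unwrap();
        // Restoring from the exported bytes yields an equivalent key.
        let restored = SignKey::from_bytes(sign_key.as_bytes()).unwrap();
        assert_eq!(sign_key.as_bytes(), restored.as_bytes());
    }
}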
/// Creates and returns BLS ver key that corresponds to sign key.
///
/// Note: Verification key instance deallocation must be performed by calling indy_crypto_bls_ver_key_free.
///
/// # Arguments
/// * `gen` - Generator point instance
/// * `sign_key` - Sign key instance
/// * `ver_key_p` - Reference that will contain verification key instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_ver_key_new(gen: *const c_void,
sign_key: *const c_void,
ver_key_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_ver_key_new: >>> gen: {:?}, sign_key: {:?}, ver_key_p: {:?}", gen, sign_key, ver_key_p);
check_useful_c_reference!(gen, Generator, ErrorCode::CommonInvalidParam1);
check_useful_c_reference!(sign_key, SignKey, ErrorCode::CommonInvalidParam2);
trace!("indy_crypto_bls_ver_key_new: gen: {:?}, sign_key: {:?}", gen, secret!(sign_key));
let res = match VerKey::new(gen, sign_key) {
Ok(ver_key) => {
trace!("indy_crypto_bls_ver_key_new: ver_key: {:?}", ver_key);
unsafe {
*ver_key_p = Box::into_raw(Box::new(ver_key)) as *const c_void;
trace!("indy_crypto_bls_ver_key_new: *ver_key_p: {:?}", *ver_key_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_sign_key_new: <<< res: {:?}", res);
res
}
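// A minimal sketch (an addition, not part of the original FFI surface) of the
// generator -> sign key -> ver key flow that indy_crypto_bls_ver_key_new and the functions
// above wrap. Reference-based signatures and a Debug-implementing error type are assumed
// from the calls made elsewhere in this file.
#[cfg(test)]
mod keypair_sketch {
    use super::*;
    #[test]
    fn generate_keypair_with_safe_api() {
        // Generator that must be shared with every party that verifies signatures.
        let gen = Generator::new().unwrap();
        // Random sign key; pass Some(seed) instead of None for seeded generation.
        let sign_key = SignKey::new(None).unwrap();
        // Verification key derived from the generator and the sign key.
        let ver_key = VerKey::new(&gen, &sign_key).unwrap();
        assert!(!ver_key.as_bytes().is_empty());
    }
}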
/// Creates and returns verification key from bytes representation.
///
/// Note: Verification key instance deallocation must be performed by calling indy_crypto_bls_ver_key_free
///
/// # Arguments
/// * `bytes` - Bytes buffer pointer
/// * `bytes_len` - Bytes buffer len
/// * `ver_key_p` - Reference that will contain verification key instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_ver_key_from_bytes(bytes: *const u8, bytes_len: usize,
ver_key_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_ver_key_from_bytes: >>> bytes: {:?}, bytes_len: {:?}, gen_p: {:?}", bytes, bytes_len, ver_key_p);
check_useful_c_byte_array!(bytes, bytes_len,
ErrorCode::CommonInvalidParam1, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(ver_key_p, ErrorCode::CommonInvalidParam1);
trace!("indy_crypto_bls_ver_key_from_bytes: bytes: {:?}", bytes);
let res = match VerKey::from_bytes(bytes) {
Ok(ver_key) => {
trace!("indy_crypto_bls_ver_key_from_bytes: sign_key: {:?}", ver_key);
unsafe {
*ver_key_p = Box::into_raw(Box::new(ver_key)) as *const c_void;
trace!("indy_crypto_bls_ver_key_from_bytes: *ver_key_p: {:?}", *ver_key_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_ver_key_from_bytes: <<< res: {:?}", res);
res
}
/// Returns bytes representation of verification key.
///
/// Note: Returned buffer lifetime is the same as verification key instance.
///
/// # Arguments
/// * `ver_key` - Verification key instance pointer
/// * `bytes_p` - Pointer that will contains bytes buffer
/// * `bytes_len_p` - Pointer that will contains bytes buffer len
#[no_mangle]
pub extern fn indy_crypto_bls_ver_key_as_bytes(ver_key: *const c_void,
bytes_p: *mut *const u8, bytes_len_p: *mut usize) -> ErrorCode {
trace!("indy_crypto_bls_sign_key_as_bytes: >>> ver_key: {:?}, bytes_p: {:?}, bytes_len_p: {:?}", ver_key, bytes_p, bytes_len_p);
check_useful_c_reference!(ver_key, VerKey, ErrorCode::CommonInvalidParam1);
check_useful_c_ptr!(bytes_p, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(bytes_len_p, ErrorCode::CommonInvalidParam3);
trace!("indy_crypto_bls_ver_key_as_bytes: ver_key: {:?}", ver_key);
unsafe {
*bytes_p = ver_key.as_bytes().as_ptr();
*bytes_len_p = ver_key.as_bytes().len();
};
let res = ErrorCode::Success;
trace!("indy_crypto_bls_ver_key_as_bytes: <<< res: {:?}", res);
res
}
/// Deallocates verification key instance.
///
/// # Arguments
/// * `ver_key` - Verification key instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_ver_key_free(ver_key: *const c_void) -> ErrorCode {
check_useful_c_ptr!(ver_key, ErrorCode::CommonInvalidParam1);
trace!("indy_crypto_bls_ver_key_free: >>> ver_key: {:?}", ver_key);
unsafe { Box::from_raw(ver_key as *mut VerKey); }
let res = ErrorCode::Success;
trace!("indy_crypto_bls_ver_key_free: <<< res: {:?}", res);
res
}
/// Creates and returns BLS proof of possession that corresponds to ver key and sign key.
///
/// Note: Proof of possession instance deallocation must be performed by calling indy_crypto_bls_pop_free.
///
/// # Arguments
/// * `ver_key` - Ver key instance
/// * `sign_key` - Sign key instance
/// * `pop_p` - Reference that will contain proof of possession instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_pop_new(ver_key: *const c_void,
sign_key: *const c_void,
pop_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_pop_new: >>> ver_key: {:?}, sign_key: {:?}, pop_p: {:?}", ver_key, sign_key, pop_p);
check_useful_c_reference!(ver_key, VerKey, ErrorCode::CommonInvalidParam1);
check_useful_c_reference!(sign_key, SignKey, ErrorCode::CommonInvalidParam2);
trace!("indy_crypto_bls_pop_new: ver_key: {:?}, sign_key: {:?}", ver_key, sign_key);
let res = match ProofOfPossession::new(ver_key, sign_key) {
Ok(pop) => {
trace!("indy_crypto_bls_pop_new: pop: {:?}", pop);
unsafe {
*pop_p = Box::into_raw(Box::new(pop)) as *const c_void;
trace!("indy_crypto_bls_pop_new: *pop_p: {:?}", *pop_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_pop_new: <<< res: {:?}", res);
res
}
/// Creates and returns proof of possession from bytes representation.
///
/// Note: Proof of possession instance deallocation must be performed by calling indy_crypto_bls_pop_free
///
/// # Arguments
/// * `bytes` - Bytes buffer pointer
/// * `bytes_len` - Bytes buffer len
/// * `pop_p` - Reference that will contain proof of possession instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_pop_from_bytes(bytes: *const u8, bytes_len: usize,
pop_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_pop_from_bytes: >>> bytes: {:?}, bytes_len: {:?}, gen_p: {:?}", bytes, bytes_len, pop_p);
check_useful_c_byte_array!(bytes, bytes_len,
ErrorCode::CommonInvalidParam1, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(pop_p, ErrorCode::CommonInvalidParam3);
trace!("indy_crypto_bls_pop_from_bytes: bytes: {:?}", bytes);
let res = match ProofOfPossession::from_bytes(bytes) {
Ok(pop) => {
trace!("indy_crypto_bls_pop_from_bytes: pop: {:?}", pop);
unsafe {
*pop_p = Box::into_raw(Box::new(pop)) as *const c_void;
trace!("indy_crypto_bls_pop_from_bytes: *pop_p: {:?}", *pop_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_pop_from_bytes: <<< res: {:?}", res);
res
}
/// Returns bytes representation of proof of possession.
///
/// Note: Returned buffer lifetime is the same as proof of possession instance.
///
/// # Arguments
/// * `pop` - Proof of possession instance pointer
/// * `bytes_p` - Pointer that will contains bytes buffer
/// * `bytes_len_p` - Pointer that will contains bytes buffer len
#[no_mangle]
pub extern fn indy_crypto_bls_pop_as_bytes(pop: *const c_void,
bytes_p: *mut *const u8, bytes_len_p: *mut usize) -> ErrorCode {
trace!("indy_crypto_bls_pop_as_bytes: >>> pop: {:?}, bytes_p: {:?}, bytes_len_p: {:?}", pop, bytes_p, bytes_len_p);
check_useful_c_reference!(pop, ProofOfPossession, ErrorCode::CommonInvalidParam1);
check_useful_c_ptr!(bytes_p, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(bytes_len_p, ErrorCode::CommonInvalidParam3);
trace!("indy_crypto_bls_pop_as_bytes: pop: {:?}", pop);
unsafe {
*bytes_p = pop.as_bytes().as_ptr();
*bytes_len_p = pop.as_bytes().len();
};
let res = ErrorCode::Success;
trace!("indy_crypto_bls_pop_as_bytes: <<< res: {:?}", res);
res
}
/// Deallocates proof of possession instance.
///
/// # Arguments
/// * `pop` - Proof of possession instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_pop_free(pop: *const c_void) -> ErrorCode {
check_useful_c_ptr!(pop, ErrorCode::CommonInvalidParam1);
trace!("indy_crypto_bls_pop_free: >>> pop: {:?}", pop);
unsafe { Box::from_raw(pop as *mut ProofOfPossession); }
let res = ErrorCode::Success;
trace!("indy_crypto_bls_pop_free: <<< res: {:?}", res);
res
}
/// Creates and returns signature from bytes representation.
///
/// Note: Signature instance deallocation must be performed by calling indy_crypto_bls_signature_free
///
/// # Arguments
/// * `bytes` - Bytes buffer pointer
/// * `bytes_len` - Bytes buffer len
/// * `signature_p` - Reference that will contain signature instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_signature_from_bytes(bytes: *const u8, bytes_len: usize,
signature_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_signature_from_bytes: >>> bytes: {:?}, bytes_len: {:?}, signature_p: {:?}", bytes, bytes_len, signature_p);
check_useful_c_byte_array!(bytes, bytes_len,
ErrorCode::CommonInvalidParam1, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(signature_p, ErrorCode::CommonInvalidParam1);
trace!("indy_crypto_bls_signature_from_bytes: bytes: {:?}", bytes);
let res = match Signature::from_bytes(bytes) {
Ok(signature) => {
trace!("indy_crypto_bls_signature_from_bytes: signature: {:?}", signature);
unsafe {
*signature_p = Box::into_raw(Box::new(signature)) as *const c_void;
trace!("indy_crypto_bls_signature_from_bytes: *signature_p: {:?}", *signature_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_signature_from_bytes: <<< res: {:?}", res);
res
}
/// Returns bytes representation of signature.
///
/// Note: Returned buffer lifetime is the same as signature instance.
///
/// # Arguments
/// * `signature` - Signature instance pointer
/// * `bytes_p` - Pointer that will contains bytes buffer
/// * `bytes_len_p` - Pointer that will contains bytes buffer len
#[no_mangle]
pub extern fn indy_crypto_bls_signature_as_bytes(signature: *const c_void,
bytes_p: *mut *const u8, bytes_len_p: *mut usize) -> ErrorCode {
trace!("indy_crypto_bls_signature_as_bytes: >>> signature: {:?}, bytes_p: {:?}, bytes_len_p: {:?}", signature, bytes_p, bytes_len_p);
check_useful_c_reference!(signature, Signature, ErrorCode::CommonInvalidParam1);
check_useful_c_ptr!(bytes_p, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(bytes_len_p, ErrorCode::CommonInvalidParam3);
trace!("indy_crypto_bls_signature_as_bytes: signature: {:?}", signature);
unsafe {
*bytes_p = signature.as_bytes().as_ptr();
*bytes_len_p = signature.as_bytes().len();
};
let res = ErrorCode::Success;
trace!("indy_crypto_bls_signature_as_bytes: <<< res: {:?}", res);
res
}
/// Deallocates signature instance.
///
/// # Arguments
/// * `signature` - Signature instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_signature_free(signature: *const c_void) -> ErrorCode {
check_useful_c_ptr!(signature, ErrorCode::CommonInvalidParam1);
trace!("indy_crypto_bls_signature_free: >>> signature: {:?}", signature);
unsafe { Box::from_raw(signature as *mut Signature); }
let res = ErrorCode::Success;
trace!("indy_crypto_bls_signature_free: <<< res: {:?}", res);
res
}
/// Creates and returns multi signature for provided list of signatures.
///
/// Note: Multi signature instance deallocation must be performed by calling indy_crypto_bls_multi_signature_free.
///
/// # Arguments
/// * `signatures` - Signature instance pointers array
/// * `signatures_len` - Signature instance pointers array len
/// * `multi_sig_p` - Reference that will contain multi signature instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_multi_signature_new(signatures: *const *const c_void,
signatures_len: usize,
multi_sig_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_multi_signature_new: >>> signatures: {:?}, signatures_len: {:?}, multi_sig_p: {:?}", signatures, signatures_len, multi_sig_p);
check_useful_c_reference_array!(signatures, signatures_len, Signature, ErrorCode::CommonInvalidParam1, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(multi_sig_p, ErrorCode::CommonInvalidParam3);
trace!("indy_crypto_bls_multi_signature_new: signatures: {:?}", signatures);
let res = match MultiSignature::new(&signatures) {
Ok(multi_sig) => {
trace!("indy_crypto_bls_multi_signature_new: multi_sig: {:?}", multi_sig);
unsafe {
*multi_sig_p = Box::into_raw(Box::new(multi_sig)) as *const c_void;
trace!("indy_crypto_bls_multi_signature_new: *multi_sig_p: {:?}", *multi_sig_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_multi_signature_new: <<< res: {:?}", res);
res
}
/// Creates and returns multi signature from bytes representation.
///
/// Note: Multi signature instance deallocation must be performed by calling indy_crypto_bls_multi_signature_free
///
/// # Arguments
/// * `bytes` - Bytes buffer pointer
/// * `bytes_len` - Bytes buffer len
/// * `multi_sig_p` - Reference that will contain multi signature instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_multi_signature_from_bytes(bytes: *const u8, bytes_len: usize,
multi_sig_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_multi_signature_from_bytes: >>> bytes: {:?}, bytes_len: {:?}, multi_sig_p: {:?}", bytes, bytes_len, multi_sig_p);
check_useful_c_byte_array!(bytes, bytes_len,
ErrorCode::CommonInvalidParam1, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(multi_sig_p, ErrorCode::CommonInvalidParam1);
trace!("indy_crypto_bls_multi_signature_from_bytes: bytes: {:?}", bytes);
let res = match MultiSignature::from_bytes(bytes) {
Ok(multi_sig) => {
trace!("indy_crypto_bls_multi_signature_from_bytes: multi_sig: {:?}", multi_sig);
unsafe {
*multi_sig_p = Box::into_raw(Box::new(multi_sig)) as *const c_void;
trace!("indy_crypto_bls_multi_signature_from_bytes: *multi_sig_p: {:?}", *multi_sig_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_multi_signature_from_bytes: <<< res: {:?}", res);
res
}
/// Returns bytes representation of multi signature.
///
/// Note: Returned buffer lifetime is the same as multi signature instance.
///
/// # Arguments
/// * `multi_sig` - Multi signature instance pointer
/// * `bytes_p` - Pointer that will contains bytes buffer
/// * `bytes_len_p` - Pointer that will contains bytes buffer len
#[no_mangle]
pub extern fn indy_crypto_bls_multi_signature_as_bytes(multi_sig: *const c_void,
bytes_p: *mut *const u8, bytes_len_p: *mut usize) -> ErrorCode {
trace!("indy_crypto_bls_multi_signature_as_bytes: >>> multi_sig: {:?}, bytes_p: {:?}, bytes_len_p: {:?}", multi_sig, bytes_p, bytes_len_p);
check_useful_c_ptr!(multi_sig, ErrorCode::CommonInvalidParam1);
check_useful_c_ptr!(bytes_p, ErrorCode::CommonInvalidParam2);
check_useful_c_ptr!(bytes_len_p, ErrorCode::CommonInvalidParam3);
let multi_sig = unsafe { &*(multi_sig as *const MultiSignature) };
trace!("indy_crypto_bls_multi_signature_as_bytes: multi_sig: {:?}", multi_sig);
unsafe {
*bytes_p = multi_sig.as_bytes().as_ptr();
*bytes_len_p = multi_sig.as_bytes().len();
};
let res = ErrorCode::Success;
trace!("indy_crypto_bls_multi_signature_as_bytes: <<< res: {:?}", res);
res
}
/// Deallocates multi signature instance.
///
/// # Arguments
/// * `multi_sig` - Multi signature instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_multi_signature_free(multi_sig: *const c_void) -> ErrorCode {
check_useful_c_ptr!(multi_sig, ErrorCode::CommonInvalidParam1);
trace!("indy_crypto_bls_multi_signature_free: >>> multi_sig: {:?}", multi_sig);
unsafe { Box::from_raw(multi_sig as *mut MultiSignature); }
let res = ErrorCode::Success;
trace!("indy_crypto_bls_multi_signature_free: <<< res: {:?}", res);
res
}
/// Signs the message and returns signature.
///
/// Note: Signature instance deallocation must be performed by calling indy_crypto_bls_signature_free.
///
/// # Arguments
///
/// * `message` - Message to sign buffer pointer
/// * `message_len` - Message to sign buffer len
/// * `sign_key` - Pointer to Sign Key instance
/// * `signature_p` - Reference that will contain Signature instance pointer
#[no_mangle]
pub extern fn indy_crypto_bls_sign(message: *const u8,
message_len: usize,
sign_key: *const c_void,
signature_p: *mut *const c_void) -> ErrorCode {
trace!("indy_crypto_bls_sign: >>> message: {:?}, message_len: {:?}, sign_key: {:?}, signature_p: {:?}", message, message_len, sign_key, signature_p);
check_useful_c_byte_array!(message, message_len,
ErrorCode::CommonInvalidParam1, ErrorCode::CommonInvalidParam2);
check_useful_c_reference!(sign_key, SignKey, ErrorCode::CommonInvalidParam3);
check_useful_c_ptr!(signature_p, ErrorCode::CommonInvalidParam5);
trace!("indy_crypto_bls_sign: message: {:?}, sign_key: {:?}", message, secret!(sign_key));
let res = match Bls::sign(message, sign_key) {
Ok(signature) => {
unsafe {
trace!("indy_crypto_bls_sign: signature: {:?}", signature);
*signature_p = Box::into_raw(Box::new(signature)) as *const c_void;
trace!("indy_crypto_bls_sign: *signature_p: {:?}", *signature_p);
}
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_sign: <<< res: {:?}", res);
res
}
/// Verifies the message signature and returns true if the signature is valid, or false otherwise.
///
/// # Arguments
///
/// * `signature` - Signature instance pointer
/// * `message` - Message to verify buffer pointer
/// * `message_len` - Message to verify buffer len
/// * `ver_key` - Verification key instance pointer
/// * `gen` - Generator instance pointer
/// * `valid_p` - Reference that will be filled with true if the signature is valid, or false otherwise.
#[no_mangle]
pub extern fn indy_crypto_bsl_verify(signature: *const c_void,
message: *const u8,
message_len: usize,
ver_key: *const c_void,
gen: *const c_void,
valid_p: *mut bool) -> ErrorCode {
trace!("indy_crypto_bsl_verify: >>> signature: {:?}, message: {:?}, message_len: {:?}, ver_key: {:?}, gen: {:?}, valid_p: {:?}", signature, message, message_len, ver_key, gen, valid_p);
check_useful_c_reference!(signature, Signature, ErrorCode::CommonInvalidParam1);
check_useful_c_byte_array!(message, message_len,
ErrorCode::CommonInvalidParam2, ErrorCode::CommonInvalidParam3);
check_useful_c_reference!(ver_key, VerKey, ErrorCode::CommonInvalidParam4);
check_useful_c_reference!(gen, Generator, ErrorCode::CommonInvalidParam5);
check_useful_c_ptr!(valid_p, ErrorCode::CommonInvalidParam6);
trace!("indy_crypto_bsl_verify: signature: {:?}, message: {:?}, ver_key: {:?}, gen: {:?}", signature, message, ver_key, gen);
let res = match Bls::verify(signature, message, ver_key, gen) {
Ok(valid) => {
trace!("indy_crypto_bsl_verify: valid: {:?}", valid);
unsafe { *valid_p = valid; }
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bsl_verify: <<< res: {:?}", res);
res
}
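// A minimal sketch (an addition) of the sign/verify round trip at the safe `bls` layer
// wrapped by indy_crypto_bls_sign and indy_crypto_bsl_verify above; the argument shapes
// follow the calls inside those wrappers.
#[cfg(test)]
mod sign_verify_sketch {
    use super::*;
    #[test]
    fn sign_and_verify_with_safe_api() {
        let gen = Generator::new().unwrap();
        let sign_key = SignKey::new(None).unwrap();
        let ver_key = VerKey::new(&gen, &sign_key).unwrap();
        let message = b"test message";
        // Sign with the private key, then verify against the matching verification key.
        let signature = Bls::sign(message, &sign_key).unwrap();
        assert!(Bls::verify(&signature, message, &ver_key, &gen).unwrap());
    }
}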
/// Verifies the message multi signature and returns true if the signature is valid, or false otherwise.
///
/// # Arguments
///
/// * `multi_sig` - Multi signature instance pointer
/// * `message` - Message to verify buffer pointer
/// * `message_len` - Message to verify buffer len
/// * `ver_keys` - Verification key instance pointers array
/// * `ver_keys_len` - Verification keys instance pointers array len
/// * `gen` - Generator point instance
/// * `valid_p` - Reference that will be filled with true if the signature is valid, or false otherwise.
#[no_mangle]
pub extern fn indy_crypto_bls_verify_multi_sig(multi_sig: *const c_void,
message: *const u8,
message_len: usize,
ver_keys: *const *const c_void,
ver_keys_len: usize,
gen: *const c_void,
valid_p: *mut bool) -> ErrorCode {
trace!("indy_crypto_bls_verify_multi_sig: >>> multi_sig: {:?}, message: {:?}, message_len: {:?}, ver_keys: {:?}, ver_keys_len: {:?}, gen: {:?}, valid_p: {:?}", multi_sig, message, message_len, ver_keys, ver_keys_len, gen, valid_p);
check_useful_c_reference!(multi_sig, MultiSignature, ErrorCode::CommonInvalidParam1);
check_useful_c_byte_array!(message, message_len, ErrorCode::CommonInvalidParam2, ErrorCode::CommonInvalidParam3);
check_useful_c_reference_array!(ver_keys, ver_keys_len, VerKey, ErrorCode::CommonInvalidParam4, ErrorCode::CommonInvalidParam5);
check_useful_c_reference!(gen, Generator, ErrorCode::CommonInvalidParam6);
check_useful_c_ptr!(valid_p, ErrorCode::CommonInvalidParam7);
trace!("indy_crypto_bls_verify_multi_sig: multi_sig: {:?}, message: {:?}, ver_keys: {:?}, gen: {:?}", multi_sig, message, ver_keys, gen);
let res = match Bls::verify_multi_sig(multi_sig, message, &ver_keys, gen) {
Ok(valid) => {
trace!("indy_crypto_bls_verify_multi_sig: valid: {:?}", valid);
unsafe { *valid_p = valid; }
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bls_verify_multi_sig: <<< res: {:?}", res);
res
}
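// A minimal sketch (an addition with assumptions) of multi-signature aggregation at the
// safe `bls` layer. The slice-of-references shapes for MultiSignature::new and
// Bls::verify_multi_sig are inferred from the reference-array handling in the wrapper above
// and may differ in other versions of the `bls` module.
#[cfg(test)]
mod multi_sig_sketch {
    use super::*;
    #[test]
    fn aggregate_and_verify_with_safe_api() {
        let gen = Generator::new().unwrap();
        let sign_key1 = SignKey::new(None).unwrap();
        let sign_key2 = SignKey::new(None).unwrap();
        let ver_key1 = VerKey::new(&gen, &sign_key1).unwrap();
        let ver_key2 = VerKey::new(&gen, &sign_key2).unwrap();
        let message = b"test message";
        let signature1 = Bls::sign(message, &sign_key1).unwrap();
        let signature2 = Bls::sign(message, &sign_key2).unwrap();
        // Aggregate the individual signatures and verify against all participating ver keys.
        let multi_sig = MultiSignature::new(&[&signature1, &signature2]).unwrap();
        assert!(Bls::verify_multi_sig(&multi_sig, message, &[&ver_key1, &ver_key2], &gen).unwrap());
    }
}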
/// Verifies the proof of possession and returns true if it is valid, or false otherwise.
///
/// # Arguments
///
/// * `pop` - Proof of possession
/// * `ver_key` - Verification key instance pointer
/// * `gen` - Generator instance pointer
/// * `valid_p` - Reference that will be filled with true if the proof of possession is valid, or false otherwise.
#[no_mangle]
pub extern fn indy_crypto_bsl_verify_pop(pop: *const c_void,
ver_key: *const c_void,
gen: *const c_void,
valid_p: *mut bool) -> ErrorCode {
trace!("indy_crypto_bsl_verify_pop: >>> pop: {:?}, ver_key: {:?}, gen: {:?}, valid_p: {:?}", pop, ver_key, gen, valid_p);
check_useful_c_reference!(pop, ProofOfPossession, ErrorCode::CommonInvalidParam1);
check_useful_c_reference!(ver_key, VerKey, ErrorCode::CommonInvalidParam2);
check_useful_c_reference!(gen, Generator, ErrorCode::CommonInvalidParam3);
check_useful_c_ptr!(valid_p, ErrorCode::CommonInvalidParam4);
trace!("indy_crypto_bsl_verify_pop: pop: {:?}, ver_key: {:?}, gen: {:?}", pop, ver_key, gen);
let res = match Bls::verify_proof_of_posession(pop, ver_key, gen) {
Ok(valid) => {
trace!("indy_crypto_bsl_verify_pop: valid: {:?}", valid);
unsafe { *valid_p = valid; }
ErrorCode::Success
}
Err(err) => err.to_error_code()
};
trace!("indy_crypto_bsl_verify_pop: <<< res: {:?}", res);
res
}
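// A minimal sketch (an addition) of the proof-of-possession flow at the safe `bls` layer:
// prove control of the sign key behind a ver key, then check it. The
// `verify_proof_of_posession` spelling is taken from the wrapper call above.
#[cfg(test)]
mod pop_sketch {
    use super::*;
    #[test]
    fn prove_and_verify_possession_with_safe_api() {
        let gen = Generator::new().unwrap();
        let sign_key = SignKey::new(None).unwrap();
        let ver_key = VerKey::new(&gen, &sign_key).unwrap();
        // Prove that the holder of ver_key also controls the matching sign key.
        let pop = ProofOfPossession::new(&ver_key, &sign_key).unwrap();
        assert!(Bls::verify_proof_of_posession(&pop, &ver_key, &gen).unwrap());
    }
}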
#[cfg(test)]
mod tests {
use super::*;
use std::ptr;
#[test]
fn indy_crypto_bls_generator_new_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
assert!(!gen.is_null());
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_generator_as_bytes_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_generator_as_bytes(gen, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
assert!(!gen.is_null());
assert!(bytes_len > 0);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_generator_from_bytes_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_generator_as_bytes(gen, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
let mut gen2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_from_bytes(bytes, bytes_len, &mut gen2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_generator_free(gen2);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_generator_free_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
assert!(!gen.is_null());
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_sign_key_new_works() {
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
assert!(!sign_key.is_null());
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_sign_key_new_works_for_seed() {
let mut sign_key: *const c_void = ptr::null();
let seed_v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 2, 3, 4, 5, 6, 7, 8, 9, 10,
21, 2, 3, 4, 5, 6, 7, 8, 9, 10, 31, 32];
let seed = seed_v.as_ptr();
let seed_len = seed_v.len();
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
assert!(!sign_key.is_null());
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_sign_key_as_bytes_works() {
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_as_bytes(sign_key, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
assert!(!bytes.is_null());
assert!(bytes_len > 0);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_sign_key_from_bytes_works() {
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_as_bytes(sign_key, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign_key_from_bytes(bytes, bytes_len, &mut sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key2);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_sign_key_free_works() {
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_ver_key_new_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key, &mut ver_key);
assert_eq!(err_code, ErrorCode::Success);
assert!(!ver_key.is_null());
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_ver_key_as_bytes_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key, &mut ver_key);
assert_eq!(err_code, ErrorCode::Success);
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_ver_key_as_bytes(ver_key, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
assert!(!bytes.is_null());
assert!(bytes_len > 0);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_ver_key_from_bytes_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key, &mut ver_key);
assert_eq!(err_code, ErrorCode::Success);
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_ver_key_as_bytes(ver_key, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
assert!(!bytes.is_null());
assert!(bytes_len > 0);
let mut ver_key2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_from_bytes(bytes, bytes_len, &mut ver_key2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key2);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_ver_key_free_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key, &mut ver_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_signature_as_bytes_works() {
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let message_v = vec![1, 2, 3, 4, 5];
let message = message_v.as_ptr();
let message_len = message_v.len();
let mut signature: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key, &mut signature);
assert_eq!(err_code, ErrorCode::Success);
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_signature_as_bytes(signature, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
assert!(!bytes.is_null());
assert!(bytes_len > 0);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_signature_from_bytes_works() {
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let message_v = vec![1, 2, 3, 4, 5];
let message = message_v.as_ptr();
let message_len = message_v.len();
let mut signature: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key, &mut signature);
assert_eq!(err_code, ErrorCode::Success);
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_signature_as_bytes(signature, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
assert!(!bytes.is_null());
assert!(bytes_len > 0);
let mut signature2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_signature_from_bytes(bytes, bytes_len, &mut signature2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature2);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_signature_free_works() {
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let message_v = vec![1, 2, 3, 4, 5];
let message = message_v.as_ptr();
let message_len = message_v.len();
let mut signature: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key, &mut signature);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_multi_signature_new_works() {
let mut sign_key1: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key2: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let message_v = vec![1, 2, 3, 4, 5];
let message = message_v.as_ptr();
let message_len = message_v.len();
let mut signature1: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key1, &mut signature1);
assert_eq!(err_code, ErrorCode::Success);
let mut signature2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key2, &mut signature2);
assert_eq!(err_code, ErrorCode::Success);
let signatures = [signature1, signature2];
let mut multi_sig: *const c_void = ptr::null();
let err_code = indy_crypto_bls_multi_signature_new(signatures.as_ptr(), signatures.len(), &mut multi_sig);
assert_eq!(err_code, ErrorCode::Success);
assert!(!multi_sig.is_null());
let err_code = indy_crypto_bls_sign_key_free(sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_multi_signature_free(multi_sig);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_multi_signature_as_bytes_works() {
let mut sign_key1: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key2: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let message_v = vec![1, 2, 3, 4, 5];
let message = message_v.as_ptr();
let message_len = message_v.len();
let mut signature1: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key1, &mut signature1);
assert_eq!(err_code, ErrorCode::Success);
let mut signature2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key2, &mut signature2);
assert_eq!(err_code, ErrorCode::Success);
let signatures = [signature1, signature2];
let mut multi_sig: *const c_void = ptr::null();
let err_code = indy_crypto_bls_multi_signature_new(signatures.as_ptr(), signatures.len(), &mut multi_sig);
assert_eq!(err_code, ErrorCode::Success);
assert!(!multi_sig.is_null());
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_multi_signature_as_bytes(multi_sig, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
assert!(!bytes.is_null());
assert!(bytes_len > 0);
let err_code = indy_crypto_bls_sign_key_free(sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_multi_signature_free(multi_sig);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_multi_signature_from_bytes_works() {
let mut sign_key1: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key2: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let message_v = vec![1, 2, 3, 4, 5];
let message = message_v.as_ptr();
let message_len = message_v.len();
let mut signature1: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key1, &mut signature1);
assert_eq!(err_code, ErrorCode::Success);
let mut signature2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key2, &mut signature2);
assert_eq!(err_code, ErrorCode::Success);
let signatures = [signature1, signature2];
let mut multi_sig: *const c_void = ptr::null();
let err_code = indy_crypto_bls_multi_signature_new(signatures.as_ptr(), signatures.len(), &mut multi_sig);
assert_eq!(err_code, ErrorCode::Success);
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_multi_signature_as_bytes(multi_sig, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
assert!(!bytes.is_null());
assert!(bytes_len > 0);
let mut multi_sig2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_multi_signature_from_bytes(bytes, bytes_len, &mut multi_sig2);
assert_eq!(err_code, ErrorCode::Success);
assert!(!multi_sig2.is_null());
let err_code = indy_crypto_bls_sign_key_free(sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_multi_signature_free(multi_sig);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_multi_signature_free(multi_sig2);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_multi_signature_free_works() {
let mut sign_key1: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key2: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let message_v = vec![1, 2, 3, 4, 5];
let message = message_v.as_ptr();
let message_len = message_v.len();
let mut signature1: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key1, &mut signature1);
assert_eq!(err_code, ErrorCode::Success);
let mut signature2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key2, &mut signature2);
assert_eq!(err_code, ErrorCode::Success);
let signatures = [signature1, signature2];
let mut multi_sig: *const c_void = ptr::null();
let err_code = indy_crypto_bls_multi_signature_new(signatures.as_ptr(), signatures.len(), &mut multi_sig);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_multi_signature_free(multi_sig);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bsl_verify_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key, &mut ver_key);
assert_eq!(err_code, ErrorCode::Success);
let message_v = vec![1, 2, 3, 4, 5];
let message = message_v.as_ptr();
let message_len = message_v.len();
let mut signature: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key, &mut signature);
assert_eq!(err_code, ErrorCode::Success);
let mut valid = false;
let err_code = indy_crypto_bsl_verify(signature,
message, message_len,
ver_key,
gen, &mut valid);
assert_eq!(err_code, ErrorCode::Success);
assert!(valid);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature);
assert_eq!(err_code, ErrorCode::Success);
}
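    // The test above exercises the full single-signature lifecycle:
    // generator -> sign key -> ver key -> sign -> verify -> free every handle.
    // The helper below is a minimal sketch of the signing step only, reusing
    // the FFI signatures already exercised in this module; it is not called
    // by the tests and its name is purely illustrative.
    #[allow(dead_code)]
    fn sketch_sign(message: &[u8], sign_key: *const c_void) -> *const c_void {
        let mut signature: *const c_void = ptr::null();
        let err_code = indy_crypto_bls_sign(message.as_ptr(), message.len(), sign_key, &mut signature);
        assert_eq!(err_code, ErrorCode::Success);
        signature
    }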
#[test]
fn indy_crypto_bsl_verify_works_for_invalid() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key2: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key2, &mut ver_key);
assert_eq!(err_code, ErrorCode::Success);
let message_v = vec![1, 2, 3, 4, 5];
let message = message_v.as_ptr();
let message_len = message_v.len();
let mut signature: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key, &mut signature);
assert_eq!(err_code, ErrorCode::Success);
let mut valid = false;
let err_code = indy_crypto_bsl_verify(signature,
message, message_len,
ver_key,
gen, &mut valid);
assert_eq!(err_code, ErrorCode::Success);
assert!(!valid);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_pop_new_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key, &mut ver_key);
assert_eq!(err_code, ErrorCode::Success);
assert!(!ver_key.is_null());
let mut pop: *const c_void = ptr::null();
let err_code = indy_crypto_bls_pop_new(ver_key, sign_key, &mut pop);
assert_eq!(err_code, ErrorCode::Success);
assert!(!pop.is_null());
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_pop_free(pop);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_pop_as_bytes_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key, &mut ver_key);
assert_eq!(err_code, ErrorCode::Success);
let mut pop: *const c_void = ptr::null();
let err_code = indy_crypto_bls_pop_new(ver_key, sign_key, &mut pop);
assert_eq!(err_code, ErrorCode::Success);
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_pop_as_bytes(pop, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
assert!(!bytes.is_null());
assert!(bytes_len > 0);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_pop_free(pop);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_pop_from_bytes_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key, &mut ver_key);
assert_eq!(err_code, ErrorCode::Success);
let mut pop: *const c_void = ptr::null();
let err_code = indy_crypto_bls_pop_new(ver_key, sign_key, &mut pop);
assert_eq!(err_code, ErrorCode::Success);
let mut bytes: *const u8 = ptr::null();
let mut bytes_len: usize = 0;
let err_code = indy_crypto_bls_pop_as_bytes(pop, &mut bytes, &mut bytes_len);
assert_eq!(err_code, ErrorCode::Success);
assert!(!bytes.is_null());
assert!(bytes_len > 0);
let mut pop2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_pop_from_bytes(bytes, bytes_len, &mut pop2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_pop_free(pop);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_pop_free(pop2);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bls_pop_free_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key, &mut ver_key);
assert_eq!(err_code, ErrorCode::Success);
let mut pop: *const c_void = ptr::null();
let err_code = indy_crypto_bls_pop_new(ver_key, sign_key, &mut pop);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_pop_free(pop);
assert_eq!(err_code, ErrorCode::Success);
}
#[test]
fn indy_crypto_bsl_verify_pop_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key, &mut ver_key);
assert_eq!(err_code, ErrorCode::Success);
let mut pop: *const c_void = ptr::null();
let err_code = indy_crypto_bls_pop_new(ver_key, sign_key, &mut pop);
assert_eq!(err_code, ErrorCode::Success);
let mut valid = false;
let err_code = indy_crypto_bsl_verify_pop(pop,
ver_key,
gen,
&mut valid);
assert_eq!(err_code, ErrorCode::Success);
assert!(valid);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_pop_free(pop);
assert_eq!(err_code, ErrorCode::Success);
}
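    // Proof of possession (PoP) binds a verification key to its signing key:
    // indy_crypto_bls_pop_new produces the proof from the pair, and
    // indy_crypto_bsl_verify_pop checks it against the same generator.
    // Below is a minimal sketch of the check, assuming the call signatures
    // used above; the `sketch_` helper is illustrative and unused.
    #[allow(dead_code)]
    fn sketch_verify_pop(pop: *const c_void, ver_key: *const c_void, gen: *const c_void) -> bool {
        let mut valid = false;
        let err_code = indy_crypto_bsl_verify_pop(pop, ver_key, gen, &mut valid);
        assert_eq!(err_code, ErrorCode::Success);
        valid
    }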
#[test]
fn indy_crypto_bls_verify_multi_sig_works() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key1: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key2: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let message_v = vec![1, 2, 3, 4, 5];
let message = message_v.as_ptr();
let message_len = message_v.len();
let mut signature1: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key1, &mut signature1);
assert_eq!(err_code, ErrorCode::Success);
let mut signature2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key2, &mut signature2);
assert_eq!(err_code, ErrorCode::Success);
let signatures = [signature1, signature2];
let mut multi_sig: *const c_void = ptr::null();
let err_code = indy_crypto_bls_multi_signature_new(signatures.as_ptr(), signatures.len(), &mut multi_sig);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key1: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key1, &mut ver_key1);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key2, &mut ver_key2);
assert_eq!(err_code, ErrorCode::Success);
let ver_keys = [ver_key1, ver_key2];
let mut valid = false;
let err_code = indy_crypto_bls_verify_multi_sig(multi_sig,
message, message_len,
ver_keys.as_ptr(), ver_keys.len(),
gen,
&mut valid);
assert_eq!(err_code, ErrorCode::Success);
assert!(valid);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_multi_signature_free(multi_sig);
assert_eq!(err_code, ErrorCode::Success);
}
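    // Aggregated verification succeeds only when the multi-signature was built
    // from signatures over the same message by exactly the key set passed as
    // `ver_keys`; the next test swaps in an unrelated key to show the failure
    // case. A minimal sketch of the aggregation call, assuming the signatures
    // used above (the helper is illustrative and unused):
    #[allow(dead_code)]
    fn sketch_aggregate(signatures: &[*const c_void]) -> *const c_void {
        let mut multi_sig: *const c_void = ptr::null();
        let err_code = indy_crypto_bls_multi_signature_new(signatures.as_ptr(), signatures.len(), &mut multi_sig);
        assert_eq!(err_code, ErrorCode::Success);
        multi_sig
    }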
#[test]
fn indy_crypto_bls_verify_multi_sig_works_for_invalid() {
let mut gen: *const c_void = ptr::null();
let err_code = indy_crypto_bls_generator_new(&mut gen);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key1: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key2: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let mut sign_key3: *const c_void = ptr::null();
let seed: *const u8 = ptr::null();
let seed_len: usize = 0;
let err_code = indy_crypto_bls_sign_key_new(seed, seed_len, &mut sign_key3);
assert_eq!(err_code, ErrorCode::Success);
let message_v = vec![1, 2, 3, 4, 5];
let message = message_v.as_ptr();
let message_len = message_v.len();
let mut signature1: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key1, &mut signature1);
assert_eq!(err_code, ErrorCode::Success);
let mut signature2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_sign(message, message_len, sign_key2, &mut signature2);
assert_eq!(err_code, ErrorCode::Success);
let signatures = [signature1, signature2];
let mut multi_sig: *const c_void = ptr::null();
let err_code = indy_crypto_bls_multi_signature_new(signatures.as_ptr(), signatures.len(), &mut multi_sig);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key1: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key1, &mut ver_key1);
assert_eq!(err_code, ErrorCode::Success);
let mut ver_key2: *const c_void = ptr::null();
let err_code = indy_crypto_bls_ver_key_new(gen, sign_key3, &mut ver_key2);
assert_eq!(err_code, ErrorCode::Success);
let ver_keys = [ver_key1, ver_key2];
let mut valid = false;
let err_code = indy_crypto_bls_verify_multi_sig(multi_sig,
message, message_len,
ver_keys.as_ptr(), ver_keys.len(),
gen,
&mut valid);
assert_eq!(err_code, ErrorCode::Success);
assert!(!valid);
let err_code = indy_crypto_bls_generator_free(gen);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_sign_key_free(sign_key3);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_ver_key_free(ver_key2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature1);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_signature_free(signature2);
assert_eq!(err_code, ErrorCode::Success);
let err_code = indy_crypto_bls_multi_signature_free(multi_sig);
assert_eq!(err_code, ErrorCode::Success);
}
} | 40.658886 | 235 | 0.640119 |
5df4361a8cf462ffeb1a801d5e3e607888d353ff | 265,896 | // This file is generated by rust-protobuf 2.27.1. Do not edit
// @generated
// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![allow(unused_attributes)]
#![cfg_attr(rustfmt, rustfmt::skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unused_imports)]
#![allow(unused_results)]
//! Generated file from `tensorflow/core/protobuf/config.proto`
/// Generated files are compatible only with the same version
/// of protobuf runtime.
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_27_1;
#[derive(PartialEq,Clone,Default)]
pub struct GPUOptions {
// message fields
pub per_process_gpu_memory_fraction: f64,
pub allow_growth: bool,
pub allocator_type: ::std::string::String,
pub deferred_deletion_bytes: i64,
pub visible_device_list: ::std::string::String,
pub polling_active_delay_usecs: i32,
pub polling_inactive_delay_msecs: i32,
pub force_gpu_compatible: bool,
pub experimental: ::protobuf::SingularPtrField<GPUOptions_Experimental>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a GPUOptions {
fn default() -> &'a GPUOptions {
<GPUOptions as ::protobuf::Message>::default_instance()
}
}
impl GPUOptions {
pub fn new() -> GPUOptions {
::std::default::Default::default()
}
// double per_process_gpu_memory_fraction = 1;
pub fn get_per_process_gpu_memory_fraction(&self) -> f64 {
self.per_process_gpu_memory_fraction
}
pub fn clear_per_process_gpu_memory_fraction(&mut self) {
self.per_process_gpu_memory_fraction = 0.;
}
// Param is passed by value, moved
pub fn set_per_process_gpu_memory_fraction(&mut self, v: f64) {
self.per_process_gpu_memory_fraction = v;
}
// bool allow_growth = 4;
pub fn get_allow_growth(&self) -> bool {
self.allow_growth
}
pub fn clear_allow_growth(&mut self) {
self.allow_growth = false;
}
// Param is passed by value, moved
pub fn set_allow_growth(&mut self, v: bool) {
self.allow_growth = v;
}
// string allocator_type = 2;
pub fn get_allocator_type(&self) -> &str {
&self.allocator_type
}
pub fn clear_allocator_type(&mut self) {
self.allocator_type.clear();
}
// Param is passed by value, moved
pub fn set_allocator_type(&mut self, v: ::std::string::String) {
self.allocator_type = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_allocator_type(&mut self) -> &mut ::std::string::String {
&mut self.allocator_type
}
// Take field
pub fn take_allocator_type(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.allocator_type, ::std::string::String::new())
}
// int64 deferred_deletion_bytes = 3;
pub fn get_deferred_deletion_bytes(&self) -> i64 {
self.deferred_deletion_bytes
}
pub fn clear_deferred_deletion_bytes(&mut self) {
self.deferred_deletion_bytes = 0;
}
// Param is passed by value, moved
pub fn set_deferred_deletion_bytes(&mut self, v: i64) {
self.deferred_deletion_bytes = v;
}
// string visible_device_list = 5;
pub fn get_visible_device_list(&self) -> &str {
&self.visible_device_list
}
pub fn clear_visible_device_list(&mut self) {
self.visible_device_list.clear();
}
// Param is passed by value, moved
pub fn set_visible_device_list(&mut self, v: ::std::string::String) {
self.visible_device_list = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_visible_device_list(&mut self) -> &mut ::std::string::String {
&mut self.visible_device_list
}
// Take field
pub fn take_visible_device_list(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.visible_device_list, ::std::string::String::new())
}
// int32 polling_active_delay_usecs = 6;
pub fn get_polling_active_delay_usecs(&self) -> i32 {
self.polling_active_delay_usecs
}
pub fn clear_polling_active_delay_usecs(&mut self) {
self.polling_active_delay_usecs = 0;
}
// Param is passed by value, moved
pub fn set_polling_active_delay_usecs(&mut self, v: i32) {
self.polling_active_delay_usecs = v;
}
// int32 polling_inactive_delay_msecs = 7;
pub fn get_polling_inactive_delay_msecs(&self) -> i32 {
self.polling_inactive_delay_msecs
}
pub fn clear_polling_inactive_delay_msecs(&mut self) {
self.polling_inactive_delay_msecs = 0;
}
// Param is passed by value, moved
pub fn set_polling_inactive_delay_msecs(&mut self, v: i32) {
self.polling_inactive_delay_msecs = v;
}
// bool force_gpu_compatible = 8;
pub fn get_force_gpu_compatible(&self) -> bool {
self.force_gpu_compatible
}
pub fn clear_force_gpu_compatible(&mut self) {
self.force_gpu_compatible = false;
}
// Param is passed by value, moved
pub fn set_force_gpu_compatible(&mut self, v: bool) {
self.force_gpu_compatible = v;
}
// .tensorflow.GPUOptions.Experimental experimental = 9;
pub fn get_experimental(&self) -> &GPUOptions_Experimental {
self.experimental.as_ref().unwrap_or_else(|| <GPUOptions_Experimental as ::protobuf::Message>::default_instance())
}
pub fn clear_experimental(&mut self) {
self.experimental.clear();
}
pub fn has_experimental(&self) -> bool {
self.experimental.is_some()
}
// Param is passed by value, moved
pub fn set_experimental(&mut self, v: GPUOptions_Experimental) {
self.experimental = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_experimental(&mut self) -> &mut GPUOptions_Experimental {
if self.experimental.is_none() {
self.experimental.set_default();
}
self.experimental.as_mut().unwrap()
}
// Take field
pub fn take_experimental(&mut self) -> GPUOptions_Experimental {
self.experimental.take().unwrap_or_else(|| GPUOptions_Experimental::new())
}
}
impl ::protobuf::Message for GPUOptions {
fn is_initialized(&self) -> bool {
for v in &self.experimental {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeFixed64 {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_double()?;
self.per_process_gpu_memory_fraction = tmp;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.allow_growth = tmp;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.allocator_type)?;
},
3 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.deferred_deletion_bytes = tmp;
},
5 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.visible_device_list)?;
},
6 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.polling_active_delay_usecs = tmp;
},
7 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.polling_inactive_delay_msecs = tmp;
},
8 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.force_gpu_compatible = tmp;
},
9 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.experimental)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.per_process_gpu_memory_fraction != 0. {
my_size += 9;
}
if self.allow_growth != false {
my_size += 2;
}
if !self.allocator_type.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.allocator_type);
}
if self.deferred_deletion_bytes != 0 {
my_size += ::protobuf::rt::value_size(3, self.deferred_deletion_bytes, ::protobuf::wire_format::WireTypeVarint);
}
if !self.visible_device_list.is_empty() {
my_size += ::protobuf::rt::string_size(5, &self.visible_device_list);
}
if self.polling_active_delay_usecs != 0 {
my_size += ::protobuf::rt::value_size(6, self.polling_active_delay_usecs, ::protobuf::wire_format::WireTypeVarint);
}
if self.polling_inactive_delay_msecs != 0 {
my_size += ::protobuf::rt::value_size(7, self.polling_inactive_delay_msecs, ::protobuf::wire_format::WireTypeVarint);
}
if self.force_gpu_compatible != false {
my_size += 2;
}
if let Some(ref v) = self.experimental.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.per_process_gpu_memory_fraction != 0. {
os.write_double(1, self.per_process_gpu_memory_fraction)?;
}
if self.allow_growth != false {
os.write_bool(4, self.allow_growth)?;
}
if !self.allocator_type.is_empty() {
os.write_string(2, &self.allocator_type)?;
}
if self.deferred_deletion_bytes != 0 {
os.write_int64(3, self.deferred_deletion_bytes)?;
}
if !self.visible_device_list.is_empty() {
os.write_string(5, &self.visible_device_list)?;
}
if self.polling_active_delay_usecs != 0 {
os.write_int32(6, self.polling_active_delay_usecs)?;
}
if self.polling_inactive_delay_msecs != 0 {
os.write_int32(7, self.polling_inactive_delay_msecs)?;
}
if self.force_gpu_compatible != false {
os.write_bool(8, self.force_gpu_compatible)?;
}
if let Some(ref v) = self.experimental.as_ref() {
os.write_tag(9, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> GPUOptions {
GPUOptions::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeDouble>(
"per_process_gpu_memory_fraction",
|m: &GPUOptions| { &m.per_process_gpu_memory_fraction },
|m: &mut GPUOptions| { &mut m.per_process_gpu_memory_fraction },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"allow_growth",
|m: &GPUOptions| { &m.allow_growth },
|m: &mut GPUOptions| { &mut m.allow_growth },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"allocator_type",
|m: &GPUOptions| { &m.allocator_type },
|m: &mut GPUOptions| { &mut m.allocator_type },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"deferred_deletion_bytes",
|m: &GPUOptions| { &m.deferred_deletion_bytes },
|m: &mut GPUOptions| { &mut m.deferred_deletion_bytes },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"visible_device_list",
|m: &GPUOptions| { &m.visible_device_list },
|m: &mut GPUOptions| { &mut m.visible_device_list },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"polling_active_delay_usecs",
|m: &GPUOptions| { &m.polling_active_delay_usecs },
|m: &mut GPUOptions| { &mut m.polling_active_delay_usecs },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"polling_inactive_delay_msecs",
|m: &GPUOptions| { &m.polling_inactive_delay_msecs },
|m: &mut GPUOptions| { &mut m.polling_inactive_delay_msecs },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"force_gpu_compatible",
|m: &GPUOptions| { &m.force_gpu_compatible },
|m: &mut GPUOptions| { &mut m.force_gpu_compatible },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<GPUOptions_Experimental>>(
"experimental",
|m: &GPUOptions| { &m.experimental },
|m: &mut GPUOptions| { &mut m.experimental },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<GPUOptions>(
"GPUOptions",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static GPUOptions {
static instance: ::protobuf::rt::LazyV2<GPUOptions> = ::protobuf::rt::LazyV2::INIT;
instance.get(GPUOptions::new)
}
}
impl ::protobuf::Clear for GPUOptions {
fn clear(&mut self) {
self.per_process_gpu_memory_fraction = 0.;
self.allow_growth = false;
self.allocator_type.clear();
self.deferred_deletion_bytes = 0;
self.visible_device_list.clear();
self.polling_active_delay_usecs = 0;
self.polling_inactive_delay_msecs = 0;
self.force_gpu_compatible = false;
self.experimental.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for GPUOptions {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for GPUOptions {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
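// Typical use of the generated type: construct it, populate the scalar fields
// through the generated setters, then serialize it through the
// `::protobuf::Message` trait. A sketch (not part of the generated output;
// `write_to_bytes` comes from the Message trait):
//
//     let mut gpu = GPUOptions::new();
//     gpu.set_allow_growth(true);
//     gpu.set_per_process_gpu_memory_fraction(0.5);
//     gpu.set_visible_device_list("0,1".to_string());
//     let bytes = ::protobuf::Message::write_to_bytes(&gpu).unwrap();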
#[derive(PartialEq,Clone,Default)]
pub struct GPUOptions_Experimental {
// message fields
pub virtual_devices: ::protobuf::RepeatedField<GPUOptions_Experimental_VirtualDevices>,
pub use_unified_memory: bool,
pub num_dev_to_dev_copy_streams: i32,
pub collective_ring_order: ::std::string::String,
pub timestamped_allocator: bool,
pub kernel_tracker_max_interval: i32,
pub kernel_tracker_max_bytes: i32,
pub kernel_tracker_max_pending: i32,
pub internal_fragmentation_fraction: f64,
pub use_cuda_malloc_async: bool,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a GPUOptions_Experimental {
fn default() -> &'a GPUOptions_Experimental {
<GPUOptions_Experimental as ::protobuf::Message>::default_instance()
}
}
impl GPUOptions_Experimental {
pub fn new() -> GPUOptions_Experimental {
::std::default::Default::default()
}
// repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
pub fn get_virtual_devices(&self) -> &[GPUOptions_Experimental_VirtualDevices] {
&self.virtual_devices
}
pub fn clear_virtual_devices(&mut self) {
self.virtual_devices.clear();
}
// Param is passed by value, moved
pub fn set_virtual_devices(&mut self, v: ::protobuf::RepeatedField<GPUOptions_Experimental_VirtualDevices>) {
self.virtual_devices = v;
}
// Mutable pointer to the field.
pub fn mut_virtual_devices(&mut self) -> &mut ::protobuf::RepeatedField<GPUOptions_Experimental_VirtualDevices> {
&mut self.virtual_devices
}
// Take field
pub fn take_virtual_devices(&mut self) -> ::protobuf::RepeatedField<GPUOptions_Experimental_VirtualDevices> {
::std::mem::replace(&mut self.virtual_devices, ::protobuf::RepeatedField::new())
}
// bool use_unified_memory = 2;
pub fn get_use_unified_memory(&self) -> bool {
self.use_unified_memory
}
pub fn clear_use_unified_memory(&mut self) {
self.use_unified_memory = false;
}
// Param is passed by value, moved
pub fn set_use_unified_memory(&mut self, v: bool) {
self.use_unified_memory = v;
}
// int32 num_dev_to_dev_copy_streams = 3;
pub fn get_num_dev_to_dev_copy_streams(&self) -> i32 {
self.num_dev_to_dev_copy_streams
}
pub fn clear_num_dev_to_dev_copy_streams(&mut self) {
self.num_dev_to_dev_copy_streams = 0;
}
// Param is passed by value, moved
pub fn set_num_dev_to_dev_copy_streams(&mut self, v: i32) {
self.num_dev_to_dev_copy_streams = v;
}
// string collective_ring_order = 4;
pub fn get_collective_ring_order(&self) -> &str {
&self.collective_ring_order
}
pub fn clear_collective_ring_order(&mut self) {
self.collective_ring_order.clear();
}
// Param is passed by value, moved
pub fn set_collective_ring_order(&mut self, v: ::std::string::String) {
self.collective_ring_order = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_collective_ring_order(&mut self) -> &mut ::std::string::String {
&mut self.collective_ring_order
}
// Take field
pub fn take_collective_ring_order(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.collective_ring_order, ::std::string::String::new())
}
// bool timestamped_allocator = 5;
pub fn get_timestamped_allocator(&self) -> bool {
self.timestamped_allocator
}
pub fn clear_timestamped_allocator(&mut self) {
self.timestamped_allocator = false;
}
// Param is passed by value, moved
pub fn set_timestamped_allocator(&mut self, v: bool) {
self.timestamped_allocator = v;
}
// int32 kernel_tracker_max_interval = 7;
pub fn get_kernel_tracker_max_interval(&self) -> i32 {
self.kernel_tracker_max_interval
}
pub fn clear_kernel_tracker_max_interval(&mut self) {
self.kernel_tracker_max_interval = 0;
}
// Param is passed by value, moved
pub fn set_kernel_tracker_max_interval(&mut self, v: i32) {
self.kernel_tracker_max_interval = v;
}
// int32 kernel_tracker_max_bytes = 8;
pub fn get_kernel_tracker_max_bytes(&self) -> i32 {
self.kernel_tracker_max_bytes
}
pub fn clear_kernel_tracker_max_bytes(&mut self) {
self.kernel_tracker_max_bytes = 0;
}
// Param is passed by value, moved
pub fn set_kernel_tracker_max_bytes(&mut self, v: i32) {
self.kernel_tracker_max_bytes = v;
}
// int32 kernel_tracker_max_pending = 9;
pub fn get_kernel_tracker_max_pending(&self) -> i32 {
self.kernel_tracker_max_pending
}
pub fn clear_kernel_tracker_max_pending(&mut self) {
self.kernel_tracker_max_pending = 0;
}
// Param is passed by value, moved
pub fn set_kernel_tracker_max_pending(&mut self, v: i32) {
self.kernel_tracker_max_pending = v;
}
// double internal_fragmentation_fraction = 10;
pub fn get_internal_fragmentation_fraction(&self) -> f64 {
self.internal_fragmentation_fraction
}
pub fn clear_internal_fragmentation_fraction(&mut self) {
self.internal_fragmentation_fraction = 0.;
}
// Param is passed by value, moved
pub fn set_internal_fragmentation_fraction(&mut self, v: f64) {
self.internal_fragmentation_fraction = v;
}
// bool use_cuda_malloc_async = 11;
pub fn get_use_cuda_malloc_async(&self) -> bool {
self.use_cuda_malloc_async
}
pub fn clear_use_cuda_malloc_async(&mut self) {
self.use_cuda_malloc_async = false;
}
// Param is passed by value, moved
pub fn set_use_cuda_malloc_async(&mut self, v: bool) {
self.use_cuda_malloc_async = v;
}
}
impl ::protobuf::Message for GPUOptions_Experimental {
fn is_initialized(&self) -> bool {
for v in &self.virtual_devices {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.virtual_devices)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.use_unified_memory = tmp;
},
3 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.num_dev_to_dev_copy_streams = tmp;
},
4 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.collective_ring_order)?;
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.timestamped_allocator = tmp;
},
7 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.kernel_tracker_max_interval = tmp;
},
8 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.kernel_tracker_max_bytes = tmp;
},
9 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.kernel_tracker_max_pending = tmp;
},
10 => {
if wire_type != ::protobuf::wire_format::WireTypeFixed64 {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_double()?;
self.internal_fragmentation_fraction = tmp;
},
11 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.use_cuda_malloc_async = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.virtual_devices {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
if self.use_unified_memory != false {
my_size += 2;
}
if self.num_dev_to_dev_copy_streams != 0 {
my_size += ::protobuf::rt::value_size(3, self.num_dev_to_dev_copy_streams, ::protobuf::wire_format::WireTypeVarint);
}
if !self.collective_ring_order.is_empty() {
my_size += ::protobuf::rt::string_size(4, &self.collective_ring_order);
}
if self.timestamped_allocator != false {
my_size += 2;
}
if self.kernel_tracker_max_interval != 0 {
my_size += ::protobuf::rt::value_size(7, self.kernel_tracker_max_interval, ::protobuf::wire_format::WireTypeVarint);
}
if self.kernel_tracker_max_bytes != 0 {
my_size += ::protobuf::rt::value_size(8, self.kernel_tracker_max_bytes, ::protobuf::wire_format::WireTypeVarint);
}
if self.kernel_tracker_max_pending != 0 {
my_size += ::protobuf::rt::value_size(9, self.kernel_tracker_max_pending, ::protobuf::wire_format::WireTypeVarint);
}
if self.internal_fragmentation_fraction != 0. {
my_size += 9;
}
if self.use_cuda_malloc_async != false {
my_size += 2;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.virtual_devices {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
if self.use_unified_memory != false {
os.write_bool(2, self.use_unified_memory)?;
}
if self.num_dev_to_dev_copy_streams != 0 {
os.write_int32(3, self.num_dev_to_dev_copy_streams)?;
}
if !self.collective_ring_order.is_empty() {
os.write_string(4, &self.collective_ring_order)?;
}
if self.timestamped_allocator != false {
os.write_bool(5, self.timestamped_allocator)?;
}
if self.kernel_tracker_max_interval != 0 {
os.write_int32(7, self.kernel_tracker_max_interval)?;
}
if self.kernel_tracker_max_bytes != 0 {
os.write_int32(8, self.kernel_tracker_max_bytes)?;
}
if self.kernel_tracker_max_pending != 0 {
os.write_int32(9, self.kernel_tracker_max_pending)?;
}
if self.internal_fragmentation_fraction != 0. {
os.write_double(10, self.internal_fragmentation_fraction)?;
}
if self.use_cuda_malloc_async != false {
os.write_bool(11, self.use_cuda_malloc_async)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> GPUOptions_Experimental {
GPUOptions_Experimental::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<GPUOptions_Experimental_VirtualDevices>>(
"virtual_devices",
|m: &GPUOptions_Experimental| { &m.virtual_devices },
|m: &mut GPUOptions_Experimental| { &mut m.virtual_devices },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"use_unified_memory",
|m: &GPUOptions_Experimental| { &m.use_unified_memory },
|m: &mut GPUOptions_Experimental| { &mut m.use_unified_memory },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"num_dev_to_dev_copy_streams",
|m: &GPUOptions_Experimental| { &m.num_dev_to_dev_copy_streams },
|m: &mut GPUOptions_Experimental| { &mut m.num_dev_to_dev_copy_streams },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"collective_ring_order",
|m: &GPUOptions_Experimental| { &m.collective_ring_order },
|m: &mut GPUOptions_Experimental| { &mut m.collective_ring_order },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"timestamped_allocator",
|m: &GPUOptions_Experimental| { &m.timestamped_allocator },
|m: &mut GPUOptions_Experimental| { &mut m.timestamped_allocator },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"kernel_tracker_max_interval",
|m: &GPUOptions_Experimental| { &m.kernel_tracker_max_interval },
|m: &mut GPUOptions_Experimental| { &mut m.kernel_tracker_max_interval },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"kernel_tracker_max_bytes",
|m: &GPUOptions_Experimental| { &m.kernel_tracker_max_bytes },
|m: &mut GPUOptions_Experimental| { &mut m.kernel_tracker_max_bytes },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"kernel_tracker_max_pending",
|m: &GPUOptions_Experimental| { &m.kernel_tracker_max_pending },
|m: &mut GPUOptions_Experimental| { &mut m.kernel_tracker_max_pending },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeDouble>(
"internal_fragmentation_fraction",
|m: &GPUOptions_Experimental| { &m.internal_fragmentation_fraction },
|m: &mut GPUOptions_Experimental| { &mut m.internal_fragmentation_fraction },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"use_cuda_malloc_async",
|m: &GPUOptions_Experimental| { &m.use_cuda_malloc_async },
|m: &mut GPUOptions_Experimental| { &mut m.use_cuda_malloc_async },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<GPUOptions_Experimental>(
"GPUOptions.Experimental",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static GPUOptions_Experimental {
static instance: ::protobuf::rt::LazyV2<GPUOptions_Experimental> = ::protobuf::rt::LazyV2::INIT;
instance.get(GPUOptions_Experimental::new)
}
}
impl ::protobuf::Clear for GPUOptions_Experimental {
fn clear(&mut self) {
self.virtual_devices.clear();
self.use_unified_memory = false;
self.num_dev_to_dev_copy_streams = 0;
self.collective_ring_order.clear();
self.timestamped_allocator = false;
self.kernel_tracker_max_interval = 0;
self.kernel_tracker_max_bytes = 0;
self.kernel_tracker_max_pending = 0;
self.internal_fragmentation_fraction = 0.;
self.use_cuda_malloc_async = false;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for GPUOptions_Experimental {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for GPUOptions_Experimental {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
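// `virtual_devices` is a repeated message field, so its accessors work on a
// `::protobuf::RepeatedField`. A sketch of splitting one physical GPU into two
// virtual devices (the memory limits are illustrative values):
//
//     let mut vd = GPUOptions_Experimental_VirtualDevices::new();
//     vd.mut_memory_limit_mb().push(1024.0);
//     vd.mut_memory_limit_mb().push(1024.0);
//     let mut exp = GPUOptions_Experimental::new();
//     exp.mut_virtual_devices().push(vd);
//     let mut gpu = GPUOptions::new();
//     gpu.set_experimental(exp);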
#[derive(PartialEq,Clone,Default)]
pub struct GPUOptions_Experimental_VirtualDevices {
// message fields
pub memory_limit_mb: ::std::vec::Vec<f32>,
pub priority: ::std::vec::Vec<i32>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a GPUOptions_Experimental_VirtualDevices {
fn default() -> &'a GPUOptions_Experimental_VirtualDevices {
<GPUOptions_Experimental_VirtualDevices as ::protobuf::Message>::default_instance()
}
}
impl GPUOptions_Experimental_VirtualDevices {
pub fn new() -> GPUOptions_Experimental_VirtualDevices {
::std::default::Default::default()
}
// repeated float memory_limit_mb = 1;
pub fn get_memory_limit_mb(&self) -> &[f32] {
&self.memory_limit_mb
}
pub fn clear_memory_limit_mb(&mut self) {
self.memory_limit_mb.clear();
}
// Param is passed by value, moved
pub fn set_memory_limit_mb(&mut self, v: ::std::vec::Vec<f32>) {
self.memory_limit_mb = v;
}
// Mutable pointer to the field.
pub fn mut_memory_limit_mb(&mut self) -> &mut ::std::vec::Vec<f32> {
&mut self.memory_limit_mb
}
// Take field
pub fn take_memory_limit_mb(&mut self) -> ::std::vec::Vec<f32> {
::std::mem::replace(&mut self.memory_limit_mb, ::std::vec::Vec::new())
}
// repeated int32 priority = 2;
pub fn get_priority(&self) -> &[i32] {
&self.priority
}
pub fn clear_priority(&mut self) {
self.priority.clear();
}
// Param is passed by value, moved
pub fn set_priority(&mut self, v: ::std::vec::Vec<i32>) {
self.priority = v;
}
// Mutable pointer to the field.
pub fn mut_priority(&mut self) -> &mut ::std::vec::Vec<i32> {
&mut self.priority
}
// Take field
pub fn take_priority(&mut self) -> ::std::vec::Vec<i32> {
::std::mem::replace(&mut self.priority, ::std::vec::Vec::new())
}
}
impl ::protobuf::Message for GPUOptions_Experimental_VirtualDevices {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_float_into(wire_type, is, &mut self.memory_limit_mb)?;
},
2 => {
::protobuf::rt::read_repeated_int32_into(wire_type, is, &mut self.priority)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
my_size += 5 * self.memory_limit_mb.len() as u32;
for value in &self.priority {
my_size += ::protobuf::rt::value_size(2, *value, ::protobuf::wire_format::WireTypeVarint);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.memory_limit_mb {
os.write_float(1, *v)?;
};
for v in &self.priority {
os.write_int32(2, *v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> GPUOptions_Experimental_VirtualDevices {
GPUOptions_Experimental_VirtualDevices::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_vec_accessor::<_, ::protobuf::types::ProtobufTypeFloat>(
"memory_limit_mb",
|m: &GPUOptions_Experimental_VirtualDevices| { &m.memory_limit_mb },
|m: &mut GPUOptions_Experimental_VirtualDevices| { &mut m.memory_limit_mb },
));
fields.push(::protobuf::reflect::accessor::make_vec_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"priority",
|m: &GPUOptions_Experimental_VirtualDevices| { &m.priority },
|m: &mut GPUOptions_Experimental_VirtualDevices| { &mut m.priority },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<GPUOptions_Experimental_VirtualDevices>(
"GPUOptions.Experimental.VirtualDevices",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static GPUOptions_Experimental_VirtualDevices {
static instance: ::protobuf::rt::LazyV2<GPUOptions_Experimental_VirtualDevices> = ::protobuf::rt::LazyV2::INIT;
instance.get(GPUOptions_Experimental_VirtualDevices::new)
}
}
impl ::protobuf::Clear for GPUOptions_Experimental_VirtualDevices {
fn clear(&mut self) {
self.memory_limit_mb.clear();
self.priority.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for GPUOptions_Experimental_VirtualDevices {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for GPUOptions_Experimental_VirtualDevices {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct OptimizerOptions {
// message fields
pub do_common_subexpression_elimination: bool,
pub do_constant_folding: bool,
pub max_folded_constant_in_bytes: i64,
pub do_function_inlining: bool,
pub opt_level: OptimizerOptions_Level,
pub global_jit_level: OptimizerOptions_GlobalJitLevel,
pub cpu_global_jit: bool,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a OptimizerOptions {
fn default() -> &'a OptimizerOptions {
<OptimizerOptions as ::protobuf::Message>::default_instance()
}
}
impl OptimizerOptions {
pub fn new() -> OptimizerOptions {
::std::default::Default::default()
}
// bool do_common_subexpression_elimination = 1;
pub fn get_do_common_subexpression_elimination(&self) -> bool {
self.do_common_subexpression_elimination
}
pub fn clear_do_common_subexpression_elimination(&mut self) {
self.do_common_subexpression_elimination = false;
}
// Param is passed by value, moved
pub fn set_do_common_subexpression_elimination(&mut self, v: bool) {
self.do_common_subexpression_elimination = v;
}
// bool do_constant_folding = 2;
pub fn get_do_constant_folding(&self) -> bool {
self.do_constant_folding
}
pub fn clear_do_constant_folding(&mut self) {
self.do_constant_folding = false;
}
// Param is passed by value, moved
pub fn set_do_constant_folding(&mut self, v: bool) {
self.do_constant_folding = v;
}
// int64 max_folded_constant_in_bytes = 6;
pub fn get_max_folded_constant_in_bytes(&self) -> i64 {
self.max_folded_constant_in_bytes
}
pub fn clear_max_folded_constant_in_bytes(&mut self) {
self.max_folded_constant_in_bytes = 0;
}
// Param is passed by value, moved
pub fn set_max_folded_constant_in_bytes(&mut self, v: i64) {
self.max_folded_constant_in_bytes = v;
}
// bool do_function_inlining = 4;
pub fn get_do_function_inlining(&self) -> bool {
self.do_function_inlining
}
pub fn clear_do_function_inlining(&mut self) {
self.do_function_inlining = false;
}
// Param is passed by value, moved
pub fn set_do_function_inlining(&mut self, v: bool) {
self.do_function_inlining = v;
}
// .tensorflow.OptimizerOptions.Level opt_level = 3;
pub fn get_opt_level(&self) -> OptimizerOptions_Level {
self.opt_level
}
pub fn clear_opt_level(&mut self) {
self.opt_level = OptimizerOptions_Level::L1;
}
// Param is passed by value, moved
pub fn set_opt_level(&mut self, v: OptimizerOptions_Level) {
self.opt_level = v;
}
// .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
pub fn get_global_jit_level(&self) -> OptimizerOptions_GlobalJitLevel {
self.global_jit_level
}
pub fn clear_global_jit_level(&mut self) {
self.global_jit_level = OptimizerOptions_GlobalJitLevel::DEFAULT;
}
// Param is passed by value, moved
pub fn set_global_jit_level(&mut self, v: OptimizerOptions_GlobalJitLevel) {
self.global_jit_level = v;
}
// bool cpu_global_jit = 7;
pub fn get_cpu_global_jit(&self) -> bool {
self.cpu_global_jit
}
pub fn clear_cpu_global_jit(&mut self) {
self.cpu_global_jit = false;
}
// Param is passed by value, moved
pub fn set_cpu_global_jit(&mut self, v: bool) {
self.cpu_global_jit = v;
}
}
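// The enum-typed fields default to the proto3 zero values (`Level::L1` and
// `GlobalJitLevel::DEFAULT`), so only non-default values are serialized.
// A sketch of turning on the common graph optimizations (illustrative values):
//
//     let mut opts = OptimizerOptions::new();
//     opts.set_do_common_subexpression_elimination(true);
//     opts.set_do_constant_folding(true);
//     opts.set_do_function_inlining(true);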
impl ::protobuf::Message for OptimizerOptions {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.do_common_subexpression_elimination = tmp;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.do_constant_folding = tmp;
},
6 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.max_folded_constant_in_bytes = tmp;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.do_function_inlining = tmp;
},
3 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.opt_level, 3, &mut self.unknown_fields)?
},
5 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.global_jit_level, 5, &mut self.unknown_fields)?
},
7 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.cpu_global_jit = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.do_common_subexpression_elimination != false {
my_size += 2;
}
if self.do_constant_folding != false {
my_size += 2;
}
if self.max_folded_constant_in_bytes != 0 {
my_size += ::protobuf::rt::value_size(6, self.max_folded_constant_in_bytes, ::protobuf::wire_format::WireTypeVarint);
}
if self.do_function_inlining != false {
my_size += 2;
}
if self.opt_level != OptimizerOptions_Level::L1 {
my_size += ::protobuf::rt::enum_size(3, self.opt_level);
}
if self.global_jit_level != OptimizerOptions_GlobalJitLevel::DEFAULT {
my_size += ::protobuf::rt::enum_size(5, self.global_jit_level);
}
if self.cpu_global_jit != false {
my_size += 2;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.do_common_subexpression_elimination != false {
os.write_bool(1, self.do_common_subexpression_elimination)?;
}
if self.do_constant_folding != false {
os.write_bool(2, self.do_constant_folding)?;
}
if self.max_folded_constant_in_bytes != 0 {
os.write_int64(6, self.max_folded_constant_in_bytes)?;
}
if self.do_function_inlining != false {
os.write_bool(4, self.do_function_inlining)?;
}
if self.opt_level != OptimizerOptions_Level::L1 {
os.write_enum(3, ::protobuf::ProtobufEnum::value(&self.opt_level))?;
}
if self.global_jit_level != OptimizerOptions_GlobalJitLevel::DEFAULT {
os.write_enum(5, ::protobuf::ProtobufEnum::value(&self.global_jit_level))?;
}
if self.cpu_global_jit != false {
os.write_bool(7, self.cpu_global_jit)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> OptimizerOptions {
OptimizerOptions::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"do_common_subexpression_elimination",
|m: &OptimizerOptions| { &m.do_common_subexpression_elimination },
|m: &mut OptimizerOptions| { &mut m.do_common_subexpression_elimination },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"do_constant_folding",
|m: &OptimizerOptions| { &m.do_constant_folding },
|m: &mut OptimizerOptions| { &mut m.do_constant_folding },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"max_folded_constant_in_bytes",
|m: &OptimizerOptions| { &m.max_folded_constant_in_bytes },
|m: &mut OptimizerOptions| { &mut m.max_folded_constant_in_bytes },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"do_function_inlining",
|m: &OptimizerOptions| { &m.do_function_inlining },
|m: &mut OptimizerOptions| { &mut m.do_function_inlining },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<OptimizerOptions_Level>>(
"opt_level",
|m: &OptimizerOptions| { &m.opt_level },
|m: &mut OptimizerOptions| { &mut m.opt_level },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<OptimizerOptions_GlobalJitLevel>>(
"global_jit_level",
|m: &OptimizerOptions| { &m.global_jit_level },
|m: &mut OptimizerOptions| { &mut m.global_jit_level },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"cpu_global_jit",
|m: &OptimizerOptions| { &m.cpu_global_jit },
|m: &mut OptimizerOptions| { &mut m.cpu_global_jit },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<OptimizerOptions>(
"OptimizerOptions",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static OptimizerOptions {
static instance: ::protobuf::rt::LazyV2<OptimizerOptions> = ::protobuf::rt::LazyV2::INIT;
instance.get(OptimizerOptions::new)
}
}
impl ::protobuf::Clear for OptimizerOptions {
fn clear(&mut self) {
self.do_common_subexpression_elimination = false;
self.do_constant_folding = false;
self.max_folded_constant_in_bytes = 0;
self.do_function_inlining = false;
self.opt_level = OptimizerOptions_Level::L1;
self.global_jit_level = OptimizerOptions_GlobalJitLevel::DEFAULT;
self.cpu_global_jit = false;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for OptimizerOptions {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for OptimizerOptions {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum OptimizerOptions_Level {
L1 = 0,
L0 = -1,
}
impl ::protobuf::ProtobufEnum for OptimizerOptions_Level {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<OptimizerOptions_Level> {
match value {
0 => ::std::option::Option::Some(OptimizerOptions_Level::L1),
-1 => ::std::option::Option::Some(OptimizerOptions_Level::L0),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [OptimizerOptions_Level] = &[
OptimizerOptions_Level::L1,
OptimizerOptions_Level::L0,
];
values
}
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
::protobuf::reflect::EnumDescriptor::new_pb_name::<OptimizerOptions_Level>("OptimizerOptions.Level", file_descriptor_proto())
})
}
}
impl ::std::marker::Copy for OptimizerOptions_Level {
}
impl ::std::default::Default for OptimizerOptions_Level {
fn default() -> Self {
OptimizerOptions_Level::L1
}
}
impl ::protobuf::reflect::ProtobufValue for OptimizerOptions_Level {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum OptimizerOptions_GlobalJitLevel {
DEFAULT = 0,
OFF = -1,
ON_1 = 1,
ON_2 = 2,
}
impl ::protobuf::ProtobufEnum for OptimizerOptions_GlobalJitLevel {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<OptimizerOptions_GlobalJitLevel> {
match value {
0 => ::std::option::Option::Some(OptimizerOptions_GlobalJitLevel::DEFAULT),
-1 => ::std::option::Option::Some(OptimizerOptions_GlobalJitLevel::OFF),
1 => ::std::option::Option::Some(OptimizerOptions_GlobalJitLevel::ON_1),
2 => ::std::option::Option::Some(OptimizerOptions_GlobalJitLevel::ON_2),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [OptimizerOptions_GlobalJitLevel] = &[
OptimizerOptions_GlobalJitLevel::DEFAULT,
OptimizerOptions_GlobalJitLevel::OFF,
OptimizerOptions_GlobalJitLevel::ON_1,
OptimizerOptions_GlobalJitLevel::ON_2,
];
values
}
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
::protobuf::reflect::EnumDescriptor::new_pb_name::<OptimizerOptions_GlobalJitLevel>("OptimizerOptions.GlobalJitLevel", file_descriptor_proto())
})
}
}
impl ::std::marker::Copy for OptimizerOptions_GlobalJitLevel {
}
impl ::std::default::Default for OptimizerOptions_GlobalJitLevel {
fn default() -> Self {
OptimizerOptions_GlobalJitLevel::DEFAULT
}
}
impl ::protobuf::reflect::ProtobufValue for OptimizerOptions_GlobalJitLevel {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
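// Hand-written illustrative sketch, not part of the protoc-generated output above: it
// exercises the OptimizerOptions accessors and the proto3 default semantics baked into
// `compute_size` (fields equal to their zero/default value contribute no bytes). It
// assumes only the generated API in this file plus the `Message` and `Clear` traits
// provided by the protobuf 2.x runtime.
#[cfg(test)]
mod optimizer_options_usage_example {
    use super::*;
    use ::protobuf::{Clear, Message};

    #[test]
    fn defaults_setters_and_clear() {
        let mut opts = OptimizerOptions::new();
        // Proto3 zero-value defaults declared above: L1 for Level, DEFAULT for GlobalJitLevel.
        assert_eq!(OptimizerOptions_Level::default(), OptimizerOptions_Level::L1);
        assert_eq!(opts.get_global_jit_level(), OptimizerOptions_GlobalJitLevel::DEFAULT);

        opts.set_global_jit_level(OptimizerOptions_GlobalJitLevel::ON_1);
        opts.set_cpu_global_jit(true);
        assert_eq!(opts.get_global_jit_level(), OptimizerOptions_GlobalJitLevel::ON_1);
        assert!(opts.get_cpu_global_jit());

        // Only non-default fields are encoded, so a freshly constructed message is empty.
        assert_eq!(OptimizerOptions::new().compute_size(), 0);
        assert!(opts.compute_size() > 0);

        // `Clear` restores every field to its proto3 default.
        opts.clear();
        assert_eq!(opts.get_global_jit_level(), OptimizerOptions_GlobalJitLevel::DEFAULT);
        assert!(!opts.get_cpu_global_jit());
    }
}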
#[derive(PartialEq,Clone,Default)]
pub struct GraphOptions {
// message fields
pub enable_recv_scheduling: bool,
pub optimizer_options: ::protobuf::SingularPtrField<OptimizerOptions>,
pub build_cost_model: i64,
pub build_cost_model_after: i64,
pub infer_shapes: bool,
pub place_pruned_graph: bool,
pub enable_bfloat16_sendrecv: bool,
pub timeline_step: i32,
pub rewrite_options: ::protobuf::SingularPtrField<super::rewriter_config::RewriterConfig>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a GraphOptions {
fn default() -> &'a GraphOptions {
<GraphOptions as ::protobuf::Message>::default_instance()
}
}
impl GraphOptions {
pub fn new() -> GraphOptions {
::std::default::Default::default()
}
// bool enable_recv_scheduling = 2;
pub fn get_enable_recv_scheduling(&self) -> bool {
self.enable_recv_scheduling
}
pub fn clear_enable_recv_scheduling(&mut self) {
self.enable_recv_scheduling = false;
}
// Param is passed by value, moved
pub fn set_enable_recv_scheduling(&mut self, v: bool) {
self.enable_recv_scheduling = v;
}
// .tensorflow.OptimizerOptions optimizer_options = 3;
pub fn get_optimizer_options(&self) -> &OptimizerOptions {
self.optimizer_options.as_ref().unwrap_or_else(|| <OptimizerOptions as ::protobuf::Message>::default_instance())
}
pub fn clear_optimizer_options(&mut self) {
self.optimizer_options.clear();
}
pub fn has_optimizer_options(&self) -> bool {
self.optimizer_options.is_some()
}
// Param is passed by value, moved
pub fn set_optimizer_options(&mut self, v: OptimizerOptions) {
self.optimizer_options = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_optimizer_options(&mut self) -> &mut OptimizerOptions {
if self.optimizer_options.is_none() {
self.optimizer_options.set_default();
}
self.optimizer_options.as_mut().unwrap()
}
// Take field
pub fn take_optimizer_options(&mut self) -> OptimizerOptions {
self.optimizer_options.take().unwrap_or_else(|| OptimizerOptions::new())
}
// int64 build_cost_model = 4;
pub fn get_build_cost_model(&self) -> i64 {
self.build_cost_model
}
pub fn clear_build_cost_model(&mut self) {
self.build_cost_model = 0;
}
// Param is passed by value, moved
pub fn set_build_cost_model(&mut self, v: i64) {
self.build_cost_model = v;
}
// int64 build_cost_model_after = 9;
pub fn get_build_cost_model_after(&self) -> i64 {
self.build_cost_model_after
}
pub fn clear_build_cost_model_after(&mut self) {
self.build_cost_model_after = 0;
}
// Param is passed by value, moved
pub fn set_build_cost_model_after(&mut self, v: i64) {
self.build_cost_model_after = v;
}
// bool infer_shapes = 5;
pub fn get_infer_shapes(&self) -> bool {
self.infer_shapes
}
pub fn clear_infer_shapes(&mut self) {
self.infer_shapes = false;
}
// Param is passed by value, moved
pub fn set_infer_shapes(&mut self, v: bool) {
self.infer_shapes = v;
}
// bool place_pruned_graph = 6;
pub fn get_place_pruned_graph(&self) -> bool {
self.place_pruned_graph
}
pub fn clear_place_pruned_graph(&mut self) {
self.place_pruned_graph = false;
}
// Param is passed by value, moved
pub fn set_place_pruned_graph(&mut self, v: bool) {
self.place_pruned_graph = v;
}
// bool enable_bfloat16_sendrecv = 7;
pub fn get_enable_bfloat16_sendrecv(&self) -> bool {
self.enable_bfloat16_sendrecv
}
pub fn clear_enable_bfloat16_sendrecv(&mut self) {
self.enable_bfloat16_sendrecv = false;
}
// Param is passed by value, moved
pub fn set_enable_bfloat16_sendrecv(&mut self, v: bool) {
self.enable_bfloat16_sendrecv = v;
}
// int32 timeline_step = 8;
pub fn get_timeline_step(&self) -> i32 {
self.timeline_step
}
pub fn clear_timeline_step(&mut self) {
self.timeline_step = 0;
}
// Param is passed by value, moved
pub fn set_timeline_step(&mut self, v: i32) {
self.timeline_step = v;
}
// .tensorflow.RewriterConfig rewrite_options = 10;
pub fn get_rewrite_options(&self) -> &super::rewriter_config::RewriterConfig {
self.rewrite_options.as_ref().unwrap_or_else(|| <super::rewriter_config::RewriterConfig as ::protobuf::Message>::default_instance())
}
pub fn clear_rewrite_options(&mut self) {
self.rewrite_options.clear();
}
pub fn has_rewrite_options(&self) -> bool {
self.rewrite_options.is_some()
}
// Param is passed by value, moved
pub fn set_rewrite_options(&mut self, v: super::rewriter_config::RewriterConfig) {
self.rewrite_options = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_rewrite_options(&mut self) -> &mut super::rewriter_config::RewriterConfig {
if self.rewrite_options.is_none() {
self.rewrite_options.set_default();
}
self.rewrite_options.as_mut().unwrap()
}
// Take field
pub fn take_rewrite_options(&mut self) -> super::rewriter_config::RewriterConfig {
self.rewrite_options.take().unwrap_or_else(|| super::rewriter_config::RewriterConfig::new())
}
}
impl ::protobuf::Message for GraphOptions {
fn is_initialized(&self) -> bool {
for v in &self.optimizer_options {
if !v.is_initialized() {
return false;
}
};
for v in &self.rewrite_options {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.enable_recv_scheduling = tmp;
},
3 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.optimizer_options)?;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.build_cost_model = tmp;
},
9 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.build_cost_model_after = tmp;
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.infer_shapes = tmp;
},
6 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.place_pruned_graph = tmp;
},
7 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.enable_bfloat16_sendrecv = tmp;
},
8 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.timeline_step = tmp;
},
10 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.rewrite_options)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
    // Compute the serialized size of this message (nested message sizes are cached as a side effect)
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.enable_recv_scheduling != false {
my_size += 2;
}
if let Some(ref v) = self.optimizer_options.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if self.build_cost_model != 0 {
my_size += ::protobuf::rt::value_size(4, self.build_cost_model, ::protobuf::wire_format::WireTypeVarint);
}
if self.build_cost_model_after != 0 {
my_size += ::protobuf::rt::value_size(9, self.build_cost_model_after, ::protobuf::wire_format::WireTypeVarint);
}
if self.infer_shapes != false {
my_size += 2;
}
if self.place_pruned_graph != false {
my_size += 2;
}
if self.enable_bfloat16_sendrecv != false {
my_size += 2;
}
if self.timeline_step != 0 {
my_size += ::protobuf::rt::value_size(8, self.timeline_step, ::protobuf::wire_format::WireTypeVarint);
}
if let Some(ref v) = self.rewrite_options.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.enable_recv_scheduling != false {
os.write_bool(2, self.enable_recv_scheduling)?;
}
if let Some(ref v) = self.optimizer_options.as_ref() {
os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if self.build_cost_model != 0 {
os.write_int64(4, self.build_cost_model)?;
}
if self.build_cost_model_after != 0 {
os.write_int64(9, self.build_cost_model_after)?;
}
if self.infer_shapes != false {
os.write_bool(5, self.infer_shapes)?;
}
if self.place_pruned_graph != false {
os.write_bool(6, self.place_pruned_graph)?;
}
if self.enable_bfloat16_sendrecv != false {
os.write_bool(7, self.enable_bfloat16_sendrecv)?;
}
if self.timeline_step != 0 {
os.write_int32(8, self.timeline_step)?;
}
if let Some(ref v) = self.rewrite_options.as_ref() {
os.write_tag(10, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> GraphOptions {
GraphOptions::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"enable_recv_scheduling",
|m: &GraphOptions| { &m.enable_recv_scheduling },
|m: &mut GraphOptions| { &mut m.enable_recv_scheduling },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<OptimizerOptions>>(
"optimizer_options",
|m: &GraphOptions| { &m.optimizer_options },
|m: &mut GraphOptions| { &mut m.optimizer_options },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"build_cost_model",
|m: &GraphOptions| { &m.build_cost_model },
|m: &mut GraphOptions| { &mut m.build_cost_model },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"build_cost_model_after",
|m: &GraphOptions| { &m.build_cost_model_after },
|m: &mut GraphOptions| { &mut m.build_cost_model_after },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"infer_shapes",
|m: &GraphOptions| { &m.infer_shapes },
|m: &mut GraphOptions| { &mut m.infer_shapes },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"place_pruned_graph",
|m: &GraphOptions| { &m.place_pruned_graph },
|m: &mut GraphOptions| { &mut m.place_pruned_graph },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"enable_bfloat16_sendrecv",
|m: &GraphOptions| { &m.enable_bfloat16_sendrecv },
|m: &mut GraphOptions| { &mut m.enable_bfloat16_sendrecv },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"timeline_step",
|m: &GraphOptions| { &m.timeline_step },
|m: &mut GraphOptions| { &mut m.timeline_step },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::rewriter_config::RewriterConfig>>(
"rewrite_options",
|m: &GraphOptions| { &m.rewrite_options },
|m: &mut GraphOptions| { &mut m.rewrite_options },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<GraphOptions>(
"GraphOptions",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static GraphOptions {
static instance: ::protobuf::rt::LazyV2<GraphOptions> = ::protobuf::rt::LazyV2::INIT;
instance.get(GraphOptions::new)
}
}
impl ::protobuf::Clear for GraphOptions {
fn clear(&mut self) {
self.enable_recv_scheduling = false;
self.optimizer_options.clear();
self.build_cost_model = 0;
self.build_cost_model_after = 0;
self.infer_shapes = false;
self.place_pruned_graph = false;
self.enable_bfloat16_sendrecv = false;
self.timeline_step = 0;
self.rewrite_options.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for GraphOptions {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for GraphOptions {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
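// Hand-written illustrative sketch, not protoc output: shows how the singular nested
// message field `optimizer_options` on GraphOptions behaves through the generated
// `has_*`/`mut_*`/`take_*` accessors. Only the API defined in this file is used.
#[cfg(test)]
mod graph_options_usage_example {
    use super::*;

    #[test]
    fn nested_optimizer_options_lifecycle() {
        let mut graph = GraphOptions::new();
        assert!(!graph.has_optimizer_options());

        // `mut_optimizer_options` lazily installs a default OptimizerOptions before
        // handing back a mutable reference, so the field becomes present.
        graph
            .mut_optimizer_options()
            .set_global_jit_level(OptimizerOptions_GlobalJitLevel::ON_2);
        assert!(graph.has_optimizer_options());
        assert_eq!(
            graph.get_optimizer_options().get_global_jit_level(),
            OptimizerOptions_GlobalJitLevel::ON_2
        );

        // `take_optimizer_options` moves the nested message out, leaving the field unset.
        let taken = graph.take_optimizer_options();
        assert_eq!(taken.get_global_jit_level(), OptimizerOptions_GlobalJitLevel::ON_2);
        assert!(!graph.has_optimizer_options());
    }
}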
#[derive(PartialEq,Clone,Default)]
pub struct ThreadPoolOptionProto {
// message fields
pub num_threads: i32,
pub global_name: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a ThreadPoolOptionProto {
fn default() -> &'a ThreadPoolOptionProto {
<ThreadPoolOptionProto as ::protobuf::Message>::default_instance()
}
}
impl ThreadPoolOptionProto {
pub fn new() -> ThreadPoolOptionProto {
::std::default::Default::default()
}
// int32 num_threads = 1;
pub fn get_num_threads(&self) -> i32 {
self.num_threads
}
pub fn clear_num_threads(&mut self) {
self.num_threads = 0;
}
// Param is passed by value, moved
pub fn set_num_threads(&mut self, v: i32) {
self.num_threads = v;
}
// string global_name = 2;
pub fn get_global_name(&self) -> &str {
&self.global_name
}
pub fn clear_global_name(&mut self) {
self.global_name.clear();
}
// Param is passed by value, moved
pub fn set_global_name(&mut self, v: ::std::string::String) {
self.global_name = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_global_name(&mut self) -> &mut ::std::string::String {
&mut self.global_name
}
// Take field
pub fn take_global_name(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.global_name, ::std::string::String::new())
}
}
impl ::protobuf::Message for ThreadPoolOptionProto {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.num_threads = tmp;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.global_name)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
    // Compute the serialized size of this message (nested message sizes are cached as a side effect)
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.num_threads != 0 {
my_size += ::protobuf::rt::value_size(1, self.num_threads, ::protobuf::wire_format::WireTypeVarint);
}
if !self.global_name.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.global_name);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.num_threads != 0 {
os.write_int32(1, self.num_threads)?;
}
if !self.global_name.is_empty() {
os.write_string(2, &self.global_name)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> ThreadPoolOptionProto {
ThreadPoolOptionProto::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"num_threads",
|m: &ThreadPoolOptionProto| { &m.num_threads },
|m: &mut ThreadPoolOptionProto| { &mut m.num_threads },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"global_name",
|m: &ThreadPoolOptionProto| { &m.global_name },
|m: &mut ThreadPoolOptionProto| { &mut m.global_name },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<ThreadPoolOptionProto>(
"ThreadPoolOptionProto",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static ThreadPoolOptionProto {
static instance: ::protobuf::rt::LazyV2<ThreadPoolOptionProto> = ::protobuf::rt::LazyV2::INIT;
instance.get(ThreadPoolOptionProto::new)
}
}
impl ::protobuf::Clear for ThreadPoolOptionProto {
fn clear(&mut self) {
self.num_threads = 0;
self.global_name.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for ThreadPoolOptionProto {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for ThreadPoolOptionProto {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
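// Hand-written illustrative sketch, not protoc output: a wire-format round trip for
// ThreadPoolOptionProto. `write_to_bytes` and `merge_from_bytes` are assumed to be the
// provided helper methods of the protobuf 2.x `Message` trait; the pool name and size
// are arbitrary example values.
#[cfg(test)]
mod thread_pool_option_proto_usage_example {
    use super::*;
    use ::protobuf::Message;

    #[test]
    fn wire_format_round_trip() {
        let mut pool = ThreadPoolOptionProto::new();
        pool.set_num_threads(8);
        pool.set_global_name("inter_op_pool".to_string());

        // Serialize to protobuf wire format, then parse into a fresh message.
        let bytes = pool.write_to_bytes().expect("serialization should succeed");
        let mut parsed = ThreadPoolOptionProto::new();
        parsed.merge_from_bytes(&bytes).expect("parsing should succeed");

        assert_eq!(parsed.get_num_threads(), 8);
        assert_eq!(parsed.get_global_name(), "inter_op_pool");
    }
}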
#[derive(PartialEq,Clone,Default)]
pub struct RPCOptions {
// message fields
pub use_rpc_for_inprocess_master: bool,
pub compression_algorithm: ::std::string::String,
pub compression_level: i32,
pub cache_rpc_response: bool,
pub disable_session_connection_sharing: bool,
pub num_channels_per_target: i32,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RPCOptions {
fn default() -> &'a RPCOptions {
<RPCOptions as ::protobuf::Message>::default_instance()
}
}
impl RPCOptions {
pub fn new() -> RPCOptions {
::std::default::Default::default()
}
// bool use_rpc_for_inprocess_master = 1;
pub fn get_use_rpc_for_inprocess_master(&self) -> bool {
self.use_rpc_for_inprocess_master
}
pub fn clear_use_rpc_for_inprocess_master(&mut self) {
self.use_rpc_for_inprocess_master = false;
}
// Param is passed by value, moved
pub fn set_use_rpc_for_inprocess_master(&mut self, v: bool) {
self.use_rpc_for_inprocess_master = v;
}
// string compression_algorithm = 2;
pub fn get_compression_algorithm(&self) -> &str {
&self.compression_algorithm
}
pub fn clear_compression_algorithm(&mut self) {
self.compression_algorithm.clear();
}
// Param is passed by value, moved
pub fn set_compression_algorithm(&mut self, v: ::std::string::String) {
self.compression_algorithm = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_compression_algorithm(&mut self) -> &mut ::std::string::String {
&mut self.compression_algorithm
}
// Take field
pub fn take_compression_algorithm(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.compression_algorithm, ::std::string::String::new())
}
// int32 compression_level = 3;
pub fn get_compression_level(&self) -> i32 {
self.compression_level
}
pub fn clear_compression_level(&mut self) {
self.compression_level = 0;
}
// Param is passed by value, moved
pub fn set_compression_level(&mut self, v: i32) {
self.compression_level = v;
}
// bool cache_rpc_response = 4;
pub fn get_cache_rpc_response(&self) -> bool {
self.cache_rpc_response
}
pub fn clear_cache_rpc_response(&mut self) {
self.cache_rpc_response = false;
}
// Param is passed by value, moved
pub fn set_cache_rpc_response(&mut self, v: bool) {
self.cache_rpc_response = v;
}
// bool disable_session_connection_sharing = 5;
pub fn get_disable_session_connection_sharing(&self) -> bool {
self.disable_session_connection_sharing
}
pub fn clear_disable_session_connection_sharing(&mut self) {
self.disable_session_connection_sharing = false;
}
// Param is passed by value, moved
pub fn set_disable_session_connection_sharing(&mut self, v: bool) {
self.disable_session_connection_sharing = v;
}
// int32 num_channels_per_target = 6;
pub fn get_num_channels_per_target(&self) -> i32 {
self.num_channels_per_target
}
pub fn clear_num_channels_per_target(&mut self) {
self.num_channels_per_target = 0;
}
// Param is passed by value, moved
pub fn set_num_channels_per_target(&mut self, v: i32) {
self.num_channels_per_target = v;
}
}
impl ::protobuf::Message for RPCOptions {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.use_rpc_for_inprocess_master = tmp;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.compression_algorithm)?;
},
3 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.compression_level = tmp;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.cache_rpc_response = tmp;
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.disable_session_connection_sharing = tmp;
},
6 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.num_channels_per_target = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
    // Compute the serialized size of this message (nested message sizes are cached as a side effect)
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.use_rpc_for_inprocess_master != false {
my_size += 2;
}
if !self.compression_algorithm.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.compression_algorithm);
}
if self.compression_level != 0 {
my_size += ::protobuf::rt::value_size(3, self.compression_level, ::protobuf::wire_format::WireTypeVarint);
}
if self.cache_rpc_response != false {
my_size += 2;
}
if self.disable_session_connection_sharing != false {
my_size += 2;
}
if self.num_channels_per_target != 0 {
my_size += ::protobuf::rt::value_size(6, self.num_channels_per_target, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.use_rpc_for_inprocess_master != false {
os.write_bool(1, self.use_rpc_for_inprocess_master)?;
}
if !self.compression_algorithm.is_empty() {
os.write_string(2, &self.compression_algorithm)?;
}
if self.compression_level != 0 {
os.write_int32(3, self.compression_level)?;
}
if self.cache_rpc_response != false {
os.write_bool(4, self.cache_rpc_response)?;
}
if self.disable_session_connection_sharing != false {
os.write_bool(5, self.disable_session_connection_sharing)?;
}
if self.num_channels_per_target != 0 {
os.write_int32(6, self.num_channels_per_target)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RPCOptions {
RPCOptions::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"use_rpc_for_inprocess_master",
|m: &RPCOptions| { &m.use_rpc_for_inprocess_master },
|m: &mut RPCOptions| { &mut m.use_rpc_for_inprocess_master },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"compression_algorithm",
|m: &RPCOptions| { &m.compression_algorithm },
|m: &mut RPCOptions| { &mut m.compression_algorithm },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"compression_level",
|m: &RPCOptions| { &m.compression_level },
|m: &mut RPCOptions| { &mut m.compression_level },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"cache_rpc_response",
|m: &RPCOptions| { &m.cache_rpc_response },
|m: &mut RPCOptions| { &mut m.cache_rpc_response },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"disable_session_connection_sharing",
|m: &RPCOptions| { &m.disable_session_connection_sharing },
|m: &mut RPCOptions| { &mut m.disable_session_connection_sharing },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"num_channels_per_target",
|m: &RPCOptions| { &m.num_channels_per_target },
|m: &mut RPCOptions| { &mut m.num_channels_per_target },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<RPCOptions>(
"RPCOptions",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static RPCOptions {
static instance: ::protobuf::rt::LazyV2<RPCOptions> = ::protobuf::rt::LazyV2::INIT;
instance.get(RPCOptions::new)
}
}
impl ::protobuf::Clear for RPCOptions {
fn clear(&mut self) {
self.use_rpc_for_inprocess_master = false;
self.compression_algorithm.clear();
self.compression_level = 0;
self.cache_rpc_response = false;
self.disable_session_connection_sharing = false;
self.num_channels_per_target = 0;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for RPCOptions {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for RPCOptions {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
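// Hand-written illustrative sketch, not protoc output: exercises the RPCOptions string
// and scalar accessors. The compression algorithm name is just an example value, not a
// statement about which algorithms the runtime accepts.
#[cfg(test)]
mod rpc_options_usage_example {
    use super::*;

    #[test]
    fn string_field_accessors() {
        let mut rpc = RPCOptions::new();
        rpc.set_compression_algorithm("deflate".to_string());
        rpc.set_compression_level(2);
        assert_eq!(rpc.get_compression_algorithm(), "deflate");
        assert_eq!(rpc.get_compression_level(), 2);

        // `take_compression_algorithm` moves the string out and leaves the empty default.
        let algorithm = rpc.take_compression_algorithm();
        assert_eq!(algorithm, "deflate");
        assert!(rpc.get_compression_algorithm().is_empty());
    }
}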
#[derive(PartialEq,Clone,Default)]
pub struct SessionMetadata {
// message fields
pub name: ::std::string::String,
pub version: i64,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a SessionMetadata {
fn default() -> &'a SessionMetadata {
<SessionMetadata as ::protobuf::Message>::default_instance()
}
}
impl SessionMetadata {
pub fn new() -> SessionMetadata {
::std::default::Default::default()
}
// string name = 1;
pub fn get_name(&self) -> &str {
&self.name
}
pub fn clear_name(&mut self) {
self.name.clear();
}
// Param is passed by value, moved
pub fn set_name(&mut self, v: ::std::string::String) {
self.name = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_name(&mut self) -> &mut ::std::string::String {
&mut self.name
}
// Take field
pub fn take_name(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.name, ::std::string::String::new())
}
// int64 version = 2;
pub fn get_version(&self) -> i64 {
self.version
}
pub fn clear_version(&mut self) {
self.version = 0;
}
// Param is passed by value, moved
pub fn set_version(&mut self, v: i64) {
self.version = v;
}
}
impl ::protobuf::Message for SessionMetadata {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.name)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.version = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
    // Compute the serialized size of this message (nested message sizes are cached as a side effect)
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.name.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.name);
}
if self.version != 0 {
my_size += ::protobuf::rt::value_size(2, self.version, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.name.is_empty() {
os.write_string(1, &self.name)?;
}
if self.version != 0 {
os.write_int64(2, self.version)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> SessionMetadata {
SessionMetadata::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"name",
|m: &SessionMetadata| { &m.name },
|m: &mut SessionMetadata| { &mut m.name },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"version",
|m: &SessionMetadata| { &m.version },
|m: &mut SessionMetadata| { &mut m.version },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<SessionMetadata>(
"SessionMetadata",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static SessionMetadata {
static instance: ::protobuf::rt::LazyV2<SessionMetadata> = ::protobuf::rt::LazyV2::INIT;
instance.get(SessionMetadata::new)
}
}
impl ::protobuf::Clear for SessionMetadata {
fn clear(&mut self) {
self.name.clear();
self.version = 0;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for SessionMetadata {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for SessionMetadata {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
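// Hand-written illustrative sketch, not protoc output: shows SessionMetadata getters and
// setters together with the generated `Clear` implementation. The session name and
// version are arbitrary example values.
#[cfg(test)]
mod session_metadata_usage_example {
    use super::*;
    use ::protobuf::Clear;

    #[test]
    fn clear_resets_to_proto3_defaults() {
        let mut meta = SessionMetadata::new();
        meta.set_name("training_session".to_string());
        meta.set_version(3);
        assert_eq!(meta.get_name(), "training_session");
        assert_eq!(meta.get_version(), 3);

        // `clear` restores the proto3 defaults: empty string and zero.
        meta.clear();
        assert!(meta.get_name().is_empty());
        assert_eq!(meta.get_version(), 0);
    }
}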
#[derive(PartialEq,Clone,Default)]
pub struct ConfigProto {
// message fields
pub device_count: ::std::collections::HashMap<::std::string::String, i32>,
pub intra_op_parallelism_threads: i32,
pub inter_op_parallelism_threads: i32,
pub use_per_session_threads: bool,
pub session_inter_op_thread_pool: ::protobuf::RepeatedField<ThreadPoolOptionProto>,
pub placement_period: i32,
pub device_filters: ::protobuf::RepeatedField<::std::string::String>,
pub gpu_options: ::protobuf::SingularPtrField<GPUOptions>,
pub allow_soft_placement: bool,
pub log_device_placement: bool,
pub graph_options: ::protobuf::SingularPtrField<GraphOptions>,
pub operation_timeout_in_ms: i64,
pub rpc_options: ::protobuf::SingularPtrField<RPCOptions>,
pub cluster_def: ::protobuf::SingularPtrField<super::cluster::ClusterDef>,
pub isolate_session_state: bool,
pub share_cluster_devices_in_session: bool,
pub experimental: ::protobuf::SingularPtrField<ConfigProto_Experimental>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a ConfigProto {
fn default() -> &'a ConfigProto {
<ConfigProto as ::protobuf::Message>::default_instance()
}
}
impl ConfigProto {
pub fn new() -> ConfigProto {
::std::default::Default::default()
}
// repeated .tensorflow.ConfigProto.DeviceCountEntry device_count = 1;
pub fn get_device_count(&self) -> &::std::collections::HashMap<::std::string::String, i32> {
&self.device_count
}
pub fn clear_device_count(&mut self) {
self.device_count.clear();
}
// Param is passed by value, moved
pub fn set_device_count(&mut self, v: ::std::collections::HashMap<::std::string::String, i32>) {
self.device_count = v;
}
// Mutable pointer to the field.
pub fn mut_device_count(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, i32> {
&mut self.device_count
}
// Take field
pub fn take_device_count(&mut self) -> ::std::collections::HashMap<::std::string::String, i32> {
::std::mem::replace(&mut self.device_count, ::std::collections::HashMap::new())
}
// int32 intra_op_parallelism_threads = 2;
pub fn get_intra_op_parallelism_threads(&self) -> i32 {
self.intra_op_parallelism_threads
}
pub fn clear_intra_op_parallelism_threads(&mut self) {
self.intra_op_parallelism_threads = 0;
}
// Param is passed by value, moved
pub fn set_intra_op_parallelism_threads(&mut self, v: i32) {
self.intra_op_parallelism_threads = v;
}
// int32 inter_op_parallelism_threads = 5;
pub fn get_inter_op_parallelism_threads(&self) -> i32 {
self.inter_op_parallelism_threads
}
pub fn clear_inter_op_parallelism_threads(&mut self) {
self.inter_op_parallelism_threads = 0;
}
// Param is passed by value, moved
pub fn set_inter_op_parallelism_threads(&mut self, v: i32) {
self.inter_op_parallelism_threads = v;
}
// bool use_per_session_threads = 9;
pub fn get_use_per_session_threads(&self) -> bool {
self.use_per_session_threads
}
pub fn clear_use_per_session_threads(&mut self) {
self.use_per_session_threads = false;
}
// Param is passed by value, moved
pub fn set_use_per_session_threads(&mut self, v: bool) {
self.use_per_session_threads = v;
}
// repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
pub fn get_session_inter_op_thread_pool(&self) -> &[ThreadPoolOptionProto] {
&self.session_inter_op_thread_pool
}
pub fn clear_session_inter_op_thread_pool(&mut self) {
self.session_inter_op_thread_pool.clear();
}
// Param is passed by value, moved
pub fn set_session_inter_op_thread_pool(&mut self, v: ::protobuf::RepeatedField<ThreadPoolOptionProto>) {
self.session_inter_op_thread_pool = v;
}
// Mutable pointer to the field.
pub fn mut_session_inter_op_thread_pool(&mut self) -> &mut ::protobuf::RepeatedField<ThreadPoolOptionProto> {
&mut self.session_inter_op_thread_pool
}
// Take field
pub fn take_session_inter_op_thread_pool(&mut self) -> ::protobuf::RepeatedField<ThreadPoolOptionProto> {
::std::mem::replace(&mut self.session_inter_op_thread_pool, ::protobuf::RepeatedField::new())
}
// int32 placement_period = 3;
pub fn get_placement_period(&self) -> i32 {
self.placement_period
}
pub fn clear_placement_period(&mut self) {
self.placement_period = 0;
}
// Param is passed by value, moved
pub fn set_placement_period(&mut self, v: i32) {
self.placement_period = v;
}
// repeated string device_filters = 4;
pub fn get_device_filters(&self) -> &[::std::string::String] {
&self.device_filters
}
pub fn clear_device_filters(&mut self) {
self.device_filters.clear();
}
// Param is passed by value, moved
pub fn set_device_filters(&mut self, v: ::protobuf::RepeatedField<::std::string::String>) {
self.device_filters = v;
}
// Mutable pointer to the field.
pub fn mut_device_filters(&mut self) -> &mut ::protobuf::RepeatedField<::std::string::String> {
&mut self.device_filters
}
// Take field
pub fn take_device_filters(&mut self) -> ::protobuf::RepeatedField<::std::string::String> {
::std::mem::replace(&mut self.device_filters, ::protobuf::RepeatedField::new())
}
// .tensorflow.GPUOptions gpu_options = 6;
pub fn get_gpu_options(&self) -> &GPUOptions {
self.gpu_options.as_ref().unwrap_or_else(|| <GPUOptions as ::protobuf::Message>::default_instance())
}
pub fn clear_gpu_options(&mut self) {
self.gpu_options.clear();
}
pub fn has_gpu_options(&self) -> bool {
self.gpu_options.is_some()
}
// Param is passed by value, moved
pub fn set_gpu_options(&mut self, v: GPUOptions) {
self.gpu_options = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_gpu_options(&mut self) -> &mut GPUOptions {
if self.gpu_options.is_none() {
self.gpu_options.set_default();
}
self.gpu_options.as_mut().unwrap()
}
// Take field
pub fn take_gpu_options(&mut self) -> GPUOptions {
self.gpu_options.take().unwrap_or_else(|| GPUOptions::new())
}
// bool allow_soft_placement = 7;
pub fn get_allow_soft_placement(&self) -> bool {
self.allow_soft_placement
}
pub fn clear_allow_soft_placement(&mut self) {
self.allow_soft_placement = false;
}
// Param is passed by value, moved
pub fn set_allow_soft_placement(&mut self, v: bool) {
self.allow_soft_placement = v;
}
// bool log_device_placement = 8;
pub fn get_log_device_placement(&self) -> bool {
self.log_device_placement
}
pub fn clear_log_device_placement(&mut self) {
self.log_device_placement = false;
}
// Param is passed by value, moved
pub fn set_log_device_placement(&mut self, v: bool) {
self.log_device_placement = v;
}
// .tensorflow.GraphOptions graph_options = 10;
pub fn get_graph_options(&self) -> &GraphOptions {
self.graph_options.as_ref().unwrap_or_else(|| <GraphOptions as ::protobuf::Message>::default_instance())
}
pub fn clear_graph_options(&mut self) {
self.graph_options.clear();
}
pub fn has_graph_options(&self) -> bool {
self.graph_options.is_some()
}
// Param is passed by value, moved
pub fn set_graph_options(&mut self, v: GraphOptions) {
self.graph_options = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_graph_options(&mut self) -> &mut GraphOptions {
if self.graph_options.is_none() {
self.graph_options.set_default();
}
self.graph_options.as_mut().unwrap()
}
// Take field
pub fn take_graph_options(&mut self) -> GraphOptions {
self.graph_options.take().unwrap_or_else(|| GraphOptions::new())
}
// int64 operation_timeout_in_ms = 11;
pub fn get_operation_timeout_in_ms(&self) -> i64 {
self.operation_timeout_in_ms
}
pub fn clear_operation_timeout_in_ms(&mut self) {
self.operation_timeout_in_ms = 0;
}
// Param is passed by value, moved
pub fn set_operation_timeout_in_ms(&mut self, v: i64) {
self.operation_timeout_in_ms = v;
}
// .tensorflow.RPCOptions rpc_options = 13;
pub fn get_rpc_options(&self) -> &RPCOptions {
self.rpc_options.as_ref().unwrap_or_else(|| <RPCOptions as ::protobuf::Message>::default_instance())
}
pub fn clear_rpc_options(&mut self) {
self.rpc_options.clear();
}
pub fn has_rpc_options(&self) -> bool {
self.rpc_options.is_some()
}
// Param is passed by value, moved
pub fn set_rpc_options(&mut self, v: RPCOptions) {
self.rpc_options = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_rpc_options(&mut self) -> &mut RPCOptions {
if self.rpc_options.is_none() {
self.rpc_options.set_default();
}
self.rpc_options.as_mut().unwrap()
}
// Take field
pub fn take_rpc_options(&mut self) -> RPCOptions {
self.rpc_options.take().unwrap_or_else(|| RPCOptions::new())
}
// .tensorflow.ClusterDef cluster_def = 14;
pub fn get_cluster_def(&self) -> &super::cluster::ClusterDef {
self.cluster_def.as_ref().unwrap_or_else(|| <super::cluster::ClusterDef as ::protobuf::Message>::default_instance())
}
pub fn clear_cluster_def(&mut self) {
self.cluster_def.clear();
}
pub fn has_cluster_def(&self) -> bool {
self.cluster_def.is_some()
}
// Param is passed by value, moved
pub fn set_cluster_def(&mut self, v: super::cluster::ClusterDef) {
self.cluster_def = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_cluster_def(&mut self) -> &mut super::cluster::ClusterDef {
if self.cluster_def.is_none() {
self.cluster_def.set_default();
}
self.cluster_def.as_mut().unwrap()
}
// Take field
pub fn take_cluster_def(&mut self) -> super::cluster::ClusterDef {
self.cluster_def.take().unwrap_or_else(|| super::cluster::ClusterDef::new())
}
// bool isolate_session_state = 15;
pub fn get_isolate_session_state(&self) -> bool {
self.isolate_session_state
}
pub fn clear_isolate_session_state(&mut self) {
self.isolate_session_state = false;
}
// Param is passed by value, moved
pub fn set_isolate_session_state(&mut self, v: bool) {
self.isolate_session_state = v;
}
// bool share_cluster_devices_in_session = 17;
pub fn get_share_cluster_devices_in_session(&self) -> bool {
self.share_cluster_devices_in_session
}
pub fn clear_share_cluster_devices_in_session(&mut self) {
self.share_cluster_devices_in_session = false;
}
// Param is passed by value, moved
pub fn set_share_cluster_devices_in_session(&mut self, v: bool) {
self.share_cluster_devices_in_session = v;
}
// .tensorflow.ConfigProto.Experimental experimental = 16;
pub fn get_experimental(&self) -> &ConfigProto_Experimental {
self.experimental.as_ref().unwrap_or_else(|| <ConfigProto_Experimental as ::protobuf::Message>::default_instance())
}
pub fn clear_experimental(&mut self) {
self.experimental.clear();
}
pub fn has_experimental(&self) -> bool {
self.experimental.is_some()
}
// Param is passed by value, moved
pub fn set_experimental(&mut self, v: ConfigProto_Experimental) {
self.experimental = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_experimental(&mut self) -> &mut ConfigProto_Experimental {
if self.experimental.is_none() {
self.experimental.set_default();
}
self.experimental.as_mut().unwrap()
}
// Take field
pub fn take_experimental(&mut self) -> ConfigProto_Experimental {
self.experimental.take().unwrap_or_else(|| ConfigProto_Experimental::new())
}
}
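// Hand-written illustrative sketch, not protoc output: populates a ConfigProto through
// its map, repeated, scalar, and nested-message accessors defined above. The "CPU"
// device name, pool size, and other values are arbitrary examples.
#[cfg(test)]
mod config_proto_usage_example {
    use super::*;

    #[test]
    fn builder_style_population() {
        let mut config = ConfigProto::new();

        // Map field: device kind -> maximum number of devices of that kind.
        config.mut_device_count().insert("CPU".to_string(), 4);

        // Repeated message field: one explicitly sized inter-op thread pool.
        let mut pool = ThreadPoolOptionProto::new();
        pool.set_num_threads(2);
        config.mut_session_inter_op_thread_pool().push(pool);

        // Scalar and singular nested-message fields.
        config.set_allow_soft_placement(true);
        config.mut_graph_options().set_infer_shapes(true);

        assert_eq!(config.get_device_count().get("CPU"), Some(&4));
        assert_eq!(config.get_session_inter_op_thread_pool().len(), 1);
        assert!(config.get_allow_soft_placement());
        assert!(config.has_graph_options());
    }
}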
impl ::protobuf::Message for ConfigProto {
fn is_initialized(&self) -> bool {
for v in &self.session_inter_op_thread_pool {
if !v.is_initialized() {
return false;
}
};
for v in &self.gpu_options {
if !v.is_initialized() {
return false;
}
};
for v in &self.graph_options {
if !v.is_initialized() {
return false;
}
};
for v in &self.rpc_options {
if !v.is_initialized() {
return false;
}
};
for v in &self.cluster_def {
if !v.is_initialized() {
return false;
}
};
for v in &self.experimental {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeInt32>(wire_type, is, &mut self.device_count)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.intra_op_parallelism_threads = tmp;
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.inter_op_parallelism_threads = tmp;
},
9 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.use_per_session_threads = tmp;
},
12 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.session_inter_op_thread_pool)?;
},
3 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.placement_period = tmp;
},
4 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.device_filters)?;
},
6 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.gpu_options)?;
},
7 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.allow_soft_placement = tmp;
},
8 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.log_device_placement = tmp;
},
10 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.graph_options)?;
},
11 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.operation_timeout_in_ms = tmp;
},
13 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.rpc_options)?;
},
14 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.cluster_def)?;
},
15 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.isolate_session_state = tmp;
},
17 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.share_cluster_devices_in_session = tmp;
},
16 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.experimental)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
    // Compute the serialized size of this message (nested message sizes are cached as a side effect)
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeInt32>(1, &self.device_count);
if self.intra_op_parallelism_threads != 0 {
my_size += ::protobuf::rt::value_size(2, self.intra_op_parallelism_threads, ::protobuf::wire_format::WireTypeVarint);
}
if self.inter_op_parallelism_threads != 0 {
my_size += ::protobuf::rt::value_size(5, self.inter_op_parallelism_threads, ::protobuf::wire_format::WireTypeVarint);
}
if self.use_per_session_threads != false {
my_size += 2;
}
for value in &self.session_inter_op_thread_pool {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
if self.placement_period != 0 {
my_size += ::protobuf::rt::value_size(3, self.placement_period, ::protobuf::wire_format::WireTypeVarint);
}
for value in &self.device_filters {
my_size += ::protobuf::rt::string_size(4, &value);
};
if let Some(ref v) = self.gpu_options.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if self.allow_soft_placement != false {
my_size += 2;
}
if self.log_device_placement != false {
my_size += 2;
}
if let Some(ref v) = self.graph_options.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if self.operation_timeout_in_ms != 0 {
my_size += ::protobuf::rt::value_size(11, self.operation_timeout_in_ms, ::protobuf::wire_format::WireTypeVarint);
}
if let Some(ref v) = self.rpc_options.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if let Some(ref v) = self.cluster_def.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if self.isolate_session_state != false {
my_size += 2;
}
if self.share_cluster_devices_in_session != false {
my_size += 3;
}
if let Some(ref v) = self.experimental.as_ref() {
let len = v.compute_size();
my_size += 2 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeInt32>(1, &self.device_count, os)?;
if self.intra_op_parallelism_threads != 0 {
os.write_int32(2, self.intra_op_parallelism_threads)?;
}
if self.inter_op_parallelism_threads != 0 {
os.write_int32(5, self.inter_op_parallelism_threads)?;
}
if self.use_per_session_threads != false {
os.write_bool(9, self.use_per_session_threads)?;
}
for v in &self.session_inter_op_thread_pool {
os.write_tag(12, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
if self.placement_period != 0 {
os.write_int32(3, self.placement_period)?;
}
for v in &self.device_filters {
os.write_string(4, &v)?;
};
if let Some(ref v) = self.gpu_options.as_ref() {
os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if self.allow_soft_placement != false {
os.write_bool(7, self.allow_soft_placement)?;
}
if self.log_device_placement != false {
os.write_bool(8, self.log_device_placement)?;
}
if let Some(ref v) = self.graph_options.as_ref() {
os.write_tag(10, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if self.operation_timeout_in_ms != 0 {
os.write_int64(11, self.operation_timeout_in_ms)?;
}
if let Some(ref v) = self.rpc_options.as_ref() {
os.write_tag(13, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if let Some(ref v) = self.cluster_def.as_ref() {
os.write_tag(14, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if self.isolate_session_state != false {
os.write_bool(15, self.isolate_session_state)?;
}
if self.share_cluster_devices_in_session != false {
os.write_bool(17, self.share_cluster_devices_in_session)?;
}
if let Some(ref v) = self.experimental.as_ref() {
os.write_tag(16, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> ConfigProto {
ConfigProto::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeInt32>(
"device_count",
|m: &ConfigProto| { &m.device_count },
|m: &mut ConfigProto| { &mut m.device_count },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"intra_op_parallelism_threads",
|m: &ConfigProto| { &m.intra_op_parallelism_threads },
|m: &mut ConfigProto| { &mut m.intra_op_parallelism_threads },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"inter_op_parallelism_threads",
|m: &ConfigProto| { &m.inter_op_parallelism_threads },
|m: &mut ConfigProto| { &mut m.inter_op_parallelism_threads },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"use_per_session_threads",
|m: &ConfigProto| { &m.use_per_session_threads },
|m: &mut ConfigProto| { &mut m.use_per_session_threads },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<ThreadPoolOptionProto>>(
"session_inter_op_thread_pool",
|m: &ConfigProto| { &m.session_inter_op_thread_pool },
|m: &mut ConfigProto| { &mut m.session_inter_op_thread_pool },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"placement_period",
|m: &ConfigProto| { &m.placement_period },
|m: &mut ConfigProto| { &mut m.placement_period },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"device_filters",
|m: &ConfigProto| { &m.device_filters },
|m: &mut ConfigProto| { &mut m.device_filters },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<GPUOptions>>(
"gpu_options",
|m: &ConfigProto| { &m.gpu_options },
|m: &mut ConfigProto| { &mut m.gpu_options },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"allow_soft_placement",
|m: &ConfigProto| { &m.allow_soft_placement },
|m: &mut ConfigProto| { &mut m.allow_soft_placement },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"log_device_placement",
|m: &ConfigProto| { &m.log_device_placement },
|m: &mut ConfigProto| { &mut m.log_device_placement },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<GraphOptions>>(
"graph_options",
|m: &ConfigProto| { &m.graph_options },
|m: &mut ConfigProto| { &mut m.graph_options },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"operation_timeout_in_ms",
|m: &ConfigProto| { &m.operation_timeout_in_ms },
|m: &mut ConfigProto| { &mut m.operation_timeout_in_ms },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<RPCOptions>>(
"rpc_options",
|m: &ConfigProto| { &m.rpc_options },
|m: &mut ConfigProto| { &mut m.rpc_options },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::cluster::ClusterDef>>(
"cluster_def",
|m: &ConfigProto| { &m.cluster_def },
|m: &mut ConfigProto| { &mut m.cluster_def },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"isolate_session_state",
|m: &ConfigProto| { &m.isolate_session_state },
|m: &mut ConfigProto| { &mut m.isolate_session_state },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"share_cluster_devices_in_session",
|m: &ConfigProto| { &m.share_cluster_devices_in_session },
|m: &mut ConfigProto| { &mut m.share_cluster_devices_in_session },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<ConfigProto_Experimental>>(
"experimental",
|m: &ConfigProto| { &m.experimental },
|m: &mut ConfigProto| { &mut m.experimental },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<ConfigProto>(
"ConfigProto",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static ConfigProto {
static instance: ::protobuf::rt::LazyV2<ConfigProto> = ::protobuf::rt::LazyV2::INIT;
instance.get(ConfigProto::new)
}
}
impl ::protobuf::Clear for ConfigProto {
fn clear(&mut self) {
self.device_count.clear();
self.intra_op_parallelism_threads = 0;
self.inter_op_parallelism_threads = 0;
self.use_per_session_threads = false;
self.session_inter_op_thread_pool.clear();
self.placement_period = 0;
self.device_filters.clear();
self.gpu_options.clear();
self.allow_soft_placement = false;
self.log_device_placement = false;
self.graph_options.clear();
self.operation_timeout_in_ms = 0;
self.rpc_options.clear();
self.cluster_def.clear();
self.isolate_session_state = false;
self.share_cluster_devices_in_session = false;
self.experimental.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for ConfigProto {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for ConfigProto {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
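// ---------------------------------------------------------------------------
// Hedged usage sketch — hand-written, NOT part of the rust-protobuf codegen
// output above. It shows how the `ConfigProto` message can be populated and
// round-tripped through its wire format via the `::protobuf::Message` trait
// methods (`write_to_bytes` / `merge_from_bytes`) that this impl block backs.
// Field names come from the generated struct; the example is an illustration
// of the API shape, not authoritative TensorFlow usage.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn example_config_proto_roundtrip() -> ::protobuf::ProtobufResult<()> {
    use ::protobuf::Message;
    let mut cfg = ConfigProto::new();
    // Proto3 scalars default to zero/false; only non-default values are
    // emitted by `write_to_with_cached_sizes` above.
    cfg.allow_soft_placement = true;
    cfg.inter_op_parallelism_threads = 4;
    // `device_count` is the map<string, int32> field (field number 1).
    cfg.device_count.insert("CPU".to_string(), 2);
    // Serialize: drives `compute_size` followed by `write_to_with_cached_sizes`.
    let bytes = cfg.write_to_bytes()?;
    // Decode into a fresh message: drives the `merge_from` loop defined above.
    let mut decoded = ConfigProto::new();
    decoded.merge_from_bytes(&bytes)?;
    assert_eq!(decoded.inter_op_parallelism_threads, 4);
    assert!(decoded.allow_soft_placement);
    ::std::result::Result::Ok(())
}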
#[derive(PartialEq,Clone,Default)]
pub struct ConfigProto_Experimental {
// message fields
pub collective_group_leader: ::std::string::String,
pub executor_type: ::std::string::String,
pub recv_buf_max_chunk: i32,
pub use_numa_affinity: bool,
pub collective_deterministic_sequential_execution: bool,
pub collective_nccl: bool,
pub share_session_state_in_clusterspec_propagation: bool,
pub disable_thread_spinning: bool,
pub share_cluster_devices_in_session: bool,
pub session_metadata: ::protobuf::SingularPtrField<SessionMetadata>,
pub optimize_for_static_graph: bool,
pub enable_mlir_bridge: bool,
pub mlir_bridge_rollout: ConfigProto_Experimental_MlirBridgeRollout,
pub enable_mlir_graph_optimization: bool,
pub disable_output_partition_graphs: bool,
pub xla_fusion_autotuner_thresh: i64,
pub use_tfrt: bool,
pub disable_functional_ops_lowering: bool,
pub xla_prefer_single_graph_cluster: bool,
pub coordination_config: ::protobuf::SingularPtrField<super::coordination_config::CoordinationServiceConfig>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a ConfigProto_Experimental {
fn default() -> &'a ConfigProto_Experimental {
<ConfigProto_Experimental as ::protobuf::Message>::default_instance()
}
}
impl ConfigProto_Experimental {
pub fn new() -> ConfigProto_Experimental {
::std::default::Default::default()
}
// string collective_group_leader = 1;
pub fn get_collective_group_leader(&self) -> &str {
&self.collective_group_leader
}
pub fn clear_collective_group_leader(&mut self) {
self.collective_group_leader.clear();
}
// Param is passed by value, moved
pub fn set_collective_group_leader(&mut self, v: ::std::string::String) {
self.collective_group_leader = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_collective_group_leader(&mut self) -> &mut ::std::string::String {
&mut self.collective_group_leader
}
// Take field
pub fn take_collective_group_leader(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.collective_group_leader, ::std::string::String::new())
}
// string executor_type = 3;
pub fn get_executor_type(&self) -> &str {
&self.executor_type
}
pub fn clear_executor_type(&mut self) {
self.executor_type.clear();
}
// Param is passed by value, moved
pub fn set_executor_type(&mut self, v: ::std::string::String) {
self.executor_type = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_executor_type(&mut self) -> &mut ::std::string::String {
&mut self.executor_type
}
// Take field
pub fn take_executor_type(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.executor_type, ::std::string::String::new())
}
// int32 recv_buf_max_chunk = 4;
pub fn get_recv_buf_max_chunk(&self) -> i32 {
self.recv_buf_max_chunk
}
pub fn clear_recv_buf_max_chunk(&mut self) {
self.recv_buf_max_chunk = 0;
}
// Param is passed by value, moved
pub fn set_recv_buf_max_chunk(&mut self, v: i32) {
self.recv_buf_max_chunk = v;
}
// bool use_numa_affinity = 5;
pub fn get_use_numa_affinity(&self) -> bool {
self.use_numa_affinity
}
pub fn clear_use_numa_affinity(&mut self) {
self.use_numa_affinity = false;
}
// Param is passed by value, moved
pub fn set_use_numa_affinity(&mut self, v: bool) {
self.use_numa_affinity = v;
}
// bool collective_deterministic_sequential_execution = 6;
pub fn get_collective_deterministic_sequential_execution(&self) -> bool {
self.collective_deterministic_sequential_execution
}
pub fn clear_collective_deterministic_sequential_execution(&mut self) {
self.collective_deterministic_sequential_execution = false;
}
// Param is passed by value, moved
pub fn set_collective_deterministic_sequential_execution(&mut self, v: bool) {
self.collective_deterministic_sequential_execution = v;
}
// bool collective_nccl = 7;
pub fn get_collective_nccl(&self) -> bool {
self.collective_nccl
}
pub fn clear_collective_nccl(&mut self) {
self.collective_nccl = false;
}
// Param is passed by value, moved
pub fn set_collective_nccl(&mut self, v: bool) {
self.collective_nccl = v;
}
// bool share_session_state_in_clusterspec_propagation = 8;
pub fn get_share_session_state_in_clusterspec_propagation(&self) -> bool {
self.share_session_state_in_clusterspec_propagation
}
pub fn clear_share_session_state_in_clusterspec_propagation(&mut self) {
self.share_session_state_in_clusterspec_propagation = false;
}
// Param is passed by value, moved
pub fn set_share_session_state_in_clusterspec_propagation(&mut self, v: bool) {
self.share_session_state_in_clusterspec_propagation = v;
}
// bool disable_thread_spinning = 9;
pub fn get_disable_thread_spinning(&self) -> bool {
self.disable_thread_spinning
}
pub fn clear_disable_thread_spinning(&mut self) {
self.disable_thread_spinning = false;
}
// Param is passed by value, moved
pub fn set_disable_thread_spinning(&mut self, v: bool) {
self.disable_thread_spinning = v;
}
// bool share_cluster_devices_in_session = 10;
pub fn get_share_cluster_devices_in_session(&self) -> bool {
self.share_cluster_devices_in_session
}
pub fn clear_share_cluster_devices_in_session(&mut self) {
self.share_cluster_devices_in_session = false;
}
// Param is passed by value, moved
pub fn set_share_cluster_devices_in_session(&mut self, v: bool) {
self.share_cluster_devices_in_session = v;
}
// .tensorflow.SessionMetadata session_metadata = 11;
pub fn get_session_metadata(&self) -> &SessionMetadata {
self.session_metadata.as_ref().unwrap_or_else(|| <SessionMetadata as ::protobuf::Message>::default_instance())
}
pub fn clear_session_metadata(&mut self) {
self.session_metadata.clear();
}
pub fn has_session_metadata(&self) -> bool {
self.session_metadata.is_some()
}
// Param is passed by value, moved
pub fn set_session_metadata(&mut self, v: SessionMetadata) {
self.session_metadata = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_session_metadata(&mut self) -> &mut SessionMetadata {
if self.session_metadata.is_none() {
self.session_metadata.set_default();
}
self.session_metadata.as_mut().unwrap()
}
// Take field
pub fn take_session_metadata(&mut self) -> SessionMetadata {
self.session_metadata.take().unwrap_or_else(|| SessionMetadata::new())
}
// bool optimize_for_static_graph = 12;
pub fn get_optimize_for_static_graph(&self) -> bool {
self.optimize_for_static_graph
}
pub fn clear_optimize_for_static_graph(&mut self) {
self.optimize_for_static_graph = false;
}
// Param is passed by value, moved
pub fn set_optimize_for_static_graph(&mut self, v: bool) {
self.optimize_for_static_graph = v;
}
// bool enable_mlir_bridge = 13;
pub fn get_enable_mlir_bridge(&self) -> bool {
self.enable_mlir_bridge
}
pub fn clear_enable_mlir_bridge(&mut self) {
self.enable_mlir_bridge = false;
}
// Param is passed by value, moved
pub fn set_enable_mlir_bridge(&mut self, v: bool) {
self.enable_mlir_bridge = v;
}
// .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
pub fn get_mlir_bridge_rollout(&self) -> ConfigProto_Experimental_MlirBridgeRollout {
self.mlir_bridge_rollout
}
pub fn clear_mlir_bridge_rollout(&mut self) {
self.mlir_bridge_rollout = ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED;
}
// Param is passed by value, moved
pub fn set_mlir_bridge_rollout(&mut self, v: ConfigProto_Experimental_MlirBridgeRollout) {
self.mlir_bridge_rollout = v;
}
// bool enable_mlir_graph_optimization = 16;
pub fn get_enable_mlir_graph_optimization(&self) -> bool {
self.enable_mlir_graph_optimization
}
pub fn clear_enable_mlir_graph_optimization(&mut self) {
self.enable_mlir_graph_optimization = false;
}
// Param is passed by value, moved
pub fn set_enable_mlir_graph_optimization(&mut self, v: bool) {
self.enable_mlir_graph_optimization = v;
}
// bool disable_output_partition_graphs = 14;
pub fn get_disable_output_partition_graphs(&self) -> bool {
self.disable_output_partition_graphs
}
pub fn clear_disable_output_partition_graphs(&mut self) {
self.disable_output_partition_graphs = false;
}
// Param is passed by value, moved
pub fn set_disable_output_partition_graphs(&mut self, v: bool) {
self.disable_output_partition_graphs = v;
}
// int64 xla_fusion_autotuner_thresh = 15;
pub fn get_xla_fusion_autotuner_thresh(&self) -> i64 {
self.xla_fusion_autotuner_thresh
}
pub fn clear_xla_fusion_autotuner_thresh(&mut self) {
self.xla_fusion_autotuner_thresh = 0;
}
// Param is passed by value, moved
pub fn set_xla_fusion_autotuner_thresh(&mut self, v: i64) {
self.xla_fusion_autotuner_thresh = v;
}
// bool use_tfrt = 18;
pub fn get_use_tfrt(&self) -> bool {
self.use_tfrt
}
pub fn clear_use_tfrt(&mut self) {
self.use_tfrt = false;
}
// Param is passed by value, moved
pub fn set_use_tfrt(&mut self, v: bool) {
self.use_tfrt = v;
}
// bool disable_functional_ops_lowering = 21;
pub fn get_disable_functional_ops_lowering(&self) -> bool {
self.disable_functional_ops_lowering
}
pub fn clear_disable_functional_ops_lowering(&mut self) {
self.disable_functional_ops_lowering = false;
}
// Param is passed by value, moved
pub fn set_disable_functional_ops_lowering(&mut self, v: bool) {
self.disable_functional_ops_lowering = v;
}
// bool xla_prefer_single_graph_cluster = 22;
pub fn get_xla_prefer_single_graph_cluster(&self) -> bool {
self.xla_prefer_single_graph_cluster
}
pub fn clear_xla_prefer_single_graph_cluster(&mut self) {
self.xla_prefer_single_graph_cluster = false;
}
// Param is passed by value, moved
pub fn set_xla_prefer_single_graph_cluster(&mut self, v: bool) {
self.xla_prefer_single_graph_cluster = v;
}
// .tensorflow.CoordinationServiceConfig coordination_config = 23;
pub fn get_coordination_config(&self) -> &super::coordination_config::CoordinationServiceConfig {
self.coordination_config.as_ref().unwrap_or_else(|| <super::coordination_config::CoordinationServiceConfig as ::protobuf::Message>::default_instance())
}
pub fn clear_coordination_config(&mut self) {
self.coordination_config.clear();
}
pub fn has_coordination_config(&self) -> bool {
self.coordination_config.is_some()
}
// Param is passed by value, moved
pub fn set_coordination_config(&mut self, v: super::coordination_config::CoordinationServiceConfig) {
self.coordination_config = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_coordination_config(&mut self) -> &mut super::coordination_config::CoordinationServiceConfig {
if self.coordination_config.is_none() {
self.coordination_config.set_default();
}
self.coordination_config.as_mut().unwrap()
}
// Take field
pub fn take_coordination_config(&mut self) -> super::coordination_config::CoordinationServiceConfig {
self.coordination_config.take().unwrap_or_else(|| super::coordination_config::CoordinationServiceConfig::new())
}
}
impl ::protobuf::Message for ConfigProto_Experimental {
fn is_initialized(&self) -> bool {
for v in &self.session_metadata {
if !v.is_initialized() {
return false;
}
};
for v in &self.coordination_config {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.collective_group_leader)?;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.executor_type)?;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.recv_buf_max_chunk = tmp;
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.use_numa_affinity = tmp;
},
6 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.collective_deterministic_sequential_execution = tmp;
},
7 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.collective_nccl = tmp;
},
8 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.share_session_state_in_clusterspec_propagation = tmp;
},
9 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.disable_thread_spinning = tmp;
},
10 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.share_cluster_devices_in_session = tmp;
},
11 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.session_metadata)?;
},
12 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.optimize_for_static_graph = tmp;
},
13 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.enable_mlir_bridge = tmp;
},
17 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.mlir_bridge_rollout, 17, &mut self.unknown_fields)?
},
16 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.enable_mlir_graph_optimization = tmp;
},
14 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.disable_output_partition_graphs = tmp;
},
15 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.xla_fusion_autotuner_thresh = tmp;
},
18 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.use_tfrt = tmp;
},
21 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.disable_functional_ops_lowering = tmp;
},
22 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.xla_prefer_single_graph_cluster = tmp;
},
23 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.coordination_config)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.collective_group_leader.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.collective_group_leader);
}
if !self.executor_type.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.executor_type);
}
if self.recv_buf_max_chunk != 0 {
my_size += ::protobuf::rt::value_size(4, self.recv_buf_max_chunk, ::protobuf::wire_format::WireTypeVarint);
}
if self.use_numa_affinity != false {
my_size += 2;
}
if self.collective_deterministic_sequential_execution != false {
my_size += 2;
}
if self.collective_nccl != false {
my_size += 2;
}
if self.share_session_state_in_clusterspec_propagation != false {
my_size += 2;
}
if self.disable_thread_spinning != false {
my_size += 2;
}
if self.share_cluster_devices_in_session != false {
my_size += 2;
}
if let Some(ref v) = self.session_metadata.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if self.optimize_for_static_graph != false {
my_size += 2;
}
if self.enable_mlir_bridge != false {
my_size += 2;
}
if self.mlir_bridge_rollout != ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED {
my_size += ::protobuf::rt::enum_size(17, self.mlir_bridge_rollout);
}
if self.enable_mlir_graph_optimization != false {
my_size += 3;
}
if self.disable_output_partition_graphs != false {
my_size += 2;
}
if self.xla_fusion_autotuner_thresh != 0 {
my_size += ::protobuf::rt::value_size(15, self.xla_fusion_autotuner_thresh, ::protobuf::wire_format::WireTypeVarint);
}
if self.use_tfrt != false {
my_size += 3;
}
if self.disable_functional_ops_lowering != false {
my_size += 3;
}
if self.xla_prefer_single_graph_cluster != false {
my_size += 3;
}
if let Some(ref v) = self.coordination_config.as_ref() {
let len = v.compute_size();
my_size += 2 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.collective_group_leader.is_empty() {
os.write_string(1, &self.collective_group_leader)?;
}
if !self.executor_type.is_empty() {
os.write_string(3, &self.executor_type)?;
}
if self.recv_buf_max_chunk != 0 {
os.write_int32(4, self.recv_buf_max_chunk)?;
}
if self.use_numa_affinity != false {
os.write_bool(5, self.use_numa_affinity)?;
}
if self.collective_deterministic_sequential_execution != false {
os.write_bool(6, self.collective_deterministic_sequential_execution)?;
}
if self.collective_nccl != false {
os.write_bool(7, self.collective_nccl)?;
}
if self.share_session_state_in_clusterspec_propagation != false {
os.write_bool(8, self.share_session_state_in_clusterspec_propagation)?;
}
if self.disable_thread_spinning != false {
os.write_bool(9, self.disable_thread_spinning)?;
}
if self.share_cluster_devices_in_session != false {
os.write_bool(10, self.share_cluster_devices_in_session)?;
}
if let Some(ref v) = self.session_metadata.as_ref() {
os.write_tag(11, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if self.optimize_for_static_graph != false {
os.write_bool(12, self.optimize_for_static_graph)?;
}
if self.enable_mlir_bridge != false {
os.write_bool(13, self.enable_mlir_bridge)?;
}
if self.mlir_bridge_rollout != ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED {
os.write_enum(17, ::protobuf::ProtobufEnum::value(&self.mlir_bridge_rollout))?;
}
if self.enable_mlir_graph_optimization != false {
os.write_bool(16, self.enable_mlir_graph_optimization)?;
}
if self.disable_output_partition_graphs != false {
os.write_bool(14, self.disable_output_partition_graphs)?;
}
if self.xla_fusion_autotuner_thresh != 0 {
os.write_int64(15, self.xla_fusion_autotuner_thresh)?;
}
if self.use_tfrt != false {
os.write_bool(18, self.use_tfrt)?;
}
if self.disable_functional_ops_lowering != false {
os.write_bool(21, self.disable_functional_ops_lowering)?;
}
if self.xla_prefer_single_graph_cluster != false {
os.write_bool(22, self.xla_prefer_single_graph_cluster)?;
}
if let Some(ref v) = self.coordination_config.as_ref() {
os.write_tag(23, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> ConfigProto_Experimental {
ConfigProto_Experimental::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"collective_group_leader",
|m: &ConfigProto_Experimental| { &m.collective_group_leader },
|m: &mut ConfigProto_Experimental| { &mut m.collective_group_leader },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"executor_type",
|m: &ConfigProto_Experimental| { &m.executor_type },
|m: &mut ConfigProto_Experimental| { &mut m.executor_type },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"recv_buf_max_chunk",
|m: &ConfigProto_Experimental| { &m.recv_buf_max_chunk },
|m: &mut ConfigProto_Experimental| { &mut m.recv_buf_max_chunk },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"use_numa_affinity",
|m: &ConfigProto_Experimental| { &m.use_numa_affinity },
|m: &mut ConfigProto_Experimental| { &mut m.use_numa_affinity },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"collective_deterministic_sequential_execution",
|m: &ConfigProto_Experimental| { &m.collective_deterministic_sequential_execution },
|m: &mut ConfigProto_Experimental| { &mut m.collective_deterministic_sequential_execution },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"collective_nccl",
|m: &ConfigProto_Experimental| { &m.collective_nccl },
|m: &mut ConfigProto_Experimental| { &mut m.collective_nccl },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"share_session_state_in_clusterspec_propagation",
|m: &ConfigProto_Experimental| { &m.share_session_state_in_clusterspec_propagation },
|m: &mut ConfigProto_Experimental| { &mut m.share_session_state_in_clusterspec_propagation },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"disable_thread_spinning",
|m: &ConfigProto_Experimental| { &m.disable_thread_spinning },
|m: &mut ConfigProto_Experimental| { &mut m.disable_thread_spinning },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"share_cluster_devices_in_session",
|m: &ConfigProto_Experimental| { &m.share_cluster_devices_in_session },
|m: &mut ConfigProto_Experimental| { &mut m.share_cluster_devices_in_session },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<SessionMetadata>>(
"session_metadata",
|m: &ConfigProto_Experimental| { &m.session_metadata },
|m: &mut ConfigProto_Experimental| { &mut m.session_metadata },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"optimize_for_static_graph",
|m: &ConfigProto_Experimental| { &m.optimize_for_static_graph },
|m: &mut ConfigProto_Experimental| { &mut m.optimize_for_static_graph },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"enable_mlir_bridge",
|m: &ConfigProto_Experimental| { &m.enable_mlir_bridge },
|m: &mut ConfigProto_Experimental| { &mut m.enable_mlir_bridge },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<ConfigProto_Experimental_MlirBridgeRollout>>(
"mlir_bridge_rollout",
|m: &ConfigProto_Experimental| { &m.mlir_bridge_rollout },
|m: &mut ConfigProto_Experimental| { &mut m.mlir_bridge_rollout },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"enable_mlir_graph_optimization",
|m: &ConfigProto_Experimental| { &m.enable_mlir_graph_optimization },
|m: &mut ConfigProto_Experimental| { &mut m.enable_mlir_graph_optimization },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"disable_output_partition_graphs",
|m: &ConfigProto_Experimental| { &m.disable_output_partition_graphs },
|m: &mut ConfigProto_Experimental| { &mut m.disable_output_partition_graphs },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"xla_fusion_autotuner_thresh",
|m: &ConfigProto_Experimental| { &m.xla_fusion_autotuner_thresh },
|m: &mut ConfigProto_Experimental| { &mut m.xla_fusion_autotuner_thresh },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"use_tfrt",
|m: &ConfigProto_Experimental| { &m.use_tfrt },
|m: &mut ConfigProto_Experimental| { &mut m.use_tfrt },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"disable_functional_ops_lowering",
|m: &ConfigProto_Experimental| { &m.disable_functional_ops_lowering },
|m: &mut ConfigProto_Experimental| { &mut m.disable_functional_ops_lowering },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"xla_prefer_single_graph_cluster",
|m: &ConfigProto_Experimental| { &m.xla_prefer_single_graph_cluster },
|m: &mut ConfigProto_Experimental| { &mut m.xla_prefer_single_graph_cluster },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::coordination_config::CoordinationServiceConfig>>(
"coordination_config",
|m: &ConfigProto_Experimental| { &m.coordination_config },
|m: &mut ConfigProto_Experimental| { &mut m.coordination_config },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<ConfigProto_Experimental>(
"ConfigProto.Experimental",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static ConfigProto_Experimental {
static instance: ::protobuf::rt::LazyV2<ConfigProto_Experimental> = ::protobuf::rt::LazyV2::INIT;
instance.get(ConfigProto_Experimental::new)
}
}
impl ::protobuf::Clear for ConfigProto_Experimental {
fn clear(&mut self) {
self.collective_group_leader.clear();
self.executor_type.clear();
self.recv_buf_max_chunk = 0;
self.use_numa_affinity = false;
self.collective_deterministic_sequential_execution = false;
self.collective_nccl = false;
self.share_session_state_in_clusterspec_propagation = false;
self.disable_thread_spinning = false;
self.share_cluster_devices_in_session = false;
self.session_metadata.clear();
self.optimize_for_static_graph = false;
self.enable_mlir_bridge = false;
self.mlir_bridge_rollout = ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED;
self.enable_mlir_graph_optimization = false;
self.disable_output_partition_graphs = false;
self.xla_fusion_autotuner_thresh = 0;
self.use_tfrt = false;
self.disable_functional_ops_lowering = false;
self.xla_prefer_single_graph_cluster = false;
self.coordination_config.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for ConfigProto_Experimental {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for ConfigProto_Experimental {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum ConfigProto_Experimental_MlirBridgeRollout {
MLIR_BRIDGE_ROLLOUT_UNSPECIFIED = 0,
MLIR_BRIDGE_ROLLOUT_ENABLED = 1,
MLIR_BRIDGE_ROLLOUT_DISABLED = 2,
MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED = 3,
MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED = 4,
}
impl ::protobuf::ProtobufEnum for ConfigProto_Experimental_MlirBridgeRollout {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<ConfigProto_Experimental_MlirBridgeRollout> {
match value {
0 => ::std::option::Option::Some(ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED),
1 => ::std::option::Option::Some(ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_ENABLED),
2 => ::std::option::Option::Some(ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_DISABLED),
3 => ::std::option::Option::Some(ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED),
4 => ::std::option::Option::Some(ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [ConfigProto_Experimental_MlirBridgeRollout] = &[
ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED,
ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_ENABLED,
ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_DISABLED,
ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED,
ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED,
];
values
}
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
::protobuf::reflect::EnumDescriptor::new_pb_name::<ConfigProto_Experimental_MlirBridgeRollout>("ConfigProto.Experimental.MlirBridgeRollout", file_descriptor_proto())
})
}
}
impl ::std::marker::Copy for ConfigProto_Experimental_MlirBridgeRollout {
}
impl ::std::default::Default for ConfigProto_Experimental_MlirBridgeRollout {
fn default() -> Self {
ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED
}
}
impl ::protobuf::reflect::ProtobufValue for ConfigProto_Experimental_MlirBridgeRollout {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
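// ---------------------------------------------------------------------------
// Hedged sketch — hand-written, NOT codegen output. It demonstrates mapping a
// raw wire value onto `ConfigProto_Experimental_MlirBridgeRollout` via
// `ProtobufEnum::from_i32` (defined just above) and attaching the resulting
// `Experimental` sub-message to a `ConfigProto`. `set_experimental` is assumed
// to exist on `ConfigProto` per the accessor pattern this file uses for every
// singular message field; verify against the full generated impl. The group
// leader string is only an illustrative value.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn example_experimental_options(raw_rollout: i32) -> ConfigProto {
    use ::protobuf::ProtobufEnum;
    // Unrecognized values yield `None`; fall back to the proto3 default, which
    // the size/serialize code above treats as "not set".
    let rollout = ConfigProto_Experimental_MlirBridgeRollout::from_i32(raw_rollout)
        .unwrap_or(ConfigProto_Experimental_MlirBridgeRollout::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED);
    let mut exp = ConfigProto_Experimental::new();
    exp.set_mlir_bridge_rollout(rollout);
    exp.set_collective_group_leader("/job:worker/replica:0/task:0".to_string());
    let mut cfg = ConfigProto::new();
    cfg.set_experimental(exp);
    cfg
}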
#[derive(PartialEq,Clone,Default)]
pub struct RunOptions {
// message fields
pub trace_level: RunOptions_TraceLevel,
pub timeout_in_ms: i64,
pub inter_op_thread_pool: i32,
pub output_partition_graphs: bool,
pub debug_options: ::protobuf::SingularPtrField<super::debug::DebugOptions>,
pub report_tensor_allocations_upon_oom: bool,
pub experimental: ::protobuf::SingularPtrField<RunOptions_Experimental>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RunOptions {
fn default() -> &'a RunOptions {
<RunOptions as ::protobuf::Message>::default_instance()
}
}
impl RunOptions {
pub fn new() -> RunOptions {
::std::default::Default::default()
}
// .tensorflow.RunOptions.TraceLevel trace_level = 1;
pub fn get_trace_level(&self) -> RunOptions_TraceLevel {
self.trace_level
}
pub fn clear_trace_level(&mut self) {
self.trace_level = RunOptions_TraceLevel::NO_TRACE;
}
// Param is passed by value, moved
pub fn set_trace_level(&mut self, v: RunOptions_TraceLevel) {
self.trace_level = v;
}
// int64 timeout_in_ms = 2;
pub fn get_timeout_in_ms(&self) -> i64 {
self.timeout_in_ms
}
pub fn clear_timeout_in_ms(&mut self) {
self.timeout_in_ms = 0;
}
// Param is passed by value, moved
pub fn set_timeout_in_ms(&mut self, v: i64) {
self.timeout_in_ms = v;
}
// int32 inter_op_thread_pool = 3;
pub fn get_inter_op_thread_pool(&self) -> i32 {
self.inter_op_thread_pool
}
pub fn clear_inter_op_thread_pool(&mut self) {
self.inter_op_thread_pool = 0;
}
// Param is passed by value, moved
pub fn set_inter_op_thread_pool(&mut self, v: i32) {
self.inter_op_thread_pool = v;
}
// bool output_partition_graphs = 5;
pub fn get_output_partition_graphs(&self) -> bool {
self.output_partition_graphs
}
pub fn clear_output_partition_graphs(&mut self) {
self.output_partition_graphs = false;
}
// Param is passed by value, moved
pub fn set_output_partition_graphs(&mut self, v: bool) {
self.output_partition_graphs = v;
}
// .tensorflow.DebugOptions debug_options = 6;
pub fn get_debug_options(&self) -> &super::debug::DebugOptions {
self.debug_options.as_ref().unwrap_or_else(|| <super::debug::DebugOptions as ::protobuf::Message>::default_instance())
}
pub fn clear_debug_options(&mut self) {
self.debug_options.clear();
}
pub fn has_debug_options(&self) -> bool {
self.debug_options.is_some()
}
// Param is passed by value, moved
pub fn set_debug_options(&mut self, v: super::debug::DebugOptions) {
self.debug_options = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_debug_options(&mut self) -> &mut super::debug::DebugOptions {
if self.debug_options.is_none() {
self.debug_options.set_default();
}
self.debug_options.as_mut().unwrap()
}
// Take field
pub fn take_debug_options(&mut self) -> super::debug::DebugOptions {
self.debug_options.take().unwrap_or_else(|| super::debug::DebugOptions::new())
}
// bool report_tensor_allocations_upon_oom = 7;
pub fn get_report_tensor_allocations_upon_oom(&self) -> bool {
self.report_tensor_allocations_upon_oom
}
pub fn clear_report_tensor_allocations_upon_oom(&mut self) {
self.report_tensor_allocations_upon_oom = false;
}
// Param is passed by value, moved
pub fn set_report_tensor_allocations_upon_oom(&mut self, v: bool) {
self.report_tensor_allocations_upon_oom = v;
}
// .tensorflow.RunOptions.Experimental experimental = 8;
pub fn get_experimental(&self) -> &RunOptions_Experimental {
self.experimental.as_ref().unwrap_or_else(|| <RunOptions_Experimental as ::protobuf::Message>::default_instance())
}
pub fn clear_experimental(&mut self) {
self.experimental.clear();
}
pub fn has_experimental(&self) -> bool {
self.experimental.is_some()
}
// Param is passed by value, moved
pub fn set_experimental(&mut self, v: RunOptions_Experimental) {
self.experimental = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_experimental(&mut self) -> &mut RunOptions_Experimental {
if self.experimental.is_none() {
self.experimental.set_default();
}
self.experimental.as_mut().unwrap()
}
// Take field
pub fn take_experimental(&mut self) -> RunOptions_Experimental {
self.experimental.take().unwrap_or_else(|| RunOptions_Experimental::new())
}
}
impl ::protobuf::Message for RunOptions {
fn is_initialized(&self) -> bool {
for v in &self.debug_options {
if !v.is_initialized() {
return false;
}
};
for v in &self.experimental {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.trace_level, 1, &mut self.unknown_fields)?
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.timeout_in_ms = tmp;
},
3 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.inter_op_thread_pool = tmp;
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.output_partition_graphs = tmp;
},
6 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.debug_options)?;
},
7 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.report_tensor_allocations_upon_oom = tmp;
},
8 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.experimental)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.trace_level != RunOptions_TraceLevel::NO_TRACE {
my_size += ::protobuf::rt::enum_size(1, self.trace_level);
}
if self.timeout_in_ms != 0 {
my_size += ::protobuf::rt::value_size(2, self.timeout_in_ms, ::protobuf::wire_format::WireTypeVarint);
}
if self.inter_op_thread_pool != 0 {
my_size += ::protobuf::rt::value_size(3, self.inter_op_thread_pool, ::protobuf::wire_format::WireTypeVarint);
}
if self.output_partition_graphs != false {
my_size += 2;
}
if let Some(ref v) = self.debug_options.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if self.report_tensor_allocations_upon_oom != false {
my_size += 2;
}
if let Some(ref v) = self.experimental.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.trace_level != RunOptions_TraceLevel::NO_TRACE {
os.write_enum(1, ::protobuf::ProtobufEnum::value(&self.trace_level))?;
}
if self.timeout_in_ms != 0 {
os.write_int64(2, self.timeout_in_ms)?;
}
if self.inter_op_thread_pool != 0 {
os.write_int32(3, self.inter_op_thread_pool)?;
}
if self.output_partition_graphs != false {
os.write_bool(5, self.output_partition_graphs)?;
}
if let Some(ref v) = self.debug_options.as_ref() {
os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if self.report_tensor_allocations_upon_oom != false {
os.write_bool(7, self.report_tensor_allocations_upon_oom)?;
}
if let Some(ref v) = self.experimental.as_ref() {
os.write_tag(8, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RunOptions {
RunOptions::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<RunOptions_TraceLevel>>(
"trace_level",
|m: &RunOptions| { &m.trace_level },
|m: &mut RunOptions| { &mut m.trace_level },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"timeout_in_ms",
|m: &RunOptions| { &m.timeout_in_ms },
|m: &mut RunOptions| { &mut m.timeout_in_ms },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
"inter_op_thread_pool",
|m: &RunOptions| { &m.inter_op_thread_pool },
|m: &mut RunOptions| { &mut m.inter_op_thread_pool },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"output_partition_graphs",
|m: &RunOptions| { &m.output_partition_graphs },
|m: &mut RunOptions| { &mut m.output_partition_graphs },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::debug::DebugOptions>>(
"debug_options",
|m: &RunOptions| { &m.debug_options },
|m: &mut RunOptions| { &mut m.debug_options },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"report_tensor_allocations_upon_oom",
|m: &RunOptions| { &m.report_tensor_allocations_upon_oom },
|m: &mut RunOptions| { &mut m.report_tensor_allocations_upon_oom },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<RunOptions_Experimental>>(
"experimental",
|m: &RunOptions| { &m.experimental },
|m: &mut RunOptions| { &mut m.experimental },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<RunOptions>(
"RunOptions",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static RunOptions {
static instance: ::protobuf::rt::LazyV2<RunOptions> = ::protobuf::rt::LazyV2::INIT;
instance.get(RunOptions::new)
}
}
impl ::protobuf::Clear for RunOptions {
fn clear(&mut self) {
self.trace_level = RunOptions_TraceLevel::NO_TRACE;
self.timeout_in_ms = 0;
self.inter_op_thread_pool = 0;
self.output_partition_graphs = false;
self.debug_options.clear();
self.report_tensor_allocations_upon_oom = false;
self.experimental.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for RunOptions {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for RunOptions {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
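// ---------------------------------------------------------------------------
// Hedged sketch — hand-written, NOT codegen output. Builds a `RunOptions`
// with per-step tracing and a call timeout using the accessors defined in the
// impl block above. `FULL_TRACE` is assumed to be a variant of the
// `RunOptions_TraceLevel` enum generated elsewhere in this file from
// RunOptions.TraceLevel in config.proto.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn example_run_options() -> RunOptions {
    let mut opts = RunOptions::new();
    opts.set_trace_level(RunOptions_TraceLevel::FULL_TRACE);
    opts.set_timeout_in_ms(30_000);
    opts.set_report_tensor_allocations_upon_oom(true);
    opts
}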
#[derive(PartialEq,Clone,Default)]
pub struct RunOptions_Experimental {
// message fields
pub collective_graph_key: i64,
pub use_run_handler_pool: bool,
pub run_handler_pool_options: ::protobuf::SingularPtrField<RunOptions_Experimental_RunHandlerPoolOptions>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RunOptions_Experimental {
fn default() -> &'a RunOptions_Experimental {
<RunOptions_Experimental as ::protobuf::Message>::default_instance()
}
}
impl RunOptions_Experimental {
pub fn new() -> RunOptions_Experimental {
::std::default::Default::default()
}
// int64 collective_graph_key = 1;
pub fn get_collective_graph_key(&self) -> i64 {
self.collective_graph_key
}
pub fn clear_collective_graph_key(&mut self) {
self.collective_graph_key = 0;
}
// Param is passed by value, moved
pub fn set_collective_graph_key(&mut self, v: i64) {
self.collective_graph_key = v;
}
// bool use_run_handler_pool = 2;
pub fn get_use_run_handler_pool(&self) -> bool {
self.use_run_handler_pool
}
pub fn clear_use_run_handler_pool(&mut self) {
self.use_run_handler_pool = false;
}
// Param is passed by value, moved
pub fn set_use_run_handler_pool(&mut self, v: bool) {
self.use_run_handler_pool = v;
}
// .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
pub fn get_run_handler_pool_options(&self) -> &RunOptions_Experimental_RunHandlerPoolOptions {
self.run_handler_pool_options.as_ref().unwrap_or_else(|| <RunOptions_Experimental_RunHandlerPoolOptions as ::protobuf::Message>::default_instance())
}
pub fn clear_run_handler_pool_options(&mut self) {
self.run_handler_pool_options.clear();
}
pub fn has_run_handler_pool_options(&self) -> bool {
self.run_handler_pool_options.is_some()
}
// Param is passed by value, moved
pub fn set_run_handler_pool_options(&mut self, v: RunOptions_Experimental_RunHandlerPoolOptions) {
self.run_handler_pool_options = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_run_handler_pool_options(&mut self) -> &mut RunOptions_Experimental_RunHandlerPoolOptions {
if self.run_handler_pool_options.is_none() {
self.run_handler_pool_options.set_default();
}
self.run_handler_pool_options.as_mut().unwrap()
}
// Take field
pub fn take_run_handler_pool_options(&mut self) -> RunOptions_Experimental_RunHandlerPoolOptions {
self.run_handler_pool_options.take().unwrap_or_else(|| RunOptions_Experimental_RunHandlerPoolOptions::new())
}
}
impl ::protobuf::Message for RunOptions_Experimental {
fn is_initialized(&self) -> bool {
for v in &self.run_handler_pool_options {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.collective_graph_key = tmp;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.use_run_handler_pool = tmp;
},
3 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.run_handler_pool_options)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.collective_graph_key != 0 {
my_size += ::protobuf::rt::value_size(1, self.collective_graph_key, ::protobuf::wire_format::WireTypeVarint);
}
if self.use_run_handler_pool != false {
my_size += 2;
}
if let Some(ref v) = self.run_handler_pool_options.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.collective_graph_key != 0 {
os.write_int64(1, self.collective_graph_key)?;
}
if self.use_run_handler_pool != false {
os.write_bool(2, self.use_run_handler_pool)?;
}
if let Some(ref v) = self.run_handler_pool_options.as_ref() {
os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RunOptions_Experimental {
RunOptions_Experimental::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"collective_graph_key",
|m: &RunOptions_Experimental| { &m.collective_graph_key },
|m: &mut RunOptions_Experimental| { &mut m.collective_graph_key },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"use_run_handler_pool",
|m: &RunOptions_Experimental| { &m.use_run_handler_pool },
|m: &mut RunOptions_Experimental| { &mut m.use_run_handler_pool },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<RunOptions_Experimental_RunHandlerPoolOptions>>(
"run_handler_pool_options",
|m: &RunOptions_Experimental| { &m.run_handler_pool_options },
|m: &mut RunOptions_Experimental| { &mut m.run_handler_pool_options },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<RunOptions_Experimental>(
"RunOptions.Experimental",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static RunOptions_Experimental {
static instance: ::protobuf::rt::LazyV2<RunOptions_Experimental> = ::protobuf::rt::LazyV2::INIT;
instance.get(RunOptions_Experimental::new)
}
}
impl ::protobuf::Clear for RunOptions_Experimental {
fn clear(&mut self) {
self.collective_graph_key = 0;
self.use_run_handler_pool = false;
self.run_handler_pool_options.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for RunOptions_Experimental {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for RunOptions_Experimental {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct RunOptions_Experimental_RunHandlerPoolOptions {
// message fields
pub priority: i64,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RunOptions_Experimental_RunHandlerPoolOptions {
fn default() -> &'a RunOptions_Experimental_RunHandlerPoolOptions {
<RunOptions_Experimental_RunHandlerPoolOptions as ::protobuf::Message>::default_instance()
}
}
impl RunOptions_Experimental_RunHandlerPoolOptions {
pub fn new() -> RunOptions_Experimental_RunHandlerPoolOptions {
::std::default::Default::default()
}
// int64 priority = 1;
pub fn get_priority(&self) -> i64 {
self.priority
}
pub fn clear_priority(&mut self) {
self.priority = 0;
}
// Param is passed by value, moved
pub fn set_priority(&mut self, v: i64) {
self.priority = v;
}
}
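// NOTE (illustrative): RunHandlerPoolOptions carries a single `priority` field, so
// `get_priority`/`set_priority` above are its whole API surface. It is usually reached
// through `RunOptions_Experimental::mut_run_handler_pool_options()` rather than built
// on its own.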
impl ::protobuf::Message for RunOptions_Experimental_RunHandlerPoolOptions {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.priority = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.priority != 0 {
my_size += ::protobuf::rt::value_size(1, self.priority, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.priority != 0 {
os.write_int64(1, self.priority)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RunOptions_Experimental_RunHandlerPoolOptions {
RunOptions_Experimental_RunHandlerPoolOptions::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"priority",
|m: &RunOptions_Experimental_RunHandlerPoolOptions| { &m.priority },
|m: &mut RunOptions_Experimental_RunHandlerPoolOptions| { &mut m.priority },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<RunOptions_Experimental_RunHandlerPoolOptions>(
"RunOptions.Experimental.RunHandlerPoolOptions",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static RunOptions_Experimental_RunHandlerPoolOptions {
static instance: ::protobuf::rt::LazyV2<RunOptions_Experimental_RunHandlerPoolOptions> = ::protobuf::rt::LazyV2::INIT;
instance.get(RunOptions_Experimental_RunHandlerPoolOptions::new)
}
}
impl ::protobuf::Clear for RunOptions_Experimental_RunHandlerPoolOptions {
fn clear(&mut self) {
self.priority = 0;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for RunOptions_Experimental_RunHandlerPoolOptions {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for RunOptions_Experimental_RunHandlerPoolOptions {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum RunOptions_TraceLevel {
NO_TRACE = 0,
SOFTWARE_TRACE = 1,
HARDWARE_TRACE = 2,
FULL_TRACE = 3,
}
impl ::protobuf::ProtobufEnum for RunOptions_TraceLevel {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<RunOptions_TraceLevel> {
match value {
0 => ::std::option::Option::Some(RunOptions_TraceLevel::NO_TRACE),
1 => ::std::option::Option::Some(RunOptions_TraceLevel::SOFTWARE_TRACE),
2 => ::std::option::Option::Some(RunOptions_TraceLevel::HARDWARE_TRACE),
3 => ::std::option::Option::Some(RunOptions_TraceLevel::FULL_TRACE),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [RunOptions_TraceLevel] = &[
RunOptions_TraceLevel::NO_TRACE,
RunOptions_TraceLevel::SOFTWARE_TRACE,
RunOptions_TraceLevel::HARDWARE_TRACE,
RunOptions_TraceLevel::FULL_TRACE,
];
values
}
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
::protobuf::reflect::EnumDescriptor::new_pb_name::<RunOptions_TraceLevel>("RunOptions.TraceLevel", file_descriptor_proto())
})
}
}
impl ::std::marker::Copy for RunOptions_TraceLevel {
}
impl ::std::default::Default for RunOptions_TraceLevel {
fn default() -> Self {
RunOptions_TraceLevel::NO_TRACE
}
}
impl ::protobuf::reflect::ProtobufValue for RunOptions_TraceLevel {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
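// NOTE (illustrative): conversion between RunOptions_TraceLevel and its wire value
// goes through the ProtobufEnum impl above; `from_i32` returns None for values it
// does not know, and the Default impl yields NO_TRACE. A small sketch:
//
//     use protobuf::ProtobufEnum;
//     assert_eq!(RunOptions_TraceLevel::FULL_TRACE.value(), 3);
//     assert_eq!(RunOptions_TraceLevel::from_i32(3),
//                Some(RunOptions_TraceLevel::FULL_TRACE));
//     assert_eq!(RunOptions_TraceLevel::from_i32(99), None);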
#[derive(PartialEq,Clone,Default)]
pub struct RunMetadata {
// message fields
pub step_stats: ::protobuf::SingularPtrField<super::step_stats::StepStats>,
pub cost_graph: ::protobuf::SingularPtrField<super::cost_graph::CostGraphDef>,
pub partition_graphs: ::protobuf::RepeatedField<super::graph::GraphDef>,
pub function_graphs: ::protobuf::RepeatedField<RunMetadata_FunctionGraphs>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RunMetadata {
fn default() -> &'a RunMetadata {
<RunMetadata as ::protobuf::Message>::default_instance()
}
}
impl RunMetadata {
pub fn new() -> RunMetadata {
::std::default::Default::default()
}
// .tensorflow.StepStats step_stats = 1;
pub fn get_step_stats(&self) -> &super::step_stats::StepStats {
self.step_stats.as_ref().unwrap_or_else(|| <super::step_stats::StepStats as ::protobuf::Message>::default_instance())
}
pub fn clear_step_stats(&mut self) {
self.step_stats.clear();
}
pub fn has_step_stats(&self) -> bool {
self.step_stats.is_some()
}
// Param is passed by value, moved
pub fn set_step_stats(&mut self, v: super::step_stats::StepStats) {
self.step_stats = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_step_stats(&mut self) -> &mut super::step_stats::StepStats {
if self.step_stats.is_none() {
self.step_stats.set_default();
}
self.step_stats.as_mut().unwrap()
}
// Take field
pub fn take_step_stats(&mut self) -> super::step_stats::StepStats {
self.step_stats.take().unwrap_or_else(|| super::step_stats::StepStats::new())
}
// .tensorflow.CostGraphDef cost_graph = 2;
pub fn get_cost_graph(&self) -> &super::cost_graph::CostGraphDef {
self.cost_graph.as_ref().unwrap_or_else(|| <super::cost_graph::CostGraphDef as ::protobuf::Message>::default_instance())
}
pub fn clear_cost_graph(&mut self) {
self.cost_graph.clear();
}
pub fn has_cost_graph(&self) -> bool {
self.cost_graph.is_some()
}
// Param is passed by value, moved
pub fn set_cost_graph(&mut self, v: super::cost_graph::CostGraphDef) {
self.cost_graph = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_cost_graph(&mut self) -> &mut super::cost_graph::CostGraphDef {
if self.cost_graph.is_none() {
self.cost_graph.set_default();
}
self.cost_graph.as_mut().unwrap()
}
// Take field
pub fn take_cost_graph(&mut self) -> super::cost_graph::CostGraphDef {
self.cost_graph.take().unwrap_or_else(|| super::cost_graph::CostGraphDef::new())
}
// repeated .tensorflow.GraphDef partition_graphs = 3;
pub fn get_partition_graphs(&self) -> &[super::graph::GraphDef] {
&self.partition_graphs
}
pub fn clear_partition_graphs(&mut self) {
self.partition_graphs.clear();
}
// Param is passed by value, moved
pub fn set_partition_graphs(&mut self, v: ::protobuf::RepeatedField<super::graph::GraphDef>) {
self.partition_graphs = v;
}
// Mutable pointer to the field.
pub fn mut_partition_graphs(&mut self) -> &mut ::protobuf::RepeatedField<super::graph::GraphDef> {
&mut self.partition_graphs
}
// Take field
pub fn take_partition_graphs(&mut self) -> ::protobuf::RepeatedField<super::graph::GraphDef> {
::std::mem::replace(&mut self.partition_graphs, ::protobuf::RepeatedField::new())
}
// repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
pub fn get_function_graphs(&self) -> &[RunMetadata_FunctionGraphs] {
&self.function_graphs
}
pub fn clear_function_graphs(&mut self) {
self.function_graphs.clear();
}
// Param is passed by value, moved
pub fn set_function_graphs(&mut self, v: ::protobuf::RepeatedField<RunMetadata_FunctionGraphs>) {
self.function_graphs = v;
}
// Mutable pointer to the field.
pub fn mut_function_graphs(&mut self) -> &mut ::protobuf::RepeatedField<RunMetadata_FunctionGraphs> {
&mut self.function_graphs
}
// Take field
pub fn take_function_graphs(&mut self) -> ::protobuf::RepeatedField<RunMetadata_FunctionGraphs> {
::std::mem::replace(&mut self.function_graphs, ::protobuf::RepeatedField::new())
}
}
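// NOTE (illustrative): RunMetadata is normally produced by a session run and read
// back through the accessors above. A minimal read-only sketch using only methods
// defined in this file:
//
//     fn has_profiling_output(md: &RunMetadata) -> bool {
//         md.has_step_stats() || !md.get_partition_graphs().is_empty()
//     }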
impl ::protobuf::Message for RunMetadata {
fn is_initialized(&self) -> bool {
for v in &self.step_stats {
if !v.is_initialized() {
return false;
}
};
for v in &self.cost_graph {
if !v.is_initialized() {
return false;
}
};
for v in &self.partition_graphs {
if !v.is_initialized() {
return false;
}
};
for v in &self.function_graphs {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.step_stats)?;
},
2 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.cost_graph)?;
},
3 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.partition_graphs)?;
},
4 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.function_graphs)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if let Some(ref v) = self.step_stats.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if let Some(ref v) = self.cost_graph.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
for value in &self.partition_graphs {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in &self.function_graphs {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if let Some(ref v) = self.step_stats.as_ref() {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if let Some(ref v) = self.cost_graph.as_ref() {
os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
for v in &self.partition_graphs {
os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
for v in &self.function_graphs {
os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RunMetadata {
RunMetadata::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::step_stats::StepStats>>(
"step_stats",
|m: &RunMetadata| { &m.step_stats },
|m: &mut RunMetadata| { &mut m.step_stats },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::cost_graph::CostGraphDef>>(
"cost_graph",
|m: &RunMetadata| { &m.cost_graph },
|m: &mut RunMetadata| { &mut m.cost_graph },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::graph::GraphDef>>(
"partition_graphs",
|m: &RunMetadata| { &m.partition_graphs },
|m: &mut RunMetadata| { &mut m.partition_graphs },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<RunMetadata_FunctionGraphs>>(
"function_graphs",
|m: &RunMetadata| { &m.function_graphs },
|m: &mut RunMetadata| { &mut m.function_graphs },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<RunMetadata>(
"RunMetadata",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static RunMetadata {
static instance: ::protobuf::rt::LazyV2<RunMetadata> = ::protobuf::rt::LazyV2::INIT;
instance.get(RunMetadata::new)
}
}
impl ::protobuf::Clear for RunMetadata {
fn clear(&mut self) {
self.step_stats.clear();
self.cost_graph.clear();
self.partition_graphs.clear();
self.function_graphs.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for RunMetadata {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for RunMetadata {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct RunMetadata_FunctionGraphs {
// message fields
pub partition_graphs: ::protobuf::RepeatedField<super::graph::GraphDef>,
pub pre_optimization_graph: ::protobuf::SingularPtrField<super::graph::GraphDef>,
pub post_optimization_graph: ::protobuf::SingularPtrField<super::graph::GraphDef>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RunMetadata_FunctionGraphs {
fn default() -> &'a RunMetadata_FunctionGraphs {
<RunMetadata_FunctionGraphs as ::protobuf::Message>::default_instance()
}
}
impl RunMetadata_FunctionGraphs {
pub fn new() -> RunMetadata_FunctionGraphs {
::std::default::Default::default()
}
// repeated .tensorflow.GraphDef partition_graphs = 1;
pub fn get_partition_graphs(&self) -> &[super::graph::GraphDef] {
&self.partition_graphs
}
pub fn clear_partition_graphs(&mut self) {
self.partition_graphs.clear();
}
// Param is passed by value, moved
pub fn set_partition_graphs(&mut self, v: ::protobuf::RepeatedField<super::graph::GraphDef>) {
self.partition_graphs = v;
}
// Mutable pointer to the field.
pub fn mut_partition_graphs(&mut self) -> &mut ::protobuf::RepeatedField<super::graph::GraphDef> {
&mut self.partition_graphs
}
// Take field
pub fn take_partition_graphs(&mut self) -> ::protobuf::RepeatedField<super::graph::GraphDef> {
::std::mem::replace(&mut self.partition_graphs, ::protobuf::RepeatedField::new())
}
// .tensorflow.GraphDef pre_optimization_graph = 2;
pub fn get_pre_optimization_graph(&self) -> &super::graph::GraphDef {
self.pre_optimization_graph.as_ref().unwrap_or_else(|| <super::graph::GraphDef as ::protobuf::Message>::default_instance())
}
pub fn clear_pre_optimization_graph(&mut self) {
self.pre_optimization_graph.clear();
}
pub fn has_pre_optimization_graph(&self) -> bool {
self.pre_optimization_graph.is_some()
}
// Param is passed by value, moved
pub fn set_pre_optimization_graph(&mut self, v: super::graph::GraphDef) {
self.pre_optimization_graph = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_pre_optimization_graph(&mut self) -> &mut super::graph::GraphDef {
if self.pre_optimization_graph.is_none() {
self.pre_optimization_graph.set_default();
}
self.pre_optimization_graph.as_mut().unwrap()
}
// Take field
pub fn take_pre_optimization_graph(&mut self) -> super::graph::GraphDef {
self.pre_optimization_graph.take().unwrap_or_else(|| super::graph::GraphDef::new())
}
// .tensorflow.GraphDef post_optimization_graph = 3;
pub fn get_post_optimization_graph(&self) -> &super::graph::GraphDef {
self.post_optimization_graph.as_ref().unwrap_or_else(|| <super::graph::GraphDef as ::protobuf::Message>::default_instance())
}
pub fn clear_post_optimization_graph(&mut self) {
self.post_optimization_graph.clear();
}
pub fn has_post_optimization_graph(&self) -> bool {
self.post_optimization_graph.is_some()
}
// Param is passed by value, moved
pub fn set_post_optimization_graph(&mut self, v: super::graph::GraphDef) {
self.post_optimization_graph = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_post_optimization_graph(&mut self) -> &mut super::graph::GraphDef {
if self.post_optimization_graph.is_none() {
self.post_optimization_graph.set_default();
}
self.post_optimization_graph.as_mut().unwrap()
}
// Take field
pub fn take_post_optimization_graph(&mut self) -> super::graph::GraphDef {
self.post_optimization_graph.take().unwrap_or_else(|| super::graph::GraphDef::new())
}
}
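// NOTE (illustrative): FunctionGraphs mirrors the top-level RunMetadata layout on a
// per-function basis: the partitioned graphs plus the graph before and after
// optimization, all exposed through the same accessor pattern as above.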
impl ::protobuf::Message for RunMetadata_FunctionGraphs {
fn is_initialized(&self) -> bool {
for v in &self.partition_graphs {
if !v.is_initialized() {
return false;
}
};
for v in &self.pre_optimization_graph {
if !v.is_initialized() {
return false;
}
};
for v in &self.post_optimization_graph {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.partition_graphs)?;
},
2 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.pre_optimization_graph)?;
},
3 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.post_optimization_graph)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.partition_graphs {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
if let Some(ref v) = self.pre_optimization_graph.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if let Some(ref v) = self.post_optimization_graph.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.partition_graphs {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
if let Some(ref v) = self.pre_optimization_graph.as_ref() {
os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if let Some(ref v) = self.post_optimization_graph.as_ref() {
os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RunMetadata_FunctionGraphs {
RunMetadata_FunctionGraphs::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::graph::GraphDef>>(
"partition_graphs",
|m: &RunMetadata_FunctionGraphs| { &m.partition_graphs },
|m: &mut RunMetadata_FunctionGraphs| { &mut m.partition_graphs },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::graph::GraphDef>>(
"pre_optimization_graph",
|m: &RunMetadata_FunctionGraphs| { &m.pre_optimization_graph },
|m: &mut RunMetadata_FunctionGraphs| { &mut m.pre_optimization_graph },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::graph::GraphDef>>(
"post_optimization_graph",
|m: &RunMetadata_FunctionGraphs| { &m.post_optimization_graph },
|m: &mut RunMetadata_FunctionGraphs| { &mut m.post_optimization_graph },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<RunMetadata_FunctionGraphs>(
"RunMetadata.FunctionGraphs",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static RunMetadata_FunctionGraphs {
static instance: ::protobuf::rt::LazyV2<RunMetadata_FunctionGraphs> = ::protobuf::rt::LazyV2::INIT;
instance.get(RunMetadata_FunctionGraphs::new)
}
}
impl ::protobuf::Clear for RunMetadata_FunctionGraphs {
fn clear(&mut self) {
self.partition_graphs.clear();
self.pre_optimization_graph.clear();
self.post_optimization_graph.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for RunMetadata_FunctionGraphs {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for RunMetadata_FunctionGraphs {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct TensorConnection {
// message fields
pub from_tensor: ::std::string::String,
pub to_tensor: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TensorConnection {
fn default() -> &'a TensorConnection {
<TensorConnection as ::protobuf::Message>::default_instance()
}
}
impl TensorConnection {
pub fn new() -> TensorConnection {
::std::default::Default::default()
}
// string from_tensor = 1;
pub fn get_from_tensor(&self) -> &str {
&self.from_tensor
}
pub fn clear_from_tensor(&mut self) {
self.from_tensor.clear();
}
// Param is passed by value, moved
pub fn set_from_tensor(&mut self, v: ::std::string::String) {
self.from_tensor = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_from_tensor(&mut self) -> &mut ::std::string::String {
&mut self.from_tensor
}
// Take field
pub fn take_from_tensor(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.from_tensor, ::std::string::String::new())
}
// string to_tensor = 2;
pub fn get_to_tensor(&self) -> &str {
&self.to_tensor
}
pub fn clear_to_tensor(&mut self) {
self.to_tensor.clear();
}
// Param is passed by value, moved
pub fn set_to_tensor(&mut self, v: ::std::string::String) {
self.to_tensor = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_to_tensor(&mut self) -> &mut ::std::string::String {
&mut self.to_tensor
}
// Take field
pub fn take_to_tensor(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.to_tensor, ::std::string::String::new())
}
}
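// NOTE (illustrative): TensorConnection is a plain (from_tensor -> to_tensor) string
// pair. A construction sketch with made-up tensor names:
//
//     let mut tc = TensorConnection::new();
//     tc.set_from_tensor("a/output:0".to_string());
//     tc.set_to_tensor("b/input:0".to_string());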
impl ::protobuf::Message for TensorConnection {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.from_tensor)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.to_tensor)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.from_tensor.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.from_tensor);
}
if !self.to_tensor.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.to_tensor);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.from_tensor.is_empty() {
os.write_string(1, &self.from_tensor)?;
}
if !self.to_tensor.is_empty() {
os.write_string(2, &self.to_tensor)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TensorConnection {
TensorConnection::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"from_tensor",
|m: &TensorConnection| { &m.from_tensor },
|m: &mut TensorConnection| { &mut m.from_tensor },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"to_tensor",
|m: &TensorConnection| { &m.to_tensor },
|m: &mut TensorConnection| { &mut m.to_tensor },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<TensorConnection>(
"TensorConnection",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static TensorConnection {
static instance: ::protobuf::rt::LazyV2<TensorConnection> = ::protobuf::rt::LazyV2::INIT;
instance.get(TensorConnection::new)
}
}
impl ::protobuf::Clear for TensorConnection {
fn clear(&mut self) {
self.from_tensor.clear();
self.to_tensor.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for TensorConnection {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for TensorConnection {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct CallableOptions {
// message fields
pub feed: ::protobuf::RepeatedField<::std::string::String>,
pub fetch: ::protobuf::RepeatedField<::std::string::String>,
pub target: ::protobuf::RepeatedField<::std::string::String>,
pub run_options: ::protobuf::SingularPtrField<RunOptions>,
pub tensor_connection: ::protobuf::RepeatedField<TensorConnection>,
pub feed_devices: ::std::collections::HashMap<::std::string::String, ::std::string::String>,
pub fetch_devices: ::std::collections::HashMap<::std::string::String, ::std::string::String>,
pub fetch_skip_sync: bool,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a CallableOptions {
fn default() -> &'a CallableOptions {
<CallableOptions as ::protobuf::Message>::default_instance()
}
}
impl CallableOptions {
pub fn new() -> CallableOptions {
::std::default::Default::default()
}
// repeated string feed = 1;
pub fn get_feed(&self) -> &[::std::string::String] {
&self.feed
}
pub fn clear_feed(&mut self) {
self.feed.clear();
}
// Param is passed by value, moved
pub fn set_feed(&mut self, v: ::protobuf::RepeatedField<::std::string::String>) {
self.feed = v;
}
// Mutable pointer to the field.
pub fn mut_feed(&mut self) -> &mut ::protobuf::RepeatedField<::std::string::String> {
&mut self.feed
}
// Take field
pub fn take_feed(&mut self) -> ::protobuf::RepeatedField<::std::string::String> {
::std::mem::replace(&mut self.feed, ::protobuf::RepeatedField::new())
}
// repeated string fetch = 2;
pub fn get_fetch(&self) -> &[::std::string::String] {
&self.fetch
}
pub fn clear_fetch(&mut self) {
self.fetch.clear();
}
// Param is passed by value, moved
pub fn set_fetch(&mut self, v: ::protobuf::RepeatedField<::std::string::String>) {
self.fetch = v;
}
// Mutable pointer to the field.
pub fn mut_fetch(&mut self) -> &mut ::protobuf::RepeatedField<::std::string::String> {
&mut self.fetch
}
// Take field
pub fn take_fetch(&mut self) -> ::protobuf::RepeatedField<::std::string::String> {
::std::mem::replace(&mut self.fetch, ::protobuf::RepeatedField::new())
}
// repeated string target = 3;
pub fn get_target(&self) -> &[::std::string::String] {
&self.target
}
pub fn clear_target(&mut self) {
self.target.clear();
}
// Param is passed by value, moved
pub fn set_target(&mut self, v: ::protobuf::RepeatedField<::std::string::String>) {
self.target = v;
}
// Mutable pointer to the field.
pub fn mut_target(&mut self) -> &mut ::protobuf::RepeatedField<::std::string::String> {
&mut self.target
}
// Take field
pub fn take_target(&mut self) -> ::protobuf::RepeatedField<::std::string::String> {
::std::mem::replace(&mut self.target, ::protobuf::RepeatedField::new())
}
// .tensorflow.RunOptions run_options = 4;
pub fn get_run_options(&self) -> &RunOptions {
self.run_options.as_ref().unwrap_or_else(|| <RunOptions as ::protobuf::Message>::default_instance())
}
pub fn clear_run_options(&mut self) {
self.run_options.clear();
}
pub fn has_run_options(&self) -> bool {
self.run_options.is_some()
}
// Param is passed by value, moved
pub fn set_run_options(&mut self, v: RunOptions) {
self.run_options = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_run_options(&mut self) -> &mut RunOptions {
if self.run_options.is_none() {
self.run_options.set_default();
}
self.run_options.as_mut().unwrap()
}
// Take field
pub fn take_run_options(&mut self) -> RunOptions {
self.run_options.take().unwrap_or_else(|| RunOptions::new())
}
// repeated .tensorflow.TensorConnection tensor_connection = 5;
pub fn get_tensor_connection(&self) -> &[TensorConnection] {
&self.tensor_connection
}
pub fn clear_tensor_connection(&mut self) {
self.tensor_connection.clear();
}
// Param is passed by value, moved
pub fn set_tensor_connection(&mut self, v: ::protobuf::RepeatedField<TensorConnection>) {
self.tensor_connection = v;
}
// Mutable pointer to the field.
pub fn mut_tensor_connection(&mut self) -> &mut ::protobuf::RepeatedField<TensorConnection> {
&mut self.tensor_connection
}
// Take field
pub fn take_tensor_connection(&mut self) -> ::protobuf::RepeatedField<TensorConnection> {
::std::mem::replace(&mut self.tensor_connection, ::protobuf::RepeatedField::new())
}
// repeated .tensorflow.CallableOptions.FeedDevicesEntry feed_devices = 6;
pub fn get_feed_devices(&self) -> &::std::collections::HashMap<::std::string::String, ::std::string::String> {
&self.feed_devices
}
pub fn clear_feed_devices(&mut self) {
self.feed_devices.clear();
}
// Param is passed by value, moved
pub fn set_feed_devices(&mut self, v: ::std::collections::HashMap<::std::string::String, ::std::string::String>) {
self.feed_devices = v;
}
// Mutable pointer to the field.
pub fn mut_feed_devices(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, ::std::string::String> {
&mut self.feed_devices
}
// Take field
pub fn take_feed_devices(&mut self) -> ::std::collections::HashMap<::std::string::String, ::std::string::String> {
::std::mem::replace(&mut self.feed_devices, ::std::collections::HashMap::new())
}
// repeated .tensorflow.CallableOptions.FetchDevicesEntry fetch_devices = 7;
pub fn get_fetch_devices(&self) -> &::std::collections::HashMap<::std::string::String, ::std::string::String> {
&self.fetch_devices
}
pub fn clear_fetch_devices(&mut self) {
self.fetch_devices.clear();
}
// Param is passed by value, moved
pub fn set_fetch_devices(&mut self, v: ::std::collections::HashMap<::std::string::String, ::std::string::String>) {
self.fetch_devices = v;
}
// Mutable pointer to the field.
pub fn mut_fetch_devices(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, ::std::string::String> {
&mut self.fetch_devices
}
// Take field
pub fn take_fetch_devices(&mut self) -> ::std::collections::HashMap<::std::string::String, ::std::string::String> {
::std::mem::replace(&mut self.fetch_devices, ::std::collections::HashMap::new())
}
// bool fetch_skip_sync = 8;
pub fn get_fetch_skip_sync(&self) -> bool {
self.fetch_skip_sync
}
pub fn clear_fetch_skip_sync(&mut self) {
self.fetch_skip_sync = false;
}
// Param is passed by value, moved
pub fn set_fetch_skip_sync(&mut self, v: bool) {
self.fetch_skip_sync = v;
}
}
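// NOTE (illustrative): a CallableOptions is usually assembled field by field before
// being handed to a session. Sketch below; the feed/fetch names and device string are
// placeholders, and `push`/`insert` come from protobuf::RepeatedField and
// std::collections::HashMap respectively:
//
//     let mut opts = CallableOptions::new();
//     opts.mut_feed().push("x:0".to_string());
//     opts.mut_fetch().push("y:0".to_string());
//     opts.mut_feed_devices()
//         .insert("x:0".to_string(), "/device:GPU:0".to_string());
//     opts.set_fetch_skip_sync(false);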
impl ::protobuf::Message for CallableOptions {
fn is_initialized(&self) -> bool {
for v in &self.run_options {
if !v.is_initialized() {
return false;
}
};
for v in &self.tensor_connection {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.feed)?;
},
2 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.fetch)?;
},
3 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.target)?;
},
4 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.run_options)?;
},
5 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.tensor_connection)?;
},
6 => {
::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(wire_type, is, &mut self.feed_devices)?;
},
7 => {
::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(wire_type, is, &mut self.fetch_devices)?;
},
8 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.fetch_skip_sync = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.feed {
my_size += ::protobuf::rt::string_size(1, &value);
};
for value in &self.fetch {
my_size += ::protobuf::rt::string_size(2, &value);
};
for value in &self.target {
my_size += ::protobuf::rt::string_size(3, &value);
};
if let Some(ref v) = self.run_options.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
for value in &self.tensor_connection {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(6, &self.feed_devices);
my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(7, &self.fetch_devices);
if self.fetch_skip_sync != false {
my_size += 2;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.feed {
os.write_string(1, &v)?;
};
for v in &self.fetch {
os.write_string(2, &v)?;
};
for v in &self.target {
os.write_string(3, &v)?;
};
if let Some(ref v) = self.run_options.as_ref() {
os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
for v in &self.tensor_connection {
os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(6, &self.feed_devices, os)?;
::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(7, &self.fetch_devices, os)?;
if self.fetch_skip_sync != false {
os.write_bool(8, self.fetch_skip_sync)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> CallableOptions {
CallableOptions::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"feed",
|m: &CallableOptions| { &m.feed },
|m: &mut CallableOptions| { &mut m.feed },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"fetch",
|m: &CallableOptions| { &m.fetch },
|m: &mut CallableOptions| { &mut m.fetch },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"target",
|m: &CallableOptions| { &m.target },
|m: &mut CallableOptions| { &mut m.target },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<RunOptions>>(
"run_options",
|m: &CallableOptions| { &m.run_options },
|m: &mut CallableOptions| { &mut m.run_options },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<TensorConnection>>(
"tensor_connection",
|m: &CallableOptions| { &m.tensor_connection },
|m: &mut CallableOptions| { &mut m.tensor_connection },
));
fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(
"feed_devices",
|m: &CallableOptions| { &m.feed_devices },
|m: &mut CallableOptions| { &mut m.feed_devices },
));
fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(
"fetch_devices",
|m: &CallableOptions| { &m.fetch_devices },
|m: &mut CallableOptions| { &mut m.fetch_devices },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"fetch_skip_sync",
|m: &CallableOptions| { &m.fetch_skip_sync },
|m: &mut CallableOptions| { &mut m.fetch_skip_sync },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<CallableOptions>(
"CallableOptions",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static CallableOptions {
static instance: ::protobuf::rt::LazyV2<CallableOptions> = ::protobuf::rt::LazyV2::INIT;
instance.get(CallableOptions::new)
}
}
impl ::protobuf::Clear for CallableOptions {
fn clear(&mut self) {
self.feed.clear();
self.fetch.clear();
self.target.clear();
self.run_options.clear();
self.tensor_connection.clear();
self.feed_devices.clear();
self.fetch_devices.clear();
self.fetch_skip_sync = false;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for CallableOptions {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for CallableOptions {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
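// NOTE (illustrative): like every message in this file, CallableOptions also gets the
// blanket encode/decode helpers from the protobuf::Message trait (the rust-protobuf
// 2.x API is assumed here). A round trip looks roughly like:
//
//     use protobuf::Message;
//     let bytes = opts.write_to_bytes()?;
//     let decoded = CallableOptions::parse_from_bytes(&bytes)?;
//     assert_eq!(opts, decoded);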
static file_descriptor_proto_data: &'static [u8] = b"\
\n%tensorflow/core/protobuf/config.proto\x12\ntensorflow\x1a*tensorflow/\
core/framework/cost_graph.proto\x1a%tensorflow/core/framework/graph.prot\
o\x1a*tensorflow/core/framework/step_stats.proto\x1a&tensorflow/core/pro\
tobuf/cluster.proto\x1a2tensorflow/core/protobuf/coordination_config.pro\
to\x1a$tensorflow/core/protobuf/debug.proto\x1a.tensorflow/core/protobuf\
/rewriter_config.proto\"\xc5\t\n\nGPUOptions\x12D\n\x1fper_process_gpu_m\
emory_fraction\x18\x01\x20\x01(\x01R\x1bperProcessGpuMemoryFraction\x12!\
\n\x0callow_growth\x18\x04\x20\x01(\x08R\x0ballowGrowth\x12%\n\x0ealloca\
tor_type\x18\x02\x20\x01(\tR\rallocatorType\x126\n\x17deferred_deletion_\
bytes\x18\x03\x20\x01(\x03R\x15deferredDeletionBytes\x12.\n\x13visible_d\
evice_list\x18\x05\x20\x01(\tR\x11visibleDeviceList\x12;\n\x1apolling_ac\
tive_delay_usecs\x18\x06\x20\x01(\x05R\x17pollingActiveDelayUsecs\x12?\n\
\x1cpolling_inactive_delay_msecs\x18\x07\x20\x01(\x05R\x19pollingInactiv\
eDelayMsecs\x120\n\x14force_gpu_compatible\x18\x08\x20\x01(\x08R\x12forc\
eGpuCompatible\x12G\n\x0cexperimental\x18\t\x20\x01(\x0b2#.tensorflow.GP\
UOptions.ExperimentalR\x0cexperimental\x1a\xc5\x05\n\x0cExperimental\x12\
[\n\x0fvirtual_devices\x18\x01\x20\x03(\x0b22.tensorflow.GPUOptions.Expe\
rimental.VirtualDevicesR\x0evirtualDevices\x12,\n\x12use_unified_memory\
\x18\x02\x20\x01(\x08R\x10useUnifiedMemory\x12;\n\x1bnum_dev_to_dev_copy\
_streams\x18\x03\x20\x01(\x05R\x16numDevToDevCopyStreams\x122\n\x15colle\
ctive_ring_order\x18\x04\x20\x01(\tR\x13collectiveRingOrder\x123\n\x15ti\
mestamped_allocator\x18\x05\x20\x01(\x08R\x14timestampedAllocator\x12=\n\
\x1bkernel_tracker_max_interval\x18\x07\x20\x01(\x05R\x18kernelTrackerMa\
xInterval\x127\n\x18kernel_tracker_max_bytes\x18\x08\x20\x01(\x05R\x15ke\
rnelTrackerMaxBytes\x12;\n\x1akernel_tracker_max_pending\x18\t\x20\x01(\
\x05R\x17kernelTrackerMaxPending\x12F\n\x1finternal_fragmentation_fracti\
on\x18\n\x20\x01(\x01R\x1dinternalFragmentationFraction\x121\n\x15use_cu\
da_malloc_async\x18\x0b\x20\x01(\x08R\x12useCudaMallocAsync\x1aT\n\x0eVi\
rtualDevices\x12&\n\x0fmemory_limit_mb\x18\x01\x20\x03(\x02R\rmemoryLimi\
tMb\x12\x1a\n\x08priority\x18\x02\x20\x03(\x05R\x08priority\"\xa8\x04\n\
\x10OptimizerOptions\x12M\n#do_common_subexpression_elimination\x18\x01\
\x20\x01(\x08R\x20doCommonSubexpressionElimination\x12.\n\x13do_constant\
_folding\x18\x02\x20\x01(\x08R\x11doConstantFolding\x12>\n\x1cmax_folded\
_constant_in_bytes\x18\x06\x20\x01(\x03R\x18maxFoldedConstantInBytes\x12\
0\n\x14do_function_inlining\x18\x04\x20\x01(\x08R\x12doFunctionInlining\
\x12?\n\topt_level\x18\x03\x20\x01(\x0e2\".tensorflow.OptimizerOptions.L\
evelR\x08optLevel\x12U\n\x10global_jit_level\x18\x05\x20\x01(\x0e2+.tens\
orflow.OptimizerOptions.GlobalJitLevelR\x0eglobalJitLevel\x12$\n\x0ecpu_\
global_jit\x18\x07\x20\x01(\x08R\x0ccpuGlobalJit\"\x20\n\x05Level\x12\
\x06\n\x02L1\x10\0\x12\x0f\n\x02L0\x10\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\x01\"C\n\x0eGlobalJitLevel\x12\x0b\n\x07DEFAULT\x10\0\x12\x10\n\x03\
OFF\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x12\x08\n\x04ON_1\x10\
\x01\x12\x08\n\x04ON_2\x10\x02\"\x90\x04\n\x0cGraphOptions\x124\n\x16ena\
ble_recv_scheduling\x18\x02\x20\x01(\x08R\x14enableRecvScheduling\x12I\n\
\x11optimizer_options\x18\x03\x20\x01(\x0b2\x1c.tensorflow.OptimizerOpti\
onsR\x10optimizerOptions\x12(\n\x10build_cost_model\x18\x04\x20\x01(\x03\
R\x0ebuildCostModel\x123\n\x16build_cost_model_after\x18\t\x20\x01(\x03R\
\x13buildCostModelAfter\x12!\n\x0cinfer_shapes\x18\x05\x20\x01(\x08R\x0b\
inferShapes\x12,\n\x12place_pruned_graph\x18\x06\x20\x01(\x08R\x10placeP\
runedGraph\x128\n\x18enable_bfloat16_sendrecv\x18\x07\x20\x01(\x08R\x16e\
nableBfloat16Sendrecv\x12#\n\rtimeline_step\x18\x08\x20\x01(\x05R\x0ctim\
elineStep\x12C\n\x0frewrite_options\x18\n\x20\x01(\x0b2\x1a.tensorflow.R\
ewriterConfigR\x0erewriteOptionsJ\x04\x08\x01\x10\x02R%skip_common_subex\
pression_elimination\"Y\n\x15ThreadPoolOptionProto\x12\x1f\n\x0bnum_thre\
ads\x18\x01\x20\x01(\x05R\nnumThreads\x12\x1f\n\x0bglobal_name\x18\x02\
\x20\x01(\tR\nglobalName\"\xe0\x02\n\nRPCOptions\x12>\n\x1cuse_rpc_for_i\
nprocess_master\x18\x01\x20\x01(\x08R\x18useRpcForInprocessMaster\x123\n\
\x15compression_algorithm\x18\x02\x20\x01(\tR\x14compressionAlgorithm\
\x12+\n\x11compression_level\x18\x03\x20\x01(\x05R\x10compressionLevel\
\x12,\n\x12cache_rpc_response\x18\x04\x20\x01(\x08R\x10cacheRpcResponse\
\x12K\n\"disable_session_connection_sharing\x18\x05\x20\x01(\x08R\x1fdis\
ableSessionConnectionSharing\x125\n\x17num_channels_per_target\x18\x06\
\x20\x01(\x05R\x14numChannelsPerTarget\"?\n\x0fSessionMetadata\x12\x12\n\
\x04name\x18\x01\x20\x01(\tR\x04name\x12\x18\n\x07version\x18\x02\x20\
\x01(\x03R\x07version\"\xdc\x14\n\x0bConfigProto\x12K\n\x0cdevice_count\
\x18\x01\x20\x03(\x0b2(.tensorflow.ConfigProto.DeviceCountEntryR\x0bdevi\
ceCount\x12?\n\x1cintra_op_parallelism_threads\x18\x02\x20\x01(\x05R\x19\
intraOpParallelismThreads\x12?\n\x1cinter_op_parallelism_threads\x18\x05\
\x20\x01(\x05R\x19interOpParallelismThreads\x125\n\x17use_per_session_th\
reads\x18\t\x20\x01(\x08R\x14usePerSessionThreads\x12a\n\x1csession_inte\
r_op_thread_pool\x18\x0c\x20\x03(\x0b2!.tensorflow.ThreadPoolOptionProto\
R\x18sessionInterOpThreadPool\x12)\n\x10placement_period\x18\x03\x20\x01\
(\x05R\x0fplacementPeriod\x12%\n\x0edevice_filters\x18\x04\x20\x03(\tR\r\
deviceFilters\x127\n\x0bgpu_options\x18\x06\x20\x01(\x0b2\x16.tensorflow\
.GPUOptionsR\ngpuOptions\x120\n\x14allow_soft_placement\x18\x07\x20\x01(\
\x08R\x12allowSoftPlacement\x120\n\x14log_device_placement\x18\x08\x20\
\x01(\x08R\x12logDevicePlacement\x12=\n\rgraph_options\x18\n\x20\x01(\
\x0b2\x18.tensorflow.GraphOptionsR\x0cgraphOptions\x125\n\x17operation_t\
imeout_in_ms\x18\x0b\x20\x01(\x03R\x14operationTimeoutInMs\x127\n\x0brpc\
_options\x18\r\x20\x01(\x0b2\x16.tensorflow.RPCOptionsR\nrpcOptions\x127\
\n\x0bcluster_def\x18\x0e\x20\x01(\x0b2\x16.tensorflow.ClusterDefR\nclus\
terDef\x122\n\x15isolate_session_state\x18\x0f\x20\x01(\x08R\x13isolateS\
essionState\x12F\n\x20share_cluster_devices_in_session\x18\x11\x20\x01(\
\x08R\x1cshareClusterDevicesInSession\x12H\n\x0cexperimental\x18\x10\x20\
\x01(\x0b2$.tensorflow.ConfigProto.ExperimentalR\x0cexperimental\x1a>\n\
\x10DeviceCountEntry\x12\x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x12\x14\
\n\x05value\x18\x02\x20\x01(\x05R\x05value:\x028\x01\x1a\x86\x0c\n\x0cEx\
perimental\x126\n\x17collective_group_leader\x18\x01\x20\x01(\tR\x15coll\
ectiveGroupLeader\x12#\n\rexecutor_type\x18\x03\x20\x01(\tR\x0cexecutorT\
ype\x12+\n\x12recv_buf_max_chunk\x18\x04\x20\x01(\x05R\x0frecvBufMaxChun\
k\x12*\n\x11use_numa_affinity\x18\x05\x20\x01(\x08R\x0fuseNumaAffinity\
\x12a\n-collective_deterministic_sequential_execution\x18\x06\x20\x01(\
\x08R*collectiveDeterministicSequentialExecution\x12'\n\x0fcollective_nc\
cl\x18\x07\x20\x01(\x08R\x0ecollectiveNccl\x12a\n.share_session_state_in\
_clusterspec_propagation\x18\x08\x20\x01(\x08R)shareSessionStateInCluste\
rspecPropagation\x126\n\x17disable_thread_spinning\x18\t\x20\x01(\x08R\
\x15disableThreadSpinning\x12F\n\x20share_cluster_devices_in_session\x18\
\n\x20\x01(\x08R\x1cshareClusterDevicesInSession\x12F\n\x10session_metad\
ata\x18\x0b\x20\x01(\x0b2\x1b.tensorflow.SessionMetadataR\x0fsessionMeta\
data\x129\n\x19optimize_for_static_graph\x18\x0c\x20\x01(\x08R\x16optimi\
zeForStaticGraph\x12,\n\x12enable_mlir_bridge\x18\r\x20\x01(\x08R\x10ena\
bleMlirBridge\x12f\n\x13mlir_bridge_rollout\x18\x11\x20\x01(\x0e26.tenso\
rflow.ConfigProto.Experimental.MlirBridgeRolloutR\x11mlirBridgeRollout\
\x12C\n\x1eenable_mlir_graph_optimization\x18\x10\x20\x01(\x08R\x1benabl\
eMlirGraphOptimization\x12E\n\x1fdisable_output_partition_graphs\x18\x0e\
\x20\x01(\x08R\x1cdisableOutputPartitionGraphs\x12=\n\x1bxla_fusion_auto\
tuner_thresh\x18\x0f\x20\x01(\x03R\x18xlaFusionAutotunerThresh\x12\x19\n\
\x08use_tfrt\x18\x12\x20\x01(\x08R\x07useTfrt\x12E\n\x1fdisable_function\
al_ops_lowering\x18\x15\x20\x01(\x08R\x1cdisableFunctionalOpsLowering\
\x12D\n\x1fxla_prefer_single_graph_cluster\x18\x16\x20\x01(\x08R\x1bxlaP\
referSingleGraphCluster\x12V\n\x13coordination_config\x18\x17\x20\x01(\
\x0b2%.tensorflow.CoordinationServiceConfigR\x12coordinationConfig\"\xda\
\x01\n\x11MlirBridgeRollout\x12#\n\x1fMLIR_BRIDGE_ROLLOUT_UNSPECIFIED\
\x10\0\x12\x1f\n\x1bMLIR_BRIDGE_ROLLOUT_ENABLED\x10\x01\x12\x20\n\x1cMLI\
R_BRIDGE_ROLLOUT_DISABLED\x10\x02\x12)\n%MLIR_BRIDGE_ROLLOUT_SAFE_MODE_E\
NABLED\x10\x03\x122\n.MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED\x10\
\x04J\x04\x08\x02\x10\x03J\x04\x08\x13\x10\x14J\x04\x08\x14\x10\x15\"\
\xa8\x06\n\nRunOptions\x12B\n\x0btrace_level\x18\x01\x20\x01(\x0e2!.tens\
orflow.RunOptions.TraceLevelR\ntraceLevel\x12\"\n\rtimeout_in_ms\x18\x02\
\x20\x01(\x03R\x0btimeoutInMs\x12/\n\x14inter_op_thread_pool\x18\x03\x20\
\x01(\x05R\x11interOpThreadPool\x126\n\x17output_partition_graphs\x18\
\x05\x20\x01(\x08R\x15outputPartitionGraphs\x12=\n\rdebug_options\x18\
\x06\x20\x01(\x0b2\x18.tensorflow.DebugOptionsR\x0cdebugOptions\x12J\n\"\
report_tensor_allocations_upon_oom\x18\x07\x20\x01(\x08R\x1ereportTensor\
AllocationsUponOom\x12G\n\x0cexperimental\x18\x08\x20\x01(\x0b2#.tensorf\
low.RunOptions.ExperimentalR\x0cexperimental\x1a\x9a\x02\n\x0cExperiment\
al\x120\n\x14collective_graph_key\x18\x01\x20\x01(\x03R\x12collectiveGra\
phKey\x12/\n\x14use_run_handler_pool\x18\x02\x20\x01(\x08R\x11useRunHand\
lerPool\x12r\n\x18run_handler_pool_options\x18\x03\x20\x01(\x0b29.tensor\
flow.RunOptions.Experimental.RunHandlerPoolOptionsR\x15runHandlerPoolOpt\
ions\x1a3\n\x15RunHandlerPoolOptions\x12\x1a\n\x08priority\x18\x01\x20\
\x01(\x03R\x08priority\"R\n\nTraceLevel\x12\x0c\n\x08NO_TRACE\x10\0\x12\
\x12\n\x0eSOFTWARE_TRACE\x10\x01\x12\x12\n\x0eHARDWARE_TRACE\x10\x02\x12\
\x0e\n\nFULL_TRACE\x10\x03J\x04\x08\x04\x10\x05\"\xfc\x03\n\x0bRunMetada\
ta\x124\n\nstep_stats\x18\x01\x20\x01(\x0b2\x15.tensorflow.StepStatsR\ts\
tepStats\x127\n\ncost_graph\x18\x02\x20\x01(\x0b2\x18.tensorflow.CostGra\
phDefR\tcostGraph\x12?\n\x10partition_graphs\x18\x03\x20\x03(\x0b2\x14.t\
ensorflow.GraphDefR\x0fpartitionGraphs\x12O\n\x0ffunction_graphs\x18\x04\
\x20\x03(\x0b2&.tensorflow.RunMetadata.FunctionGraphsR\x0efunctionGraphs\
\x1a\xeb\x01\n\x0eFunctionGraphs\x12?\n\x10partition_graphs\x18\x01\x20\
\x03(\x0b2\x14.tensorflow.GraphDefR\x0fpartitionGraphs\x12J\n\x16pre_opt\
imization_graph\x18\x02\x20\x01(\x0b2\x14.tensorflow.GraphDefR\x14preOpt\
imizationGraph\x12L\n\x17post_optimization_graph\x18\x03\x20\x01(\x0b2\
\x14.tensorflow.GraphDefR\x15postOptimizationGraph\"P\n\x10TensorConnect\
ion\x12\x1f\n\x0bfrom_tensor\x18\x01\x20\x01(\tR\nfromTensor\x12\x1b\n\t\
to_tensor\x18\x02\x20\x01(\tR\x08toTensor\"\xa5\x04\n\x0fCallableOptions\
\x12\x12\n\x04feed\x18\x01\x20\x03(\tR\x04feed\x12\x14\n\x05fetch\x18\
\x02\x20\x03(\tR\x05fetch\x12\x16\n\x06target\x18\x03\x20\x03(\tR\x06tar\
get\x127\n\x0brun_options\x18\x04\x20\x01(\x0b2\x16.tensorflow.RunOption\
sR\nrunOptions\x12I\n\x11tensor_connection\x18\x05\x20\x03(\x0b2\x1c.ten\
sorflow.TensorConnectionR\x10tensorConnection\x12O\n\x0cfeed_devices\x18\
\x06\x20\x03(\x0b2,.tensorflow.CallableOptions.FeedDevicesEntryR\x0bfeed\
Devices\x12R\n\rfetch_devices\x18\x07\x20\x03(\x0b2-.tensorflow.Callable\
Options.FetchDevicesEntryR\x0cfetchDevices\x12&\n\x0ffetch_skip_sync\x18\
\x08\x20\x01(\x08R\rfetchSkipSync\x1a>\n\x10FeedDevicesEntry\x12\x10\n\
\x03key\x18\x01\x20\x01(\tR\x03key\x12\x14\n\x05value\x18\x02\x20\x01(\t\
R\x05value:\x028\x01\x1a?\n\x11FetchDevicesEntry\x12\x10\n\x03key\x18\
\x01\x20\x01(\tR\x03key\x12\x14\n\x05value\x18\x02\x20\x01(\tR\x05value:\
\x028\x01B\x84\x01\n\x18org.tensorflow.frameworkB\x0cConfigProtosP\x01ZU\
github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_pr\
otos_go_proto\xf8\x01\x01b\x06proto3\
";
static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT;
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::Message::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}
| 38.507748 | 188 | 0.609167 |
2f4f24bb6dd8401edc25f21c3c87d228ed4d6d4d | 17,814 | // Copyright 2020 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::collections::VecDeque;
use std::io::{self, Read, Write};
use std::ops::DerefMut;
use std::result;
use std::sync::Arc;
use std::thread;
use base::{error, Event, PollToken, RawDescriptor, WaitContext};
use data_model::{DataInit, Le16, Le32};
use remain::sorted;
use sync::Mutex;
use thiserror::Error as ThisError;
use vm_memory::GuestMemory;
use super::{
base_features, copy_config, Interrupt, Queue, Reader, SignalableInterrupt, VirtioDevice,
Writer, TYPE_CONSOLE,
};
use crate::{ProtectionType, SerialDevice};
pub(crate) const QUEUE_SIZE: u16 = 256;
// For now, just implement port 0 (receiveq and transmitq).
// If VIRTIO_CONSOLE_F_MULTIPORT is implemented, more queues will be needed.
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];
#[sorted]
#[derive(ThisError, Debug)]
pub enum ConsoleError {
/// There are no more available descriptors to receive into
#[error("no rx descriptors available")]
RxDescriptorsExhausted,
}
#[derive(Copy, Clone, Debug, Default)]
#[repr(C)]
pub struct virtio_console_config {
pub cols: Le16,
pub rows: Le16,
pub max_nr_ports: Le32,
pub emerg_wr: Le32,
}
// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for virtio_console_config {}
/// Checks for input from `buffer` and transfers it to the receive queue, if any.
///
/// # Arguments
///
/// * `mem` - The GuestMemory to write the data into
/// * `interrupt` - SignalableInterrupt used to signal that the queue has been used
/// * `buffer` - Ring buffer providing data to put into the guest
/// * `receive_queue` - The receive virtio Queue
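///
/// # Example
///
/// A rough, non-compiled sketch; `mem`, `interrupt`, and `receive_queue` are
/// assumed to come from an already-activated virtio console device.
///
/// ```ignore
/// let mut pending: VecDeque<u8> = VecDeque::from(vec![b'h', b'i']);
/// match handle_input(&mem, &interrupt, &mut pending, &mut receive_queue) {
///     Ok(()) => {}
///     // The driver has no rx descriptors posted; retry after it adds more.
///     Err(ConsoleError::RxDescriptorsExhausted) => {}
/// }
/// ```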
pub fn handle_input<I: SignalableInterrupt>(
mem: &GuestMemory,
interrupt: &I,
buffer: &mut VecDeque<u8>,
receive_queue: &mut Queue,
) -> result::Result<(), ConsoleError> {
let mut exhausted_queue = false;
loop {
let desc = match receive_queue.peek(mem) {
Some(d) => d,
None => {
exhausted_queue = true;
break;
}
};
let desc_index = desc.index;
// TODO(morg): Handle extra error cases as Err(ConsoleError) instead of just returning.
let mut writer = match Writer::new(mem.clone(), desc) {
Ok(w) => w,
Err(e) => {
error!("console: failed to create Writer: {}", e);
break;
}
};
while writer.available_bytes() > 0 && !buffer.is_empty() {
let (buffer_front, buffer_back) = buffer.as_slices();
let buffer_chunk = if !buffer_front.is_empty() {
buffer_front
} else {
buffer_back
};
let written = writer.write(buffer_chunk).unwrap();
drop(buffer.drain(..written));
}
let bytes_written = writer.bytes_written() as u32;
if bytes_written > 0 {
receive_queue.pop_peeked(mem);
receive_queue.add_used(mem, desc_index, bytes_written);
receive_queue.trigger_interrupt(mem, interrupt);
}
if bytes_written == 0 {
break;
}
}
if exhausted_queue {
Err(ConsoleError::RxDescriptorsExhausted)
} else {
Ok(())
}
}
/// Processes the data taken from the given transmit queue into the output sink.
///
/// # Arguments
///
/// * `mem` - The GuestMemory to take the data from
/// * `interrupt` - SignalableInterrupt used to signal (if required) that the queue has been used
/// * `transmit_queue` - The transmit virtio Queue
/// * `output` - The output sink we are going to write the data into
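///
/// # Example
///
/// A minimal, non-compiled sketch; `mem`, `interrupt`, and `transmit_queue` are
/// assumed to come from an activated device. Any `io::Write` works as the sink,
/// so a `Vec<u8>` can stand in for a real serial backend.
///
/// ```ignore
/// let mut output: Vec<u8> = Vec::new();
/// process_transmit_queue(&mem, &interrupt, &mut transmit_queue, &mut output);
/// // `output` now holds whatever the guest pushed onto its transmit queue.
/// ```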
pub fn process_transmit_queue<I: SignalableInterrupt>(
mem: &GuestMemory,
interrupt: &I,
transmit_queue: &mut Queue,
output: &mut dyn io::Write,
) {
let mut needs_interrupt = false;
while let Some(avail_desc) = transmit_queue.pop(mem) {
let desc_index = avail_desc.index;
let reader = match Reader::new(mem.clone(), avail_desc) {
Ok(r) => r,
Err(e) => {
error!("console: failed to create reader: {}", e);
transmit_queue.add_used(mem, desc_index, 0);
needs_interrupt = true;
continue;
}
};
let len = match process_transmit_request(reader, output) {
Ok(written) => written,
Err(e) => {
error!("console: process_transmit_request failed: {}", e);
0
}
};
transmit_queue.add_used(mem, desc_index, len);
needs_interrupt = true;
}
if needs_interrupt {
transmit_queue.trigger_interrupt(mem, interrupt);
}
}
struct Worker {
mem: GuestMemory,
interrupt: Interrupt,
input: Option<Arc<Mutex<VecDeque<u8>>>>,
output: Box<dyn io::Write + Send>,
kill_evt: Event,
in_avail_evt: Event,
receive_queue: Queue,
receive_evt: Event,
transmit_queue: Queue,
transmit_evt: Event,
}
fn write_output(output: &mut dyn io::Write, data: &[u8]) -> io::Result<()> {
output.write_all(data)?;
output.flush()
}
/// Starts a thread that reads rx and sends the input back via the returned buffer.
///
/// The caller should listen on `in_avail_evt` for events. When `in_avail_evt` signals that data
/// is available, the caller should lock the returned `Mutex` and read data out of the inner
/// `VecDeque`. The data should be removed from the beginning of the `VecDeque` as it is processed.
///
/// # Arguments
///
/// * `rx` - Data source that the reader thread will wait on to send data back to the buffer
/// * `in_avail_evt` - Event triggered by the thread when new input is available on the buffer
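///
/// # Example
///
/// A sketch of the caller protocol (not compiled); `rx` is assumed to be any
/// boxed reader, for example the read half of a pipe.
///
/// ```ignore
/// let in_avail_evt = Event::new().expect("failed to create event");
/// let buffer = spawn_input_thread(rx, &in_avail_evt).expect("failed to spawn input thread");
/// // Later, once `in_avail_evt` has been signaled:
/// in_avail_evt.read().expect("failed to clear event");
/// let mut pending = buffer.lock();
/// while let Some(byte) = pending.pop_front() {
///     // Forward `byte` toward the guest's receive queue.
/// }
/// ```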
pub fn spawn_input_thread(
mut rx: Box<dyn io::Read + Send>,
in_avail_evt: &Event,
) -> Option<Arc<Mutex<VecDeque<u8>>>> {
let buffer = Arc::new(Mutex::new(VecDeque::<u8>::new()));
let buffer_cloned = buffer.clone();
let thread_in_avail_evt = match in_avail_evt.try_clone() {
Ok(evt) => evt,
Err(e) => {
error!("failed to clone in_avail_evt: {}", e);
return None;
}
};
// The input thread runs in detached mode.
let res = thread::Builder::new()
.name("console_input".to_string())
.spawn(move || {
let mut rx_buf = [0u8; 1 << 12];
loop {
match rx.read(&mut rx_buf) {
Ok(0) => break, // Assume the stream of input has ended.
Ok(size) => {
buffer.lock().extend(&rx_buf[0..size]);
thread_in_avail_evt.write(1).unwrap();
}
Err(e) => {
// Being interrupted is not an error, but everything else is.
if e.kind() != io::ErrorKind::Interrupted {
error!(
"failed to read for bytes to queue into console device: {}",
e
);
break;
}
}
}
}
});
if let Err(e) = res {
error!("failed to spawn input thread: {}", e);
return None;
}
Some(buffer_cloned)
}
/// Writes the available data from the reader into the given output sink.
///
/// # Arguments
///
/// * `reader` - The Reader with the data we want to write.
/// * `output` - The output sink we are going to write the data to.
pub fn process_transmit_request(mut reader: Reader, output: &mut dyn io::Write) -> io::Result<u32> {
let len = reader.available_bytes();
let mut data = vec![0u8; len];
reader.read_exact(&mut data)?;
write_output(output, &data)?;
Ok(0)
}
impl Worker {
fn run(&mut self) {
#[derive(PollToken)]
enum Token {
ReceiveQueueAvailable,
TransmitQueueAvailable,
InputAvailable,
InterruptResample,
Kill,
}
let wait_ctx: WaitContext<Token> = match WaitContext::build_with(&[
(&self.transmit_evt, Token::TransmitQueueAvailable),
(&self.receive_evt, Token::ReceiveQueueAvailable),
(&self.in_avail_evt, Token::InputAvailable),
(&self.kill_evt, Token::Kill),
]) {
Ok(pc) => pc,
Err(e) => {
error!("failed creating WaitContext: {}", e);
return;
}
};
if let Some(resample_evt) = self.interrupt.get_resample_evt() {
if wait_ctx
.add(resample_evt, Token::InterruptResample)
.is_err()
{
error!("failed adding resample event to WaitContext.");
return;
}
}
'wait: loop {
let events = match wait_ctx.wait() {
Ok(v) => v,
Err(e) => {
error!("failed polling for events: {}", e);
break;
}
};
for event in events.iter().filter(|e| e.is_readable) {
match event.token {
Token::TransmitQueueAvailable => {
if let Err(e) = self.transmit_evt.read() {
error!("failed reading transmit queue Event: {}", e);
break 'wait;
}
process_transmit_queue(
&self.mem,
&self.interrupt,
&mut self.transmit_queue,
&mut self.output,
);
}
Token::ReceiveQueueAvailable => {
if let Err(e) = self.receive_evt.read() {
error!("failed reading receive queue Event: {}", e);
break 'wait;
}
if let Some(in_buf_ref) = self.input.as_ref() {
match handle_input(
&self.mem,
&self.interrupt,
in_buf_ref.lock().deref_mut(),
&mut self.receive_queue,
) {
Ok(()) => {}
// Console errors are no-ops, so just continue.
Err(_) => {
continue;
}
}
}
}
Token::InputAvailable => {
if let Err(e) = self.in_avail_evt.read() {
error!("failed reading in_avail_evt: {}", e);
break 'wait;
}
if let Some(in_buf_ref) = self.input.as_ref() {
match handle_input(
&self.mem,
&self.interrupt,
in_buf_ref.lock().deref_mut(),
&mut self.receive_queue,
) {
Ok(()) => {}
// Console errors are no-ops, so just continue.
Err(_) => {
continue;
}
}
}
}
Token::InterruptResample => {
self.interrupt.interrupt_resample();
}
Token::Kill => break 'wait,
}
}
}
}
}
enum ConsoleInput {
FromRead(Box<dyn io::Read + Send>),
FromThread(Arc<Mutex<VecDeque<u8>>>),
}
/// Virtio console device.
pub struct Console {
base_features: u64,
kill_evt: Option<Event>,
in_avail_evt: Option<Event>,
worker_thread: Option<thread::JoinHandle<Worker>>,
input: Option<ConsoleInput>,
output: Option<Box<dyn io::Write + Send>>,
keep_rds: Vec<RawDescriptor>,
}
impl SerialDevice for Console {
fn new(
protected_vm: ProtectionType,
_evt: Event,
input: Option<Box<dyn io::Read + Send>>,
output: Option<Box<dyn io::Write + Send>>,
keep_rds: Vec<RawDescriptor>,
) -> Console {
Console {
base_features: base_features(protected_vm),
in_avail_evt: None,
kill_evt: None,
worker_thread: None,
input: input.map(ConsoleInput::FromRead),
output,
keep_rds,
}
}
}
impl Drop for Console {
fn drop(&mut self) {
if let Some(kill_evt) = self.kill_evt.take() {
// Ignore the result because there is nothing we can do about it.
let _ = kill_evt.write(1);
}
if let Some(worker_thread) = self.worker_thread.take() {
let _ = worker_thread.join();
}
}
}
impl VirtioDevice for Console {
fn keep_rds(&self) -> Vec<RawDescriptor> {
self.keep_rds.clone()
}
fn features(&self) -> u64 {
self.base_features
}
fn device_type(&self) -> u32 {
TYPE_CONSOLE
}
fn queue_max_sizes(&self) -> &[u16] {
QUEUE_SIZES
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
let config = virtio_console_config {
max_nr_ports: 1.into(),
..Default::default()
};
copy_config(data, 0, config.as_slice(), offset);
}
fn activate(
&mut self,
mem: GuestMemory,
interrupt: Interrupt,
mut queues: Vec<Queue>,
mut queue_evts: Vec<Event>,
) {
if queues.len() < 2 || queue_evts.len() < 2 {
return;
}
let (self_kill_evt, kill_evt) = match Event::new().and_then(|e| Ok((e.try_clone()?, e))) {
Ok(v) => v,
Err(e) => {
error!("failed creating kill Event pair: {}", e);
return;
}
};
self.kill_evt = Some(self_kill_evt);
if self.in_avail_evt.is_none() {
self.in_avail_evt = match Event::new() {
Ok(evt) => Some(evt),
Err(e) => {
error!("failed creating Event: {}", e);
return;
}
};
}
let in_avail_evt = match self.in_avail_evt.as_ref().unwrap().try_clone() {
Ok(v) => v,
Err(e) => {
error!("failed creating input available Event pair: {}", e);
return;
}
};
// Spawn a separate thread to poll self.input.
// A thread is used because io::Read only provides a blocking interface, and there is no
// generic way to add an io::Read instance to a poll context (it may not be backed by a file
// descriptor). Moving the blocking read call to a separate thread and sending data back to
// the main worker thread with an event for notification bridges this gap.
let input = match self.input.take() {
Some(ConsoleInput::FromRead(read)) => {
let buffer = spawn_input_thread(read, self.in_avail_evt.as_ref().unwrap());
if buffer.is_none() {
error!("failed creating input thread");
};
buffer
}
Some(ConsoleInput::FromThread(buffer)) => Some(buffer),
None => None,
};
let output = self.output.take().unwrap_or_else(|| Box::new(io::sink()));
let worker_result = thread::Builder::new()
.name("virtio_console".to_string())
.spawn(move || {
let mut worker = Worker {
mem,
interrupt,
input,
output,
in_avail_evt,
kill_evt,
// Device -> driver
receive_queue: queues.remove(0),
receive_evt: queue_evts.remove(0),
// Driver -> device
transmit_queue: queues.remove(0),
transmit_evt: queue_evts.remove(0),
};
worker.run();
worker
});
match worker_result {
Err(e) => {
error!("failed to spawn virtio_console worker: {}", e);
}
Ok(join_handle) => {
self.worker_thread = Some(join_handle);
}
}
}
fn reset(&mut self) -> bool {
if let Some(kill_evt) = self.kill_evt.take() {
if kill_evt.write(1).is_err() {
error!("{}: failed to notify the kill event", self.debug_label());
return false;
}
}
if let Some(worker_thread) = self.worker_thread.take() {
match worker_thread.join() {
Err(_) => {
error!("{}: failed to get back resources", self.debug_label());
return false;
}
Ok(worker) => {
self.input = worker.input.map(ConsoleInput::FromThread);
self.output = Some(worker.output);
return true;
}
}
}
false
}
}
| 32.988889 | 100 | 0.50174 |
9cf323eff145f84c6733a02435c7593885d41c93 | 319 | #![feature(assoc_char_funcs)]
extern crate termion;
use std::env;
mod lib;
fn main() {
env_logger::init();
if env::var("INTERACTIVE_MODE").is_ok() {
lib::run_interactive();
} else if env::var("COMPARISON").is_ok() {
lib::run_comparison();
} else {
lib::run_tests();
}
}
| 15.95 | 46 | 0.573668 |
8fdba18c13f27fa490a8dacdbfedfef34b46389e | 1,036 | use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
/// Removes an empty directory.
///
/// This function is an async version of [`std::fs::remove_dir`].
///
/// [`std::fs::remove_dir`]: https://doc.rust-lang.org/std/fs/fn.remove_dir.html
///
/// # Errors
///
/// An error will be returned in the following situations:
///
/// * `path` is not an existing and empty directory.
/// * The current process lacks permissions to remove the directory.
/// * Some other I/O error occurred.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// fs::remove_dir("./some/directory").await?;
/// #
/// # Ok(()) }) }
/// ```
pub async fn remove_dir<P: AsRef<Path>>(path: P) -> io::Result<()> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::remove_dir(&path)
.context(|| format!("could not remove directory `{}`", path.display()))
})
.await
}
| 26.564103 | 83 | 0.600386 |
bb64d20fd6e310850b5de5505b4b0bc3c91b3beb | 1,733 | use crate::router::Router;
use std::any::TypeId;
use std::ops::Deref;
use std::rc::Rc;
/// Data that was provided by the developer.
///
/// ```ignore
/// struct State {
/// count: u8
/// }
///
/// #[route(path = "/")]
/// fn route_provided_data(state: Provided<State>) -> VirtualNode {
/// VirtualNode::Text(format!("Count: {}", state.count).into())
/// }
///
/// fn main () {
/// let mut router = Router::new(vec![]);
/// router.provide(State {count: 50});
/// }
/// ```
pub struct Provided<T> {
/// The application data to provide to a route.
pub data: Rc<T>,
}
impl<T> Deref for Provided<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T> Clone for Provided<T> {
fn clone(&self) -> Self {
Provided {
data: Rc::clone(&self.data),
}
}
}
impl Router {
/// Provide the application state data that different routes need.
pub fn provide<T: 'static>(&mut self, provided: T) {
let provided = Provided {
data: Rc::new(provided),
};
let type_id = TypeId::of::<Provided<T>>();
let provided = Box::new(provided);
self.provided.borrow_mut().insert(type_id, provided);
}
}
#[cfg(test)]
mod tests {
use super::*;
struct State {
count: u8,
}
#[test]
fn provide() {
let mut router = Router::new(vec![]);
router.provide(State { count: 50 });
let state = router.provided.borrow();
let state = state
.get(&TypeId::of::<Provided<State>>())
.unwrap()
.downcast_ref::<Provided<State>>()
.expect("Downcast state");
assert_eq!(state.count, 50);
}
}
| 21.134146 | 70 | 0.534911 |
d9256c77e5725126797a5593bed1e1077f6a9765 | 2,902 | #[doc = "Register `RBQBAPQ[%s]` reader"]
pub struct R(crate::R<RBQBAPQ_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<RBQBAPQ_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::convert::From<crate::R<RBQBAPQ_SPEC>> for R {
fn from(reader: crate::R<RBQBAPQ_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `RBQBAPQ[%s]` writer"]
pub struct W(crate::W<RBQBAPQ_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<RBQBAPQ_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl core::convert::From<crate::W<RBQBAPQ_SPEC>> for W {
fn from(writer: crate::W<RBQBAPQ_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `RXBQBA` reader - Receive Buffer Queue Base Address"]
pub struct RXBQBA_R(crate::FieldReader<u8, u8>);
impl RXBQBA_R {
pub(crate) fn new(bits: u8) -> Self {
RXBQBA_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RXBQBA_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RXBQBA` writer - Receive Buffer Queue Base Address"]
pub struct RXBQBA_W<'a> {
w: &'a mut W,
}
impl<'a> RXBQBA_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x3f << 2)) | ((value as u32 & 0x3f) << 2);
self.w
}
}
impl R {
#[doc = "Bits 2:7 - Receive Buffer Queue Base Address"]
#[inline(always)]
pub fn rxbqba(&self) -> RXBQBA_R {
RXBQBA_R::new(((self.bits >> 2) & 0x3f) as u8)
}
}
impl W {
#[doc = "Bits 2:7 - Receive Buffer Queue Base Address"]
#[inline(always)]
pub fn rxbqba(&mut self) -> RXBQBA_W {
RXBQBA_W { w: self }
}
#[doc = "Writes raw bits to the register."]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Receive Buffer Queue Base Address Register Priority Queue (index = 1)\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [rbqbapq](index.html) module"]
pub struct RBQBAPQ_SPEC;
impl crate::RegisterSpec for RBQBAPQ_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [rbqbapq::R](R) reader structure"]
impl crate::Readable for RBQBAPQ_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [rbqbapq::W](W) writer structure"]
impl crate::Writable for RBQBAPQ_SPEC {
type Writer = W;
}
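// Hypothetical usage sketch: with the svd2rust API generated above, the field is
// programmed through the closure-based writer (the peripheral handle `gmac` and
// the variable `base_bits` are assumptions, not part of this file):
//
// gmac.rbqbapq[0].write(|w| unsafe { w.rxbqba().bits(base_bits) });
//
// where `base_bits` carries bits 2..=7 of the receive buffer queue base address.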
| 31.204301 | 379 | 0.607512 |
cc85869b1fc6a0549c73ba8b3fd4301705664870 | 8,148 | use std::fmt::{Debug, Formatter};
use libipt_sys::{
pt_packet,
pt_packet_type_ppt_cbr as PT_PACKET_TYPE_PPT_CBR,
pt_packet_type_ppt_cyc as PT_PACKET_TYPE_PPT_CYC,
pt_packet_type_ppt_exstop as PT_PACKET_TYPE_PPT_EXSTOP,
pt_packet_type_ppt_fup as PT_PACKET_TYPE_PPT_FUP,
pt_packet_type_ppt_invalid as PT_PACKET_TYPE_PPT_INVALID,
pt_packet_type_ppt_mnt as PT_PACKET_TYPE_PPT_MNT,
pt_packet_type_ppt_mode as PT_PACKET_TYPE_PPT_MODE,
pt_packet_type_ppt_mtc as PT_PACKET_TYPE_PPT_MTC,
pt_packet_type_ppt_mwait as PT_PACKET_TYPE_PPT_MWAIT,
pt_packet_type_ppt_ovf as PT_PACKET_TYPE_PPT_OVF,
pt_packet_type_ppt_pad as PT_PACKET_TYPE_PPT_PAD,
pt_packet_type_ppt_pip as PT_PACKET_TYPE_PPT_PIP,
pt_packet_type_ppt_psb as PT_PACKET_TYPE_PPT_PSB,
pt_packet_type_ppt_psbend as PT_PACKET_TYPE_PPT_PSBEND,
pt_packet_type_ppt_ptw as PT_PACKET_TYPE_PPT_PTW,
pt_packet_type_ppt_pwre as PT_PACKET_TYPE_PPT_PWRE,
pt_packet_type_ppt_pwrx as PT_PACKET_TYPE_PPT_PWRX,
pt_packet_type_ppt_stop as PT_PACKET_TYPE_PPT_STOP,
pt_packet_type_ppt_tip as PT_PACKET_TYPE_PPT_TIP,
pt_packet_type_ppt_tip_pgd as PT_PACKET_TYPE_PPT_TIP_PGD,
pt_packet_type_ppt_tip_pge as PT_PACKET_TYPE_PPT_TIP_PGE,
pt_packet_type_ppt_tma as PT_PACKET_TYPE_PPT_TMA,
pt_packet_type_ppt_tnt_8 as PT_PACKET_TYPE_PPT_TNT_8,
pt_packet_type_ppt_tnt_64 as PT_PACKET_TYPE_PPT_TNT_64,
pt_packet_type_ppt_tsc as PT_PACKET_TYPE_PPT_TSC,
pt_packet_type_ppt_unknown as PT_PACKET_TYPE_PPT_UNKNOWN,
pt_packet_type_ppt_vmcs as PT_PACKET_TYPE_PPT_VMCS
};
#[macro_use]
mod conversions;
mod pad;
pub use pad::*;
mod ovf;
pub use ovf::*;
mod psb;
pub use psb::*;
mod psbend;
pub use psbend::*;
mod stop;
pub use stop::*;
mod invalid;
pub use invalid::*;
mod tnt;
pub use tnt::*;
mod ip;
pub use ip::*;
mod mode;
pub use mode::*;
mod pip;
pub use pip::*;
mod tsc;
pub use tsc::*;
mod cbr;
pub use cbr::*;
mod tma;
pub use tma::*;
mod mtc;
pub use mtc::*;
mod cyc;
pub use cyc::*;
mod vmcs;
pub use vmcs::*;
mod mnt;
pub use mnt::*;
mod exstop;
pub use exstop::*;
mod mwait;
pub use mwait::*;
mod pwre;
pub use pwre::*;
mod pwrx;
pub use pwrx::*;
mod ptw;
pub use ptw::*;
mod unknown;
pub use unknown::*;
mod decoder;
pub use decoder::PacketDecoder;
mod encoder;
pub use encoder::Encoder;
#[cfg(test)]
mod test {
use super::*;
use libipt_sys::pt_packet_mnt;
use libipt_sys::pt_packet__bindgen_ty_1;
#[test]
fn test_pkt_from() {
let p1 = pt_packet_mnt { payload: 666 };
let p2 = pt_packet {
type_: PT_PACKET_TYPE_PPT_MNT,
size: std::mem::size_of::<pt_packet_mnt>() as u8,
payload: pt_packet__bindgen_ty_1 { mnt: p1 }
};
let p3: Packet::<()> = p2.into();
match p3 {
Packet::Mnt(m) => assert_eq!(m.payload(), p1.payload),
_ => unreachable!()
};
}
}
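/// A decoded Intel PT packet, with one variant per payload kind carried by
/// `libipt`'s `pt_packet` union.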
pub enum Packet<T> {
Invalid(invalid::Invalid),
Psbend(psbend::Psbend),
Stop(stop::Stop),
Pad(pad::Pad),
Psb(psb::Psb),
Ovf(ovf::Ovf),
Unknown(unknown::Unknown<T>),
Fup(ip::Fup),
Tip(ip::Tip),
TipPge(ip::TipPge),
TipPgd(ip::TipPgd),
Tnt8(tnt::Tnt8),
Tnt64(tnt::Tnt64),
Mode(mode::Mode),
Pip(pip::Pip),
Vmcs(vmcs::Vmcs),
Cbr(cbr::Cbr),
Tsc(tsc::Tsc),
Tma(tma::Tma),
Mtc(mtc::Mtc),
Cyc(cyc::Cyc),
Mnt(mnt::Mnt),
Exstop(exstop::Exstop),
Mwait(mwait::Mwait),
Pwre(pwre::Pwre),
Pwrx(pwrx::Pwrx),
Ptw(ptw::Ptw)
}
impl<T> Debug for Packet<T> {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
match self {
Self::Invalid(pack) => f.write_fmt(format_args!("Invalid({:?})", pack)),
Self::Psbend(pack) => f.write_fmt(format_args!("Psbend({:?})", pack)),
Self::Stop(pack) => f.write_fmt(format_args!("Stop({:?})", pack)),
Self::Pad(pack) => f.write_fmt(format_args!("Pad({:?})", pack)),
Self::Psb(pack) => f.write_fmt(format_args!("Psb({:?})", pack)),
Self::Ovf(pack) => f.write_fmt(format_args!("Ovf({:?})", pack)),
Self::Unknown(_) => f.write_str("Unknown"),
Self::Fup(pack) => f.write_fmt(format_args!("Fup({:?})", pack)),
Self::Tip(pack) => f.write_fmt(format_args!("Tip({:?})", pack)),
Self::TipPge(pack) => f.write_fmt(format_args!("TipPge({:?})", pack)),
Self::TipPgd(pack) => f.write_fmt(format_args!("TipPgd({:?})", pack)),
Self::Tnt8(pack) => f.write_fmt(format_args!("Tnt8({:?})", pack)),
Self::Tnt64(pack) => f.write_fmt(format_args!("Tnt64({:?})", pack)),
Self::Mode(pack) => f.write_fmt(format_args!("Mode({:?})", pack)),
Self::Pip(pack) => f.write_fmt(format_args!("Pip({:?})", pack)),
Self::Vmcs(pack) => f.write_fmt(format_args!("Vmcs({:?})", pack)),
Self::Cbr(pack) => f.write_fmt(format_args!("Cbr({:?})", pack)),
Self::Tsc(pack) => f.write_fmt(format_args!("Tsc({:?})", pack)),
Self::Tma(pack) => f.write_fmt(format_args!("Tma({:?})", pack)),
Self::Mtc(pack) => f.write_fmt(format_args!("Mtc({:?})", pack)),
Self::Cyc(pack) => f.write_fmt(format_args!("Cyc({:?})", pack)),
Self::Mnt(pack) => f.write_fmt(format_args!("Mnt({:?})", pack)),
Self::Exstop(pack) => f.write_fmt(format_args!("Exstop({:?})", pack)),
Self::Mwait(pack) => f.write_fmt(format_args!("Mwait({:?})", pack)),
Self::Pwre(pack) => f.write_fmt(format_args!("Pwre({:?})", pack)),
Self::Pwrx(pack) => f.write_fmt(format_args!("Pwrx({:?})", pack)),
Self::Ptw(pack) => f.write_fmt(format_args!("Ptw({:?})", pack)),
}
}
}
impl<T> From<pt_packet> for Packet<T> {
fn from(pkt: pt_packet) -> Self {
unsafe {
match pkt.type_ {
PT_PACKET_TYPE_PPT_CBR => Packet::Cbr(pkt.payload.cbr.into()),
PT_PACKET_TYPE_PPT_CYC => Packet::Cyc(pkt.payload.cyc.into()),
PT_PACKET_TYPE_PPT_EXSTOP => Packet::Exstop(pkt.payload.exstop.into()),
PT_PACKET_TYPE_PPT_FUP => Packet::Fup(pkt.payload.ip.into()),
PT_PACKET_TYPE_PPT_INVALID => Packet::Invalid(pkt.into()),
PT_PACKET_TYPE_PPT_MNT => Packet::Mnt(pkt.payload.mnt.into()),
PT_PACKET_TYPE_PPT_MODE => Packet::Mode(pkt.payload.mode.into()),
PT_PACKET_TYPE_PPT_MTC => Packet::Mtc(pkt.payload.mtc.into()),
PT_PACKET_TYPE_PPT_MWAIT => Packet::Mwait(pkt.payload.mwait.into()),
PT_PACKET_TYPE_PPT_OVF => Packet::Ovf(pkt.into()),
PT_PACKET_TYPE_PPT_PAD => Packet::Pad(pkt.into()),
PT_PACKET_TYPE_PPT_PIP => Packet::Pip(pkt.payload.pip.into()),
PT_PACKET_TYPE_PPT_PSB => Packet::Psb(pkt.into()),
PT_PACKET_TYPE_PPT_PSBEND => Packet::Psbend(pkt.into()),
PT_PACKET_TYPE_PPT_PTW => Packet::Ptw(pkt.payload.ptw.into()),
PT_PACKET_TYPE_PPT_PWRE => Packet::Pwre(pkt.payload.pwre.into()),
PT_PACKET_TYPE_PPT_PWRX => Packet::Pwrx(pkt.payload.pwrx.into()),
PT_PACKET_TYPE_PPT_STOP => Packet::Stop(pkt.into()),
PT_PACKET_TYPE_PPT_TIP => Packet::Tip(pkt.payload.ip.into()),
PT_PACKET_TYPE_PPT_TIP_PGD => Packet::TipPgd(pkt.payload.ip.into()),
PT_PACKET_TYPE_PPT_TIP_PGE => Packet::TipPge(pkt.payload.ip.into()),
PT_PACKET_TYPE_PPT_TMA => Packet::Tma(pkt.payload.tma.into()),
PT_PACKET_TYPE_PPT_TNT_8 => Packet::Tnt8(pkt.payload.tnt.into()),
PT_PACKET_TYPE_PPT_TNT_64 => Packet::Tnt64(pkt.payload.tnt.into()),
PT_PACKET_TYPE_PPT_TSC => Packet::Tsc(pkt.payload.tsc.into()),
PT_PACKET_TYPE_PPT_VMCS => Packet::Vmcs(pkt.payload.vmcs.into()),
PT_PACKET_TYPE_PPT_UNKNOWN => Packet::Unknown(unknown::Unknown::<T>::from(pkt.payload.unknown)),
_ => unreachable!("invalid packet type")
}
}
}
}
| 38.074766 | 112 | 0.620643 |
163d0545499a34234da96b1493d053b092716e95 | 4,450 | #![warn(clippy::all, clippy::pedantic)]
use std::{
fs::File,
io::{prelude::*, BufReader},
path::Path,
};
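/// Height map parsed from the puzzle input; every tile is a single digit 0-9,
/// and tiles of height 9 never belong to a basin.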
#[derive(Debug, PartialEq)]
pub struct Floor {
map: Vec<Vec<u8>>,
}
impl Floor {
pub fn new(filename: impl AsRef<Path>) -> Floor {
let file = File::open(filename).expect("file doesn't exist");
let reader = BufReader::new(file);
Floor {
map: reader
.lines()
.map(|line| {
line
.expect("could not parse line")
.split("")
.filter(|height| !height.is_empty())
.map(|height| height.parse::<u8>().expect("not a number"))
.collect()
})
.collect(),
}
}
fn is_low_point(&self, row: &[u8], y: usize, x: usize, height: u8) -> bool {
if x > 0 && row[x - 1] <= height {
return false;
}
if x < row.len() - 1 && row[x + 1] <= height {
return false;
}
if y > 0 && self.map[y - 1][x] <= height {
return false;
}
if y < self.map.len() - 1 && self.map[y + 1][x] <= height {
return false;
}
true
}
fn find_all_low_points(&self) -> Vec<u8> {
let mut low_points = vec![];
for (y, row) in self.map.iter().enumerate() {
for (x, height) in row.iter().enumerate() {
if self.is_low_point(row, y, x, *height) {
low_points.push(*height);
}
}
}
low_points
}
fn find_full_basin(&self, mut basin: Vec<(usize, usize)>) -> Vec<(usize, usize)> {
let (y, x) = basin[basin.len() - 1];
let height = self.map[y][x];
if x > 0 {
let tile_before_pos = (y, x - 1);
let tile_before = self.map[tile_before_pos.0][tile_before_pos.1];
if tile_before >= height && tile_before != 9 && !basin.contains(&tile_before_pos) {
basin.push(tile_before_pos);
basin = self.find_full_basin(basin);
}
}
if x < self.map[y].len() - 1 {
let tile_after_pos = (y, x + 1);
let tile_after = self.map[tile_after_pos.0][tile_after_pos.1];
if tile_after >= height && tile_after != 9 && !basin.contains(&tile_after_pos) {
basin.push(tile_after_pos);
basin = self.find_full_basin(basin);
}
}
if y > 0 {
let tile_prev_row_pos = (y - 1, x);
let tile_prev_row = self.map[tile_prev_row_pos.0][tile_prev_row_pos.1];
if tile_prev_row >= height && tile_prev_row != 9 && !basin.contains(&tile_prev_row_pos) {
basin.push(tile_prev_row_pos);
basin = self.find_full_basin(basin);
}
}
if y < self.map.len() - 1 {
let tile_next_row_pos = (y + 1, x);
let tile_next_row = self.map[tile_next_row_pos.0][tile_next_row_pos.1];
if tile_next_row >= height && tile_next_row != 9 && !basin.contains(&tile_next_row_pos) {
basin.push(tile_next_row_pos);
basin = self.find_full_basin(basin);
}
}
basin
}
#[must_use]
pub fn find_combined_size_of_biggest_basins(&self) -> usize {
let mut basin_sizes = vec![];
for (y, row) in self.map.iter().enumerate() {
for (x, height) in row.iter().enumerate() {
if self.is_low_point(row, y, x, *height) {
let basin = self.find_full_basin(vec![(y, x)]);
if basin.len() > 1 {
basin_sizes.push(basin.len());
}
}
}
}
basin_sizes.sort_unstable();
basin_sizes.iter().rev().take(3).product()
}
#[must_use]
pub fn get_risk_level(&self) -> u32 {
self
.find_all_low_points()
.iter()
.fold(0, |total, height| total + 1 + u32::from(*height))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn create_new_floor_example() {
assert_eq!(
Floor::new("example.txt"),
Floor {
map: vec![
vec![2, 1, 9, 9, 9, 4, 3, 2, 1, 0],
vec![3, 9, 8, 7, 8, 9, 4, 9, 2, 1],
vec![9, 8, 5, 6, 7, 8, 9, 8, 9, 2],
vec![8, 7, 6, 7, 8, 9, 6, 7, 8, 9],
vec![9, 8, 9, 9, 9, 6, 5, 6, 7, 8]
]
}
);
}
#[test]
fn find_all_low_points_example() {
assert_eq!(
Floor::new("example.txt").find_all_low_points(),
vec![1, 0, 5, 5]
);
}
#[test]
fn get_risk_level_example() {
assert_eq!(Floor::new("example.txt").get_risk_level(), 15);
}
#[test]
fn find_combined_size_of_biggest_basins_example() {
assert_eq!(
Floor::new("example.txt").find_combined_size_of_biggest_basins(),
1134
);
}
}
| 25.722543 | 95 | 0.542921 |
e225110900bb142f30bc58d216f41f2dc5d2980b | 30,503 | //! EVM gasometer.
#![deny(warnings)]
#![forbid(unsafe_code, unused_variables)]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
#[cfg(feature = "tracing")]
pub mod tracing;
#[cfg(feature = "tracing")]
macro_rules! event {
($x:expr) => {
use crate::tracing::Event::*;
$x.emit();
};
}
#[cfg(not(feature = "tracing"))]
macro_rules! event {
($x:expr) => {};
}
mod consts;
mod costs;
mod memory;
mod utils;
use alloc::vec::Vec;
use core::cmp::max;
use evm_core::{ExitError, Opcode, Stack};
use evm_runtime::{Config, Handler};
use primitive_types::{H160, H256, U256};
macro_rules! try_or_fail {
( $inner:expr, $e:expr ) => {
match $e {
Ok(value) => value,
Err(e) => {
$inner = Err(e.clone());
return Err(e);
}
}
};
}
#[derive(Debug, Copy, Clone)]
pub struct Snapshot {
pub gas_limit: u64,
pub memory_gas: u64,
pub used_gas: u64,
pub refunded_gas: i64,
}
/// EVM gasometer.
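///
/// A minimal sketch (assuming one of the preset configs from `evm-runtime`,
/// e.g. `Config::istanbul()`):
///
/// ```ignore
/// let config = Config::istanbul();
/// let mut gasometer = Gasometer::new(1_000_000, &config);
/// gasometer.record_cost(21_000).expect("out of gas");
/// assert_eq!(gasometer.total_used_gas(), 21_000);
/// assert_eq!(gasometer.gas(), 1_000_000 - 21_000);
/// ```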
#[derive(Clone)]
pub struct Gasometer<'config> {
gas_limit: u64,
config: &'config Config,
inner: Result<Inner<'config>, ExitError>,
}
impl<'config> Gasometer<'config> {
/// Create a new gasometer with given gas limit and config.
pub fn new(gas_limit: u64, config: &'config Config) -> Self {
Self {
gas_limit,
config,
inner: Ok(Inner {
memory_gas: 0,
used_gas: 0,
refunded_gas: 0,
config,
}),
}
}
#[inline]
/// Returns the numerical gas cost value.
pub fn gas_cost(&self, cost: GasCost, gas: u64) -> Result<u64, ExitError> {
match self.inner.as_ref() {
Ok(inner) => inner.gas_cost(cost, gas),
Err(e) => Err(e.clone()),
}
}
#[inline]
fn inner_mut(&mut self) -> Result<&mut Inner<'config>, ExitError> {
self.inner.as_mut().map_err(|e| e.clone())
}
#[inline]
/// Reference of the config.
pub fn config(&self) -> &'config Config {
self.config
}
#[inline]
/// Remaining gas.
pub fn gas(&self) -> u64 {
match self.inner.as_ref() {
Ok(inner) => self.gas_limit - inner.used_gas - inner.memory_gas,
Err(_) => 0,
}
}
#[inline]
/// Total used gas.
pub fn total_used_gas(&self) -> u64 {
match self.inner.as_ref() {
Ok(inner) => inner.used_gas + inner.memory_gas,
Err(_) => self.gas_limit,
}
}
#[inline]
/// Refunded gas.
pub fn refunded_gas(&self) -> i64 {
match self.inner.as_ref() {
Ok(inner) => inner.refunded_gas,
Err(_) => 0,
}
}
	/// Explicitly fail the gasometer with out of gas. Returns the `OutOfGas` error.
pub fn fail(&mut self) -> ExitError {
self.inner = Err(ExitError::OutOfGas);
ExitError::OutOfGas
}
#[inline]
	/// Record an explicit cost.
pub fn record_cost(&mut self, cost: u64) -> Result<(), ExitError> {
event!(RecordCost {
cost,
snapshot: self.snapshot()?,
});
let all_gas_cost = self.total_used_gas() + cost;
if self.gas_limit < all_gas_cost {
self.inner = Err(ExitError::OutOfGas);
return Err(ExitError::OutOfGas);
}
self.inner_mut()?.used_gas += cost;
Ok(())
}
#[inline]
	/// Record an explicit refund.
pub fn record_refund(&mut self, refund: i64) -> Result<(), ExitError> {
event!(RecordRefund {
refund,
snapshot: self.snapshot()?,
});
self.inner_mut()?.refunded_gas += refund;
Ok(())
}
#[inline]
/// Record `CREATE` code deposit.
pub fn record_deposit(&mut self, len: usize) -> Result<(), ExitError> {
let cost = len as u64 * consts::G_CODEDEPOSIT;
self.record_cost(cost)
}
/// Record opcode gas cost.
pub fn record_dynamic_cost(
&mut self,
cost: GasCost,
memory: Option<MemoryCost>,
) -> Result<(), ExitError> {
let gas = self.gas();
let memory_gas = match memory {
Some(memory) => try_or_fail!(self.inner, self.inner_mut()?.memory_gas(memory)),
None => self.inner_mut()?.memory_gas,
};
let gas_cost = try_or_fail!(self.inner, self.inner_mut()?.gas_cost(cost, gas));
let gas_refund = self.inner_mut()?.gas_refund(cost);
let used_gas = self.inner_mut()?.used_gas;
event!(RecordDynamicCost {
gas_cost,
memory_gas,
gas_refund,
snapshot: self.snapshot()?,
});
let all_gas_cost = memory_gas + used_gas + gas_cost;
if self.gas_limit < all_gas_cost {
self.inner = Err(ExitError::OutOfGas);
return Err(ExitError::OutOfGas);
}
let after_gas = self.gas_limit - all_gas_cost;
try_or_fail!(self.inner, self.inner_mut()?.extra_check(cost, after_gas));
self.inner_mut()?.used_gas += gas_cost;
self.inner_mut()?.memory_gas = memory_gas;
self.inner_mut()?.refunded_gas += gas_refund;
Ok(())
}
#[inline]
/// Record opcode stipend.
pub fn record_stipend(&mut self, stipend: u64) -> Result<(), ExitError> {
event!(RecordStipend {
stipend,
snapshot: self.snapshot()?,
});
self.inner_mut()?.used_gas -= stipend;
Ok(())
}
/// Record transaction cost.
pub fn record_transaction(&mut self, cost: TransactionCost) -> Result<(), ExitError> {
let gas_cost = match cost {
TransactionCost::Call {
zero_data_len,
non_zero_data_len,
access_list_address_len,
access_list_storage_len,
} => {
self.config.gas_transaction_call
+ zero_data_len as u64 * self.config.gas_transaction_zero_data
+ non_zero_data_len as u64 * self.config.gas_transaction_non_zero_data
+ access_list_address_len as u64 * self.config.gas_access_list_address
+ access_list_storage_len as u64 * self.config.gas_access_list_storage_key
}
TransactionCost::Create {
zero_data_len,
non_zero_data_len,
access_list_address_len,
access_list_storage_len,
} => {
self.config.gas_transaction_create
+ zero_data_len as u64 * self.config.gas_transaction_zero_data
+ non_zero_data_len as u64 * self.config.gas_transaction_non_zero_data
+ access_list_address_len as u64 * self.config.gas_access_list_address
+ access_list_storage_len as u64 * self.config.gas_access_list_storage_key
}
};
event!(RecordTransaction {
cost: gas_cost,
snapshot: self.snapshot()?,
});
if self.gas() < gas_cost {
self.inner = Err(ExitError::OutOfGas);
return Err(ExitError::OutOfGas);
}
self.inner_mut()?.used_gas += gas_cost;
Ok(())
}
pub fn snapshot(&self) -> Result<Snapshot, ExitError> {
let inner = self.inner.as_ref().map_err(|e| e.clone())?;
Ok(Snapshot {
gas_limit: self.gas_limit,
memory_gas: inner.memory_gas,
used_gas: inner.used_gas,
refunded_gas: inner.refunded_gas,
})
}
}
/// Calculate the call transaction cost.
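///
/// A sketch of charging the intrinsic gas before execution (not compiled;
/// `gasometer` is assumed to be a `Gasometer` with enough gas remaining):
///
/// ```ignore
/// let data = [0u8, 0, 7, 7]; // two zero bytes and two non-zero bytes
/// let access_list: Vec<(H160, Vec<H256>)> = Vec::new();
/// let cost = call_transaction_cost(&data, &access_list);
/// gasometer.record_transaction(cost).expect("out of gas");
/// ```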
pub fn call_transaction_cost(data: &[u8], access_list: &[(H160, Vec<H256>)]) -> TransactionCost {
let zero_data_len = data.iter().filter(|v| **v == 0).count();
let non_zero_data_len = data.len() - zero_data_len;
let (access_list_address_len, access_list_storage_len) = count_access_list(access_list);
TransactionCost::Call {
zero_data_len,
non_zero_data_len,
access_list_address_len,
access_list_storage_len,
}
}
/// Calculate the create transaction cost.
pub fn create_transaction_cost(data: &[u8], access_list: &[(H160, Vec<H256>)]) -> TransactionCost {
let zero_data_len = data.iter().filter(|v| **v == 0).count();
let non_zero_data_len = data.len() - zero_data_len;
let (access_list_address_len, access_list_storage_len) = count_access_list(access_list);
TransactionCost::Create {
zero_data_len,
non_zero_data_len,
access_list_address_len,
access_list_storage_len,
}
}
/// Counts the number of addresses and storage keys in the access list.
fn count_access_list(access_list: &[(H160, Vec<H256>)]) -> (usize, usize) {
let access_list_address_len = access_list.len();
let access_list_storage_len = access_list.iter().map(|(_, keys)| keys.len()).sum();
(access_list_address_len, access_list_storage_len)
}
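/// Returns the statically known gas cost of an opcode, or `None` when the cost
/// depends on runtime state (for example `SSTORE` or `CALL`) and must be priced
/// through `dynamic_opcode_cost` instead.
///
/// ```ignore
/// // Assuming the usual fee schedule where G_VERYLOW is 3.
/// assert_eq!(static_opcode_cost(Opcode::ADD), Some(3));
/// // Not in the static table: SSTORE is priced dynamically.
/// assert_eq!(static_opcode_cost(Opcode::SSTORE), None);
/// ```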
#[inline]
pub fn static_opcode_cost(opcode: Opcode) -> Option<u64> {
static TABLE: [Option<u64>; 256] = {
let mut table = [None; 256];
table[Opcode::STOP.as_usize()] = Some(consts::G_ZERO);
table[Opcode::CALLDATASIZE.as_usize()] = Some(consts::G_BASE);
table[Opcode::CODESIZE.as_usize()] = Some(consts::G_BASE);
table[Opcode::POP.as_usize()] = Some(consts::G_BASE);
table[Opcode::PC.as_usize()] = Some(consts::G_BASE);
table[Opcode::MSIZE.as_usize()] = Some(consts::G_BASE);
table[Opcode::ADDRESS.as_usize()] = Some(consts::G_BASE);
table[Opcode::ORIGIN.as_usize()] = Some(consts::G_BASE);
table[Opcode::CALLER.as_usize()] = Some(consts::G_BASE);
table[Opcode::CALLVALUE.as_usize()] = Some(consts::G_BASE);
table[Opcode::COINBASE.as_usize()] = Some(consts::G_BASE);
table[Opcode::TIMESTAMP.as_usize()] = Some(consts::G_BASE);
table[Opcode::NUMBER.as_usize()] = Some(consts::G_BASE);
table[Opcode::DIFFICULTY.as_usize()] = Some(consts::G_BASE);
table[Opcode::GASLIMIT.as_usize()] = Some(consts::G_BASE);
table[Opcode::GASPRICE.as_usize()] = Some(consts::G_BASE);
table[Opcode::GAS.as_usize()] = Some(consts::G_BASE);
table[Opcode::ADD.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SUB.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::NOT.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::LT.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::GT.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SLT.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SGT.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::EQ.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::ISZERO.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::AND.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::OR.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::XOR.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::BYTE.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::CALLDATALOAD.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH1.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH2.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH3.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH4.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH5.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH6.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH7.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH8.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH9.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH10.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH11.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH12.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH13.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH14.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH15.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH16.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH17.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH18.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH19.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH20.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH21.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH22.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH23.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH24.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH25.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH26.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH27.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH28.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH29.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH30.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH31.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::PUSH32.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP1.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP2.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP3.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP4.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP5.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP6.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP7.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP8.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP9.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP10.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP11.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP12.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP13.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP14.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP15.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::DUP16.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP1.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP2.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP3.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP4.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP5.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP6.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP7.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP8.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP9.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP10.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP11.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP12.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP13.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP14.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP15.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::SWAP16.as_usize()] = Some(consts::G_VERYLOW);
table[Opcode::MUL.as_usize()] = Some(consts::G_LOW);
table[Opcode::DIV.as_usize()] = Some(consts::G_LOW);
table[Opcode::SDIV.as_usize()] = Some(consts::G_LOW);
table[Opcode::MOD.as_usize()] = Some(consts::G_LOW);
table[Opcode::SMOD.as_usize()] = Some(consts::G_LOW);
table[Opcode::SIGNEXTEND.as_usize()] = Some(consts::G_LOW);
table[Opcode::ADDMOD.as_usize()] = Some(consts::G_MID);
table[Opcode::MULMOD.as_usize()] = Some(consts::G_MID);
table[Opcode::JUMP.as_usize()] = Some(consts::G_MID);
table[Opcode::JUMPI.as_usize()] = Some(consts::G_HIGH);
table[Opcode::JUMPDEST.as_usize()] = Some(consts::G_JUMPDEST);
table
};
TABLE[opcode.as_usize()]
}
/// Calculate the opcode cost.
#[allow(clippy::nonminimal_bool)]
pub fn dynamic_opcode_cost<H: Handler>(
address: H160,
opcode: Opcode,
stack: &Stack,
is_static: bool,
config: &Config,
handler: &H,
) -> Result<(GasCost, StorageTarget, Option<MemoryCost>), ExitError> {
let mut storage_target = StorageTarget::None;
let gas_cost = match opcode {
Opcode::RETURN => GasCost::Zero,
Opcode::MLOAD | Opcode::MSTORE | Opcode::MSTORE8 => GasCost::VeryLow,
Opcode::REVERT if config.has_revert => GasCost::Zero,
Opcode::REVERT => GasCost::Invalid,
Opcode::CHAINID if config.has_chain_id => GasCost::Base,
Opcode::CHAINID => GasCost::Invalid,
Opcode::SHL | Opcode::SHR | Opcode::SAR if config.has_bitwise_shifting => GasCost::VeryLow,
Opcode::SHL | Opcode::SHR | Opcode::SAR => GasCost::Invalid,
Opcode::SELFBALANCE if config.has_self_balance => GasCost::Low,
Opcode::SELFBALANCE => GasCost::Invalid,
Opcode::EXTCODESIZE => {
let target = stack.peek(0)?.into();
storage_target = StorageTarget::Address(target);
GasCost::ExtCodeSize {
target_is_cold: handler.is_cold(target, None),
}
}
Opcode::BALANCE => {
let target = stack.peek(0)?.into();
storage_target = StorageTarget::Address(target);
GasCost::Balance {
target_is_cold: handler.is_cold(target, None),
}
}
Opcode::BLOCKHASH => GasCost::BlockHash,
Opcode::EXTCODEHASH if config.has_ext_code_hash => {
let target = stack.peek(0)?.into();
storage_target = StorageTarget::Address(target);
GasCost::ExtCodeHash {
target_is_cold: handler.is_cold(target, None),
}
}
Opcode::EXTCODEHASH => GasCost::Invalid,
Opcode::CALLCODE => {
let target = stack.peek(1)?.into();
storage_target = StorageTarget::Address(target);
GasCost::CallCode {
value: U256::from_big_endian(&stack.peek(2)?[..]),
gas: U256::from_big_endian(&stack.peek(0)?[..]),
target_is_cold: handler.is_cold(target, None),
target_exists: handler.exists(target),
}
}
Opcode::STATICCALL => {
let target = stack.peek(1)?.into();
storage_target = StorageTarget::Address(target);
GasCost::StaticCall {
gas: U256::from_big_endian(&stack.peek(0)?[..]),
target_is_cold: handler.is_cold(target, None),
target_exists: handler.exists(target),
}
}
Opcode::SHA3 => GasCost::Sha3 {
len: U256::from_big_endian(&stack.peek(1)?[..]),
},
Opcode::EXTCODECOPY => {
let target = stack.peek(0)?.into();
storage_target = StorageTarget::Address(target);
GasCost::ExtCodeCopy {
target_is_cold: handler.is_cold(target, None),
len: U256::from_big_endian(&stack.peek(3)?[..]),
}
}
Opcode::CALLDATACOPY | Opcode::CODECOPY => GasCost::VeryLowCopy {
len: U256::from_big_endian(&stack.peek(2)?[..]),
},
Opcode::EXP => GasCost::Exp {
power: U256::from_big_endian(&stack.peek(1)?[..]),
},
Opcode::SLOAD => {
let index = stack.peek(0)?;
storage_target = StorageTarget::Slot(address, index);
GasCost::SLoad {
target_is_cold: handler.is_cold(address, Some(index)),
}
}
Opcode::DELEGATECALL if config.has_delegate_call => {
let target = stack.peek(1)?.into();
storage_target = StorageTarget::Address(target);
GasCost::DelegateCall {
gas: U256::from_big_endian(&stack.peek(0)?[..]),
target_is_cold: handler.is_cold(target, None),
target_exists: handler.exists(target),
}
}
Opcode::DELEGATECALL => GasCost::Invalid,
Opcode::RETURNDATASIZE if config.has_return_data => GasCost::Base,
Opcode::RETURNDATACOPY if config.has_return_data => GasCost::VeryLowCopy {
len: U256::from_big_endian(&stack.peek(2)?[..]),
},
Opcode::RETURNDATASIZE | Opcode::RETURNDATACOPY => GasCost::Invalid,
Opcode::SSTORE if !is_static => {
let index = stack.peek(0)?;
let value = stack.peek(1)?;
storage_target = StorageTarget::Slot(address, index);
GasCost::SStore {
original: handler.original_storage(address, index),
current: handler.storage(address, index),
new: value,
target_is_cold: handler.is_cold(address, Some(index)),
}
}
Opcode::LOG0 if !is_static => GasCost::Log {
n: 0,
len: U256::from_big_endian(&stack.peek(1)?[..]),
},
Opcode::LOG1 if !is_static => GasCost::Log {
n: 1,
len: U256::from_big_endian(&stack.peek(1)?[..]),
},
Opcode::LOG2 if !is_static => GasCost::Log {
n: 2,
len: U256::from_big_endian(&stack.peek(1)?[..]),
},
Opcode::LOG3 if !is_static => GasCost::Log {
n: 3,
len: U256::from_big_endian(&stack.peek(1)?[..]),
},
Opcode::LOG4 if !is_static => GasCost::Log {
n: 4,
len: U256::from_big_endian(&stack.peek(1)?[..]),
},
Opcode::CREATE if !is_static => GasCost::Create,
Opcode::CREATE2 if !is_static && config.has_create2 => GasCost::Create2 {
len: U256::from_big_endian(&stack.peek(2)?[..]),
},
Opcode::SUICIDE if !is_static => {
let target = stack.peek(0)?.into();
storage_target = StorageTarget::Address(target);
GasCost::Suicide {
value: handler.balance(address),
target_is_cold: handler.is_cold(target, None),
target_exists: handler.exists(target),
already_removed: handler.deleted(address),
}
}
Opcode::CALL
if !is_static
|| (is_static && U256::from_big_endian(&stack.peek(2)?[..]) == U256::zero()) =>
{
let target = stack.peek(1)?.into();
storage_target = StorageTarget::Address(target);
GasCost::Call {
value: U256::from_big_endian(&stack.peek(2)?[..]),
gas: U256::from_big_endian(&stack.peek(0)?[..]),
target_is_cold: handler.is_cold(target, None),
target_exists: handler.exists(target),
}
}
_ => GasCost::Invalid,
};
let memory_cost = match opcode {
Opcode::SHA3
| Opcode::RETURN
| Opcode::REVERT
| Opcode::LOG0
| Opcode::LOG1
| Opcode::LOG2
| Opcode::LOG3
| Opcode::LOG4 => Some(MemoryCost {
offset: U256::from_big_endian(&stack.peek(0)?[..]),
len: U256::from_big_endian(&stack.peek(1)?[..]),
}),
Opcode::CODECOPY | Opcode::CALLDATACOPY | Opcode::RETURNDATACOPY => Some(MemoryCost {
offset: U256::from_big_endian(&stack.peek(0)?[..]),
len: U256::from_big_endian(&stack.peek(2)?[..]),
}),
Opcode::EXTCODECOPY => Some(MemoryCost {
offset: U256::from_big_endian(&stack.peek(1)?[..]),
len: U256::from_big_endian(&stack.peek(3)?[..]),
}),
Opcode::MLOAD | Opcode::MSTORE => Some(MemoryCost {
offset: U256::from_big_endian(&stack.peek(0)?[..]),
len: U256::from(32),
}),
Opcode::MSTORE8 => Some(MemoryCost {
offset: U256::from_big_endian(&stack.peek(0)?[..]),
len: U256::from(1),
}),
Opcode::CREATE | Opcode::CREATE2 => Some(MemoryCost {
offset: U256::from_big_endian(&stack.peek(1)?[..]),
len: U256::from_big_endian(&stack.peek(2)?[..]),
}),
Opcode::CALL | Opcode::CALLCODE => Some(
MemoryCost {
offset: U256::from_big_endian(&stack.peek(3)?[..]),
len: U256::from_big_endian(&stack.peek(4)?[..]),
}
.join(MemoryCost {
offset: U256::from_big_endian(&stack.peek(5)?[..]),
len: U256::from_big_endian(&stack.peek(6)?[..]),
}),
),
Opcode::DELEGATECALL | Opcode::STATICCALL => Some(
MemoryCost {
offset: U256::from_big_endian(&stack.peek(2)?[..]),
len: U256::from_big_endian(&stack.peek(3)?[..]),
}
.join(MemoryCost {
offset: U256::from_big_endian(&stack.peek(4)?[..]),
len: U256::from_big_endian(&stack.peek(5)?[..]),
}),
),
_ => None,
};
Ok((gas_cost, storage_target, memory_cost))
}
/// Holds the gas consumption for a Gasometer instance.
#[derive(Clone)]
struct Inner<'config> {
memory_gas: u64,
used_gas: u64,
refunded_gas: i64,
config: &'config Config,
}
impl<'config> Inner<'config> {
fn memory_gas(&self, memory: MemoryCost) -> Result<u64, ExitError> {
let from = memory.offset;
let len = memory.len;
if len == U256::zero() {
return Ok(self.memory_gas);
}
let end = from.checked_add(len).ok_or(ExitError::OutOfGas)?;
if end > U256::from(usize::MAX) {
return Err(ExitError::OutOfGas);
}
let end = end.as_usize();
let rem = end % 32;
let new = if rem == 0 { end / 32 } else { end / 32 + 1 };
Ok(max(self.memory_gas, memory::memory_gas(new)?))
}
fn extra_check(&self, cost: GasCost, after_gas: u64) -> Result<(), ExitError> {
match cost {
GasCost::Call { gas, .. } => costs::call_extra_check(gas, after_gas, self.config),
GasCost::CallCode { gas, .. } => costs::call_extra_check(gas, after_gas, self.config),
GasCost::DelegateCall { gas, .. } => {
costs::call_extra_check(gas, after_gas, self.config)
}
GasCost::StaticCall { gas, .. } => costs::call_extra_check(gas, after_gas, self.config),
_ => Ok(()),
}
}
	/// Returns the numerical gas cost value.
fn gas_cost(&self, cost: GasCost, gas: u64) -> Result<u64, ExitError> {
Ok(match cost {
GasCost::Call {
value,
target_is_cold,
target_exists,
..
} => costs::call_cost(
value,
target_is_cold,
true,
true,
!target_exists,
self.config,
),
GasCost::CallCode {
value,
target_is_cold,
target_exists,
..
} => costs::call_cost(
value,
target_is_cold,
true,
false,
!target_exists,
self.config,
),
GasCost::DelegateCall {
target_is_cold,
target_exists,
..
} => costs::call_cost(
U256::zero(),
target_is_cold,
false,
false,
!target_exists,
self.config,
),
GasCost::StaticCall {
target_is_cold,
target_exists,
..
} => costs::call_cost(
U256::zero(),
target_is_cold,
false,
true,
!target_exists,
self.config,
),
GasCost::Suicide {
value,
target_is_cold,
target_exists,
..
} => costs::suicide_cost(value, target_is_cold, target_exists, self.config),
GasCost::SStore { .. } if self.config.estimate => self.config.gas_sstore_set,
GasCost::SStore {
original,
current,
new,
target_is_cold,
} => costs::sstore_cost(original, current, new, gas, target_is_cold, self.config)?,
GasCost::Sha3 { len } => costs::sha3_cost(len)?,
GasCost::Log { n, len } => costs::log_cost(n, len)?,
GasCost::VeryLowCopy { len } => costs::verylowcopy_cost(len)?,
GasCost::Exp { power } => costs::exp_cost(power, self.config)?,
GasCost::Create => consts::G_CREATE,
GasCost::Create2 { len } => costs::create2_cost(len)?,
GasCost::SLoad { target_is_cold } => costs::sload_cost(target_is_cold, self.config),
GasCost::Zero => consts::G_ZERO,
GasCost::Base => consts::G_BASE,
GasCost::VeryLow => consts::G_VERYLOW,
GasCost::Low => consts::G_LOW,
GasCost::Invalid => return Err(ExitError::OutOfGas),
GasCost::ExtCodeSize { target_is_cold } => {
costs::address_access_cost(target_is_cold, self.config.gas_ext_code, self.config)
}
GasCost::ExtCodeCopy {
target_is_cold,
len,
} => costs::extcodecopy_cost(len, target_is_cold, self.config)?,
GasCost::Balance { target_is_cold } => {
costs::address_access_cost(target_is_cold, self.config.gas_balance, self.config)
}
GasCost::BlockHash => consts::G_BLOCKHASH,
GasCost::ExtCodeHash { target_is_cold } => costs::address_access_cost(
target_is_cold,
self.config.gas_ext_code_hash,
self.config,
),
})
}
fn gas_refund(&self, cost: GasCost) -> i64 {
match cost {
_ if self.config.estimate => 0,
GasCost::SStore {
original,
current,
new,
..
} => costs::sstore_refund(original, current, new, self.config),
GasCost::Suicide {
already_removed, ..
} => costs::suicide_refund(already_removed),
_ => 0,
}
}
}
/// Gas cost.
#[derive(Debug, Clone, Copy)]
pub enum GasCost {
/// Zero gas cost.
Zero,
/// Base gas cost.
Base,
/// Very low gas cost.
VeryLow,
/// Low gas cost.
Low,
/// Fail the gasometer.
Invalid,
/// Gas cost for `EXTCODESIZE`.
ExtCodeSize {
/// True if address has not been previously accessed in this transaction
target_is_cold: bool,
},
/// Gas cost for `BALANCE`.
Balance {
/// True if address has not been previously accessed in this transaction
target_is_cold: bool,
},
/// Gas cost for `BLOCKHASH`.
BlockHash,
    /// Gas cost for `EXTCODEHASH`.
ExtCodeHash {
/// True if address has not been previously accessed in this transaction
target_is_cold: bool,
},
/// Gas cost for `CALL`.
Call {
/// Call value.
value: U256,
/// Call gas.
gas: U256,
/// True if target has not been previously accessed in this transaction
target_is_cold: bool,
/// Whether the target exists.
target_exists: bool,
},
    /// Gas cost for `CALLCODE`.
CallCode {
/// Call value.
value: U256,
/// Call gas.
gas: U256,
/// True if target has not been previously accessed in this transaction
target_is_cold: bool,
/// Whether the target exists.
target_exists: bool,
},
/// Gas cost for `DELEGATECALL`.
DelegateCall {
/// Call gas.
gas: U256,
/// True if target has not been previously accessed in this transaction
target_is_cold: bool,
/// Whether the target exists.
target_exists: bool,
},
/// Gas cost for `STATICCALL`.
StaticCall {
/// Call gas.
gas: U256,
/// True if target has not been previously accessed in this transaction
target_is_cold: bool,
/// Whether the target exists.
target_exists: bool,
},
/// Gas cost for `SUICIDE`.
Suicide {
/// Value.
value: U256,
/// True if target has not been previously accessed in this transaction
target_is_cold: bool,
/// Whether the target exists.
target_exists: bool,
/// Whether the target has already been removed.
already_removed: bool,
},
/// Gas cost for `SSTORE`.
SStore {
/// Original value.
original: H256,
/// Current value.
current: H256,
/// New value.
new: H256,
/// True if target has not been previously accessed in this transaction
target_is_cold: bool,
},
/// Gas cost for `SHA3`.
Sha3 {
/// Length of the data.
len: U256,
},
/// Gas cost for `LOG`.
Log {
/// Topic length.
n: u8,
/// Data length.
len: U256,
},
/// Gas cost for `EXTCODECOPY`.
ExtCodeCopy {
/// True if target has not been previously accessed in this transaction
target_is_cold: bool,
/// Length.
len: U256,
},
    /// Gas cost for some copy opcodes, documented as `VERYLOW`.
VeryLowCopy {
/// Length.
len: U256,
},
/// Gas cost for `EXP`.
Exp {
/// Power of `EXP`.
power: U256,
},
/// Gas cost for `CREATE`.
Create,
/// Gas cost for `CREATE2`.
Create2 {
/// Length.
len: U256,
},
/// Gas cost for `SLOAD`.
SLoad {
/// True if target has not been previously accessed in this transaction
target_is_cold: bool,
},
}
/// Storage target that an opcode will access. Used for tracking accessed storage (EIP-2929).
#[derive(Debug, Clone, Copy)]
pub enum StorageTarget {
/// No storage access
None,
/// Accessing address
Address(H160),
/// Accessing storage slot within an address
Slot(H160, H256),
}
/// Memory cost.
#[derive(Debug, Clone, Copy)]
pub struct MemoryCost {
/// Affected memory offset.
pub offset: U256,
/// Affected length.
pub len: U256,
}
/// Transaction cost.
#[derive(Debug, Clone, Copy)]
pub enum TransactionCost {
/// Call transaction cost.
Call {
/// Length of zeros in transaction data.
zero_data_len: usize,
/// Length of non-zeros in transaction data.
non_zero_data_len: usize,
/// Number of addresses in transaction access list (see EIP-2930)
access_list_address_len: usize,
/// Total number of storage keys in transaction access list (see EIP-2930)
access_list_storage_len: usize,
},
/// Create transaction cost.
Create {
/// Length of zeros in transaction data.
zero_data_len: usize,
/// Length of non-zeros in transaction data.
non_zero_data_len: usize,
/// Number of addresses in transaction access list (see EIP-2930)
access_list_address_len: usize,
/// Total number of storage keys in transaction access list (see EIP-2930)
access_list_storage_len: usize,
},
}
impl MemoryCost {
/// Join two memory cost together.
pub fn join(self, other: MemoryCost) -> MemoryCost {
if self.len == U256::zero() {
return other;
}
if other.len == U256::zero() {
return self;
}
let self_end = self.offset.saturating_add(self.len);
let other_end = other.offset.saturating_add(other.len);
if self_end >= other_end {
self
} else {
other
}
}
}
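// A hedged sketch, not part of the original crate: it illustrates the behaviour of
// `MemoryCost::join` as implemented above (zero-length costs are ignored, otherwise the
// cost whose region ends later wins). The numeric values are arbitrary illustrations and
// `U256` is assumed to be importable via `use super::*;` as elsewhere in this module.
#[cfg(test)]
mod memory_cost_join_sketch {
    use super::*;

    #[test]
    fn join_keeps_the_cost_with_the_larger_end_offset() {
        let a = MemoryCost {
            offset: U256::zero(),
            len: U256::from(64),
        }; // ends at 64
        let b = MemoryCost {
            offset: U256::from(32),
            len: U256::from(96),
        }; // ends at 128
        let joined = a.join(b);
        assert_eq!(joined.offset, U256::from(32));
        assert_eq!(joined.len, U256::from(96));
    }

    #[test]
    fn zero_length_costs_are_ignored() {
        let a = MemoryCost {
            offset: U256::from(10),
            len: U256::zero(),
        };
        let b = MemoryCost {
            offset: U256::zero(),
            len: U256::from(32),
        };
        let joined = a.join(b);
        assert_eq!(joined.offset, U256::zero());
        assert_eq!(joined.len, U256::from(32));
    }
}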
| 29.245446 | 99 | 0.668033 |
3392a8b5aa5ca2ba7f08dac480db123be6077743 | 1,718 | use crate::configuration::Configuration;
use dprint_core::{
configuration::{ConfigKeyMap, GlobalConfiguration, ResolveConfigurationResult},
plugins::{PluginHandler, PluginInfo},
types::ErrBox,
};
use std::{path::Path, vec};
#[derive(Default)]
pub struct TaploPluginHandler {}
impl PluginHandler<Configuration> for TaploPluginHandler {
fn get_plugin_info(&mut self) -> PluginInfo {
PluginInfo {
name: env!("CARGO_PKG_NAME").to_string(),
version: env!("CARGO_PKG_VERSION").to_string(),
config_key: "taplo".to_string(),
file_extensions: vec!["toml".to_string()],
file_names: vec![],
help_url: "https://taplo.tamasfe.dev/configuration/#formatting-options".to_string(),
config_schema_url: "".to_string(),
}
}
fn get_license_text(&mut self) -> String {
std::str::from_utf8(include_bytes!("../LICENSE"))
.unwrap()
.into()
}
fn resolve_config(
&mut self,
config: ConfigKeyMap,
global_config: &GlobalConfiguration,
) -> ResolveConfigurationResult<Configuration> {
crate::configuration::resolve_config(config, global_config)
}
fn format_text(
&mut self,
_file_path: &Path,
file_text: &str,
config: &Configuration,
mut _format_with_host: impl FnMut(&Path, String, &ConfigKeyMap) -> Result<String, ErrBox>,
) -> Result<String, ErrBox> {
Ok(crate::format_text::format_text(file_text, config)?)
}
}
#[cfg(feature = "wasm")]
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
dprint_core::generate_plugin_code!(TaploPluginHandler, TaploPluginHandler {});
| 32.415094 | 98 | 0.636205 |
d6a2c17d6f6f9a6d2b265180313840d927e296db | 11,507 | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Check license of third-party deps by inspecting src/vendor
use std::collections::{BTreeSet, HashSet, HashMap};
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::process::Command;
use serde_json;
static LICENSES: &'static [&'static str] = &[
"MIT/Apache-2.0",
"MIT / Apache-2.0",
"Apache-2.0/MIT",
"Apache-2.0 / MIT",
"MIT OR Apache-2.0",
"MIT",
"Unlicense/MIT",
"Unlicense OR MIT",
];
/// These are exceptions to Rust's permissive licensing policy, and
/// should be considered bugs. Exceptions are only allowed in Rust
/// tooling. It is _crucial_ that no exception crates be dependencies
/// of the Rust runtime (std / test).
static EXCEPTIONS: &'static [&'static str] = &[
"mdbook", // MPL2, mdbook
"openssl", // BSD+advertising clause, cargo, mdbook
"pest", // MPL2, mdbook via handlebars
"thread-id", // Apache-2.0, mdbook
"toml-query", // MPL-2.0, mdbook
"is-match", // MPL-2.0, mdbook
"cssparser", // MPL-2.0, rustdoc
"smallvec", // MPL-2.0, rustdoc
"fuchsia-zircon-sys", // BSD-3-Clause, rustdoc, rustc, cargo
"fuchsia-zircon", // BSD-3-Clause, rustdoc, rustc, cargo (jobserver & tempdir)
"cssparser-macros", // MPL-2.0, rustdoc
"selectors", // MPL-2.0, rustdoc
"clippy_lints", // MPL-2.0, rls
"colored", // MPL-2.0, rustfmt
"ordslice", // Apache-2.0, rls
"cloudabi", // BSD-2-Clause, (rls -> crossbeam-channel 0.2 -> rand 0.5)
"ryu", // Apache-2.0, rls/cargo/... (b/c of serde)
];
/// Which crates to check against the whitelist?
static WHITELIST_CRATES: &'static [CrateVersion] = &[
CrateVersion("rustc", "0.0.0"),
CrateVersion("rustc_codegen_llvm", "0.0.0"),
];
/// Whitelist of crates rustc is allowed to depend on. Avoid adding to the list if possible.
static WHITELIST: &'static [Crate] = &[
Crate("aho-corasick"),
Crate("arrayvec"),
Crate("atty"),
Crate("backtrace"),
Crate("backtrace-sys"),
Crate("bitflags"),
Crate("byteorder"),
Crate("cc"),
Crate("cfg-if"),
Crate("chalk-engine"),
Crate("chalk-macros"),
Crate("cloudabi"),
Crate("cmake"),
Crate("crossbeam-deque"),
Crate("crossbeam-epoch"),
Crate("crossbeam-utils"),
Crate("datafrog"),
Crate("either"),
Crate("ena"),
Crate("env_logger"),
Crate("filetime"),
Crate("flate2"),
Crate("fuchsia-zircon"),
Crate("fuchsia-zircon-sys"),
Crate("getopts"),
Crate("humantime"),
Crate("jobserver"),
Crate("kernel32-sys"),
Crate("lazy_static"),
Crate("libc"),
Crate("lock_api"),
Crate("log"),
Crate("log_settings"),
Crate("memchr"),
Crate("memmap"),
Crate("memoffset"),
Crate("miniz-sys"),
Crate("nodrop"),
Crate("num_cpus"),
Crate("owning_ref"),
Crate("parking_lot"),
Crate("parking_lot_core"),
Crate("pkg-config"),
Crate("polonius-engine"),
Crate("quick-error"),
Crate("rand"),
Crate("rand_core"),
Crate("redox_syscall"),
Crate("redox_termios"),
Crate("regex"),
Crate("regex-syntax"),
Crate("remove_dir_all"),
Crate("rustc-demangle"),
Crate("rustc-hash"),
Crate("rustc-rayon"),
Crate("rustc-rayon-core"),
Crate("scoped-tls"),
Crate("scopeguard"),
Crate("smallvec"),
Crate("stable_deref_trait"),
Crate("tempfile"),
Crate("termcolor"),
Crate("terminon"),
Crate("termion"),
Crate("thread_local"),
Crate("ucd-util"),
Crate("unicode-width"),
Crate("unreachable"),
Crate("utf8-ranges"),
Crate("version_check"),
Crate("void"),
Crate("winapi"),
Crate("winapi-build"),
Crate("winapi-i686-pc-windows-gnu"),
Crate("winapi-util"),
Crate("winapi-x86_64-pc-windows-gnu"),
Crate("wincolor"),
];
// Some types for Serde to deserialize the output of `cargo metadata` to...
#[derive(Deserialize)]
struct Output {
resolve: Resolve,
}
#[derive(Deserialize)]
struct Resolve {
nodes: Vec<ResolveNode>,
}
#[derive(Deserialize)]
struct ResolveNode {
id: String,
dependencies: Vec<String>,
}
/// A unique identifier for a crate
#[derive(Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Debug, Hash)]
struct Crate<'a>(&'a str); // (name,)
#[derive(Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Debug, Hash)]
struct CrateVersion<'a>(&'a str, &'a str); // (name, version)
impl<'a> Crate<'a> {
pub fn id_str(&self) -> String {
format!("{} ", self.0)
}
}
impl<'a> CrateVersion<'a> {
/// Returns the struct and whether or not the dep is in-tree
pub fn from_str(s: &'a str) -> (Self, bool) {
let mut parts = s.split(' ');
let name = parts.next().unwrap();
let version = parts.next().unwrap();
let path = parts.next().unwrap();
let is_path_dep = path.starts_with("(path+");
(CrateVersion(name, version), is_path_dep)
}
pub fn id_str(&self) -> String {
format!("{} {}", self.0, self.1)
}
}
impl<'a> From<CrateVersion<'a>> for Crate<'a> {
fn from(cv: CrateVersion<'a>) -> Crate<'a> {
Crate(cv.0)
}
}
/// Checks the dependency at the given path. Changes `bad` to `true` if a check failed.
///
/// Specifically, this checks that the license is correct.
pub fn check(path: &Path, bad: &mut bool) {
// Check licences
let path = path.join("vendor");
assert!(path.exists(), "vendor directory missing");
let mut saw_dir = false;
for dir in t!(path.read_dir()) {
saw_dir = true;
let dir = t!(dir);
// skip our exceptions
if EXCEPTIONS.iter().any(|exception| {
dir.path()
.to_str()
.unwrap()
.contains(&format!("src/vendor/{}", exception))
}) {
continue;
}
let toml = dir.path().join("Cargo.toml");
*bad = *bad || !check_license(&toml);
}
assert!(saw_dir, "no vendored source");
}
/// Checks the dependency of WHITELIST_CRATES at the given path. Changes `bad` to `true` if a check
/// failed.
///
/// Specifically, this checks that the dependencies are on the WHITELIST.
pub fn check_whitelist(path: &Path, cargo: &Path, bad: &mut bool) {
// Get dependencies from cargo metadata
let resolve = get_deps(path, cargo);
// Get the whitelist into a convenient form
let whitelist: HashSet<_> = WHITELIST.iter().cloned().collect();
// Check dependencies
let mut visited = BTreeSet::new();
let mut unapproved = BTreeSet::new();
for &krate in WHITELIST_CRATES.iter() {
let mut bad = check_crate_whitelist(&whitelist, &resolve, &mut visited, krate, false);
unapproved.append(&mut bad);
}
if unapproved.len() > 0 {
println!("Dependencies not on the whitelist:");
for dep in unapproved {
println!("* {}", dep.id_str());
}
*bad = true;
}
check_crate_duplicate(&resolve, bad);
}
fn check_license(path: &Path) -> bool {
if !path.exists() {
panic!("{} does not exist", path.display());
}
let mut contents = String::new();
t!(t!(File::open(path)).read_to_string(&mut contents));
let mut found_license = false;
for line in contents.lines() {
if !line.starts_with("license") {
continue;
}
let license = extract_license(line);
if !LICENSES.contains(&&*license) {
println!("invalid license {} in {}", license, path.display());
return false;
}
found_license = true;
break;
}
if !found_license {
println!("no license in {}", path.display());
return false;
}
true
}
fn extract_license(line: &str) -> String {
let first_quote = line.find('"');
let last_quote = line.rfind('"');
if let (Some(f), Some(l)) = (first_quote, last_quote) {
let license = &line[f + 1..l];
license.into()
} else {
"bad-license-parse".into()
}
}
/// Get the dependencies of the crate at the given path using `cargo metadata`.
fn get_deps(path: &Path, cargo: &Path) -> Resolve {
// Run `cargo metadata` to get the set of dependencies
let output = Command::new(cargo)
.arg("metadata")
.arg("--format-version")
.arg("1")
.arg("--manifest-path")
.arg(path.join("Cargo.toml"))
.output()
.expect("Unable to run `cargo metadata`")
.stdout;
let output = String::from_utf8_lossy(&output);
let output: Output = serde_json::from_str(&output).unwrap();
output.resolve
}
/// Checks the dependencies of the given crate from the given cargo metadata to see if they are on
/// the whitelist. Returns a list of illegal dependencies.
fn check_crate_whitelist<'a, 'b>(
whitelist: &'a HashSet<Crate>,
resolve: &'a Resolve,
visited: &'b mut BTreeSet<CrateVersion<'a>>,
krate: CrateVersion<'a>,
must_be_on_whitelist: bool,
) -> BTreeSet<Crate<'a>> {
// Will contain bad deps
let mut unapproved = BTreeSet::new();
// Check if we have already visited this crate
if visited.contains(&krate) {
return unapproved;
}
visited.insert(krate);
// If this path is in-tree, we don't require it to be on the whitelist
if must_be_on_whitelist {
// If this dependency is not on the WHITELIST, add to bad set
if !whitelist.contains(&krate.into()) {
unapproved.insert(krate.into());
}
}
// Do a DFS in the crate graph (it's a DAG, so we know we have no cycles!)
let to_check = resolve
.nodes
.iter()
.find(|n| n.id.starts_with(&krate.id_str()))
.expect("crate does not exist");
for dep in to_check.dependencies.iter() {
let (krate, is_path_dep) = CrateVersion::from_str(dep);
let mut bad = check_crate_whitelist(whitelist, resolve, visited, krate, !is_path_dep);
unapproved.append(&mut bad);
}
unapproved
}
fn check_crate_duplicate(resolve: &Resolve, bad: &mut bool) {
const FORBIDDEN_TO_HAVE_DUPLICATES: &[&str] = &[
// These two crates take quite a long time to build, let's not let two
// versions of them accidentally sneak into our dependency graph to
// ensure we keep our CI times under control
// "cargo", // FIXME(#53005)
"rustc-ap-syntax",
];
let mut name_to_id: HashMap<_, Vec<_>> = HashMap::new();
for node in resolve.nodes.iter() {
name_to_id.entry(node.id.split_whitespace().next().unwrap())
.or_default()
.push(&node.id);
}
for name in FORBIDDEN_TO_HAVE_DUPLICATES {
if name_to_id[name].len() <= 1 {
continue
}
println!("crate `{}` is duplicated in `Cargo.lock`", name);
for id in name_to_id[name].iter() {
println!(" * {}", id);
}
*bad = true;
}
}
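// Hedged sketches, not part of the original tidy test suite, exercising the two pure
// helpers above (`extract_license` and `check_crate_whitelist`). The crate names,
// versions and registry/path strings below are fabricated for illustration only.
#[cfg(test)]
mod sketches {
    use super::*;

    #[test]
    fn extract_license_reads_the_quoted_value() {
        assert_eq!(
            extract_license(r#"license = "MIT/Apache-2.0""#),
            "MIT/Apache-2.0"
        );
        // A line without quotes falls back to the sentinel value.
        assert_eq!(extract_license("license = unquoted"), "bad-license-parse");
    }

    #[test]
    fn whitelist_check_reports_non_whitelisted_registry_deps() {
        // A fabricated two-node graph: the in-tree crate `root` depends on `helper`,
        // which comes from a registry and is not on the whitelist.
        let resolve = Resolve {
            nodes: vec![
                ResolveNode {
                    id: "root 0.1.0 (path+file:///src/root)".to_string(),
                    dependencies: vec![
                        "helper 1.0.0 (registry+https://example.org)".to_string(),
                    ],
                },
                ResolveNode {
                    id: "helper 1.0.0 (registry+https://example.org)".to_string(),
                    dependencies: vec![],
                },
            ],
        };
        let whitelist: HashSet<_> = [Crate("root")].iter().cloned().collect();
        let mut visited = BTreeSet::new();
        let unapproved = check_crate_whitelist(
            &whitelist,
            &resolve,
            &mut visited,
            CrateVersion("root", "0.1.0"),
            false,
        );
        // `helper` is neither a path dependency nor whitelisted, so it is reported.
        assert_eq!(unapproved.len(), 1);
        assert!(unapproved.contains(&Crate("helper")));
    }
}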
| 30.044386 | 99 | 0.594942 |
6a744664a7c03e344936bae2fcbe735b34d14c7f | 7,037 | use clap::{clap_app, crate_authors, crate_version};
use nispor::{Iface, NetConf, NetState, NisporError, Route, RouteRule};
use serde_derive::Serialize;
use serde_json;
use serde_yaml;
use std::fmt;
use std::io::{stderr, stdout, Write};
use std::process;
#[derive(Serialize)]
pub struct CliError {
pub msg: String,
}
impl fmt::Display for CliError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.msg)
}
}
enum CliResult {
Pass,
Full(NetState),
Ifaces(Vec<Iface>),
Routes(Vec<Route>),
RouteRules(Vec<RouteRule>),
CliError(CliError),
NisporError(NisporError),
}
enum CliOutputType {
Json,
Yaml,
}
macro_rules! npc_print {
($display_func:expr, $data: expr) => {
match $data {
CliResult::Pass => {
process::exit(0);
}
CliResult::Full(netstate) => {
writeln!(stdout(), "{}", $display_func(&netstate).unwrap())
.ok();
process::exit(0);
}
CliResult::Ifaces(ifaces) => {
writeln!(stdout(), "{}", $display_func(&ifaces).unwrap()).ok();
process::exit(0);
}
CliResult::Routes(routes) => {
writeln!(stdout(), "{}", $display_func(&routes).unwrap()).ok();
process::exit(0);
}
CliResult::RouteRules(rules) => {
writeln!(stdout(), "{}", $display_func(&rules).unwrap()).ok();
process::exit(0);
}
CliResult::NisporError(e) => {
writeln!(stderr(), "{}", $display_func(&e).unwrap()).ok();
process::exit(1);
}
CliResult::CliError(e) => {
writeln!(stderr(), "{}", $display_func(&e).unwrap()).ok();
process::exit(1);
}
}
};
}
fn print_result(result: &CliResult, output_type: CliOutputType) {
match output_type {
CliOutputType::Json => npc_print!(serde_json::to_string_pretty, result),
CliOutputType::Yaml => npc_print!(serde_yaml::to_string, result),
}
}
fn parse_arg_output_format(matches: &clap::ArgMatches) -> CliOutputType {
match matches.is_present("json") {
true => CliOutputType::Json,
false => CliOutputType::Yaml,
}
}
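// A hedged sketch, not part of the original CLI: it shows how the `-j` flag maps to
// `CliOutputType::Json` in `parse_arg_output_format`. The tiny `clap_app!` built here is
// a stand-in for the real argument parser assembled in `main`.
#[cfg(test)]
mod output_format_sketch {
    use super::*;
    use clap::clap_app;

    #[test]
    fn json_flag_selects_json_output() {
        let matches = clap_app!(npc_test => (@arg json: -j "Show in json format"))
            .get_matches_from(vec!["npc_test", "-j"]);
        assert!(matches!(
            parse_arg_output_format(&matches),
            CliOutputType::Json
        ));
        // Without the flag, output defaults to YAML.
        let matches = clap_app!(npc_test => (@arg json: -j "Show in json format"))
            .get_matches_from(vec!["npc_test"]);
        assert!(matches!(
            parse_arg_output_format(&matches),
            CliOutputType::Yaml
        ));
    }
}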
fn _is_route_to_specified_dev(route: &Route, iface_name: &str) -> bool {
if let Some(oif) = &route.oif {
if oif == iface_name {
return true;
}
}
if let Some(mp_routes) = &route.multipath {
for mp_route in mp_routes {
if mp_route.iface == iface_name {
return true;
}
}
}
false
}
fn get_routes(state: &NetState, matches: &clap::ArgMatches) -> CliResult {
let mut routes = state.routes.clone();
if let Some(iface_name) = matches.value_of("dev") {
routes = routes
.into_iter()
.filter(|route| _is_route_to_specified_dev(route, iface_name))
.collect();
}
CliResult::Routes(routes)
}
fn main() {
let matches = clap_app!(npc =>
(version: crate_version!())
(author: crate_authors!())
(about: "Nispor CLI")
(@arg ifname: [INTERFACE_NAME] "interface name")
(@arg json: -j --json "Show in json format")
(@subcommand route =>
(@arg json: -j --json "Show in json format")
(@arg dev: -d --dev [OIF] "Show only route entries with output to the specified interface")
(about: "Show routes")
)
(@subcommand rule =>
(@arg json: -j --json "Show in json format")
(about: "Show routes rules")
)
(@subcommand set =>
(@arg file_path: [FILE_PATH] +required "config file to apply")
(about: "Apply network config")
)
)
.get_matches();
let mut output_format = parse_arg_output_format(&matches);
if let Some(m) = matches.subcommand_matches("set") {
if let Some(file_path) = m.value_of("file_path") {
print_result(&apply_conf(&file_path), output_format);
process::exit(0);
} else {
eprintln!("file path undefined");
process::exit(1);
}
} else {
let result = match NetState::retrieve() {
Ok(mut state) => {
if let Some(ifname) = matches.value_of("ifname") {
if let Some(iface) = state.ifaces.remove(ifname) {
CliResult::Ifaces(vec![iface])
} else {
CliResult::CliError(CliError {
msg: format!("Interface '{}' not found", ifname),
})
}
} else if let Some(m) = matches.subcommand_matches("route") {
output_format = parse_arg_output_format(m);
get_routes(&state, &m)
} else if let Some(m) = matches.subcommand_matches("rule") {
output_format = parse_arg_output_format(m);
CliResult::RouteRules(state.rules)
} else {
/* Show everything if no cmdline arg has been supplied */
CliResult::Full(state)
}
}
Err(e) => CliResult::NisporError(e),
};
print_result(&result, output_format);
}
}
fn apply_conf(file_path: &str) -> CliResult {
let fd = match std::fs::File::open(file_path) {
Ok(fd) => fd,
Err(e) => {
return CliResult::CliError(CliError {
msg: format!("Filed to open file {}: {}", file_path, e),
})
}
};
let net_conf: NetConf = match serde_yaml::from_reader(fd) {
Ok(c) => c,
Err(e) => {
return CliResult::CliError(CliError {
msg: format!("Invalid YAML file {}: {}", file_path, e,),
})
}
};
if let Err(e) = net_conf.apply() {
return CliResult::NisporError(e);
}
if let Some(desire_ifaces) = net_conf.ifaces {
match NetState::retrieve() {
Ok(cur_state) => {
let mut desired_iface_names = Vec::new();
for iface_conf in &desire_ifaces {
desired_iface_names.push(iface_conf.name.clone());
}
CliResult::Ifaces(filter_iface_state(
cur_state,
desired_iface_names,
))
}
Err(e) => CliResult::NisporError(e),
}
} else {
CliResult::Pass
}
}
fn filter_iface_state(
cur_state: NetState,
des_iface_names: Vec<String>,
) -> Vec<Iface> {
let mut new_ifaces = Vec::new();
for (iface_name, iface_state) in cur_state.ifaces.iter() {
if des_iface_names.contains(iface_name) {
new_ifaces.push(iface_state.clone());
}
}
new_ifaces
}
| 31 | 103 | 0.516555 |
e9ebafd684681d70eb567164f8ff203bc5e59a2f | 627 | mod example;
mod simple_match;
use crate::StyledText;
pub use example::ExampleHighlighter;
pub use simple_match::SimpleMatchHighlighter;
/// The syntax highlighting trait. Implementers of this trait will take in the current string and then
/// return a `StyledText` object, which represents the contents of the original line as styled strings
pub trait Highlighter: Send {
/// The action that will handle the current buffer as a line and return the corresponding `StyledText` for the buffer
///
/// Cursor position as byte offsets in the string
fn highlight(&self, line: &str, cursor: usize) -> StyledText;
}
| 39.1875 | 121 | 0.755981 |
896d678b70a5429f3bd47a561749789da0ceab03 | 2,664 | use clap::{
crate_authors, crate_description, crate_name, crate_version, App, Arg, ArgMatches, SubCommand,
};
pub fn cli() -> ArgMatches<'static> {
App::new(crate_name!())
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.subcommand(
SubCommand::with_name("check").about("Checks NOIDs").arg(
Arg::with_name("input")
.short("i")
.long("input")
.multiple(true)
.min_values(1)
.value_name("FILE")
.help("Checks a list of NOIDs stored in one or more files. One ID per line"),
)
.arg(
Arg::with_name("output")
.short("o")
.long("output")
.value_name("FILE")
.help("Output file")
)
.arg(
Arg::with_name("workers")
.short("w")
.long("workers")
.value_name("NUMBER OF WORKERS")
.default_value("4")
.help("Sets the number of workers")
),
)
.subcommand(
SubCommand::with_name("checksum").about("Computes the checksum char of NOIDs").arg(
Arg::with_name("input")
.short("i")
.long("input")
.multiple(true)
.min_values(1)
.value_name("FILE")
.help("Computes the checksum char of a list of NOIDs stored in one or more file. One ID per line")
)
.arg(
Arg::with_name("output")
.short("o")
.long("output")
.value_name("FILE")
.help("Output file")
)
.arg(
Arg::with_name("workers")
.short("w")
.long("workers")
.value_name("NUMBER OF WORKERS")
.default_value("4")
.help("Sets the number of workers")
),
)
.subcommand(
SubCommand::with_name("ws").about("Starts the NCDA Web Service").arg(
Arg::with_name("port")
.short("p")
.long("port")
.value_name("PORT")
.help("Sets the port used by the Web Service"),
),
)
.get_matches()
}
| 36.493151 | 118 | 0.402402 |
d9a045cc9daeca02126d788668fcb55e041cf30f | 3,315 | use std::collections::HashMap;
use std::error::Error;
use std::sync::Arc;
use parking_lot::RwLock;
use crate::errors::MemDBError;
/// "DB" defines the "trait" of trie and database interaction.
/// You should first write the data to the cache and write the data
/// to the database in bulk after the end of a set of operations.
pub trait DB: Send + Sync {
type Error: Error;
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// Insert data into the cache.
fn insert(&self, key: &[u8], value: Vec<u8>) -> Result<(), Self::Error>;
/// Remove data with given key.
fn remove(&self, key: &[u8]) -> Result<(), Self::Error>;
/// Insert a batch of data into the cache.
fn insert_batch(&self, keys: Vec<Vec<u8>>, values: Vec<Vec<u8>>) -> Result<(), Self::Error> {
for i in 0..keys.len() {
let key = &keys[i];
let value = values[i].clone();
self.insert(key, value)?;
}
Ok(())
}
/// Remove a batch of data into the cache.
fn remove_batch(&self, keys: &[Vec<u8>]) -> Result<(), Self::Error> {
for key in keys {
self.remove(key)?;
}
Ok(())
}
/// Flush data to the DB from the cache.
fn flush(&self) -> Result<(), Self::Error>;
#[cfg(test)]
fn len(&self) -> Result<usize, Self::Error>;
#[cfg(test)]
fn is_empty(&self) -> Result<bool, Self::Error>;
}
#[derive(Default, Debug)]
pub struct MemoryDB {
// If "light" is true, the data is deleted from the database at the time of submission.
light: bool,
storage: Arc<RwLock<HashMap<Vec<u8>, Vec<u8>>>>,
}
impl MemoryDB {
pub fn new(light: bool) -> Self {
MemoryDB {
light,
storage: Arc::new(RwLock::new(HashMap::new())),
}
}
}
impl DB for MemoryDB {
type Error = MemDBError;
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
if let Some(value) = self.storage.read().get(key) {
Ok(Some(value.clone()))
} else {
Ok(None)
}
}
fn insert(&self, key: &[u8], value: Vec<u8>) -> Result<(), Self::Error> {
self.storage.write().insert(key.to_vec(), value);
Ok(())
}
fn remove(&self, key: &[u8]) -> Result<(), Self::Error> {
if self.light {
self.storage.write().remove(key);
}
Ok(())
}
fn flush(&self) -> Result<(), Self::Error> {
Ok(())
}
#[cfg(test)]
fn len(&self) -> Result<usize, Self::Error> {
Ok(self.storage.try_read().unwrap().len())
}
#[cfg(test)]
fn is_empty(&self) -> Result<bool, Self::Error> {
Ok(self.storage.try_read().unwrap().is_empty())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_memdb_get() {
let memdb = MemoryDB::new(true);
memdb.insert(b"test-key", b"test-value".to_vec()).unwrap();
let v = memdb.get(b"test-key").unwrap().unwrap();
assert_eq!(v, b"test-value")
}
#[test]
fn test_memdb_remove() {
let memdb = MemoryDB::new(true);
memdb.insert(b"test", b"test".to_vec()).unwrap();
memdb.remove(b"test").unwrap();
let contains = memdb.get(b"test").unwrap();
assert_eq!(contains, None)
}
}
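// A hedged usage sketch, not part of the original test suite: it demonstrates the
// cache-then-flush pattern described on the `DB` trait, using the provided
// `insert_batch`/`remove_batch` default methods against `MemoryDB`.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn batch_insert_then_flush() {
        let memdb = MemoryDB::new(true);
        let keys = vec![b"k1".to_vec(), b"k2".to_vec()];
        let values = vec![b"v1".to_vec(), b"v2".to_vec()];
        // Writes land in the in-memory cache first...
        memdb.insert_batch(keys, values).unwrap();
        // ...and `flush` is where a persistent backend would commit them (a no-op here).
        memdb.flush().unwrap();
        assert_eq!(memdb.get(b"k1").unwrap(), Some(b"v1".to_vec()));
        // With `light == true`, removals take effect immediately.
        memdb.remove_batch(&[b"k2".to_vec()]).unwrap();
        assert_eq!(memdb.get(b"k2").unwrap(), None);
    }
}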
| 26.309524 | 97 | 0.543288 |
188a665739c07a095bf459a7dcf73ad2fed0847b | 61 | mod listen;
mod model;
pub use listen::*;
pub use model::*;
| 10.166667 | 18 | 0.655738 |
4ab6e0ace4304525a130b56d92d2abffe860f08b | 145,719 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
use log_derive::*;
use mirai_annotations::*;
use rustc_hir::def_id::DefId;
use rustc_middle::mir;
use rustc_middle::ty::subst::{GenericArg, GenericArgKind, SubstsRef};
use rustc_middle::ty::{Ty, TyKind, UintTy};
use rustc_target::abi::VariantIdx;
use std::collections::HashMap;
use std::fmt::{Debug, Formatter, Result};
use std::rc::Rc;
use std::time::Instant;
use crate::abstract_value::{AbstractValue, AbstractValueTrait};
use crate::block_visitor::BlockVisitor;
use crate::body_visitor::BodyVisitor;
use crate::constant_domain::{ConstantDomain, FunctionReference};
use crate::environment::Environment;
use crate::expression::{Expression, ExpressionType, LayoutSource};
use crate::k_limits;
use crate::known_names::KnownNames;
use crate::options::DiagLevel;
use crate::path::{Path, PathEnum, PathRefinement, PathSelector};
use crate::summaries::{Precondition, Summary};
use crate::tag_domain::Tag;
use crate::type_visitor::TypeVisitor;
use crate::{abstract_value, utils};
pub struct CallVisitor<'call, 'block, 'analysis, 'compilation, 'tcx> {
pub actual_args: Vec<(Rc<Path>, Rc<AbstractValue>)>,
pub actual_argument_types: Vec<Ty<'tcx>>,
pub block_visitor: &'call mut BlockVisitor<'block, 'analysis, 'compilation, 'tcx>,
pub callee_def_id: DefId,
pub callee_func_ref: Option<Rc<FunctionReference>>,
pub callee_fun_val: Rc<AbstractValue>,
pub callee_generic_arguments: Option<SubstsRef<'tcx>>,
pub callee_known_name: KnownNames,
pub callee_generic_argument_map: Option<HashMap<rustc_span::Symbol, GenericArg<'tcx>>>,
pub cleanup: Option<mir::BasicBlock>,
pub destination: Option<(mir::Place<'tcx>, mir::BasicBlock)>,
pub environment_before_call: Environment,
pub function_constant_args: &'call [(Rc<Path>, Ty<'tcx>, Rc<AbstractValue>)],
pub initial_type_cache: Option<Rc<HashMap<Rc<Path>, Ty<'tcx>>>>,
}
impl<'call, 'block, 'analysis, 'compilation, 'tcx> Debug
for CallVisitor<'call, 'block, 'analysis, 'compilation, 'tcx>
{
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
"CallVisitor".fmt(f)
}
}
impl<'call, 'block, 'analysis, 'compilation, 'tcx>
CallVisitor<'call, 'block, 'analysis, 'compilation, 'tcx>
{
pub(crate) fn new(
block_visitor: &'call mut BlockVisitor<'block, 'analysis, 'compilation, 'tcx>,
callee_def_id: DefId,
callee_generic_arguments: Option<SubstsRef<'tcx>>,
callee_generic_argument_map: Option<HashMap<rustc_span::Symbol, GenericArg<'tcx>>>,
environment_before_call: Environment,
func_const: ConstantDomain,
) -> CallVisitor<'call, 'block, 'analysis, 'compilation, 'tcx> {
if let ConstantDomain::Function(func_ref) = &func_const {
let callee_known_name = func_ref.known_name;
CallVisitor {
block_visitor,
callee_def_id,
callee_func_ref: Some(func_ref.clone()),
callee_fun_val: Rc::new(func_const.into()),
callee_generic_arguments,
callee_known_name,
callee_generic_argument_map,
actual_args: vec![],
actual_argument_types: vec![],
cleanup: None,
destination: None,
environment_before_call,
function_constant_args: &[],
initial_type_cache: None,
}
} else {
unreachable!("caller should supply a constant function")
}
}
pub fn type_visitor(&self) -> &TypeVisitor<'tcx> {
self.block_visitor.bv.type_visitor()
}
pub fn type_visitor_mut(&mut self) -> &mut TypeVisitor<'tcx> {
self.block_visitor.bv.type_visitor_mut()
}
/// Summarize the referenced function, specialized by its generic arguments and the actual
/// values of any function parameters. Then cache it.
#[logfn_inputs(TRACE)]
pub fn create_and_cache_function_summary(
&mut self,
func_args: &Option<Rc<Vec<Rc<FunctionReference>>>>,
initial_type_cache: &Option<Rc<HashMap<Rc<Path>, Ty<'tcx>>>>,
) -> Summary {
let func_type = self.block_visitor.bv.tcx.type_of(self.callee_def_id);
trace!("summarizing {:?}: {:?}", self.callee_def_id, func_type);
let tcx = self.block_visitor.bv.tcx;
if tcx.is_mir_available(self.callee_def_id) {
let mut body_visitor = BodyVisitor::new(
self.block_visitor.bv.cv,
self.callee_def_id,
self.block_visitor.bv.buffered_diagnostics,
self.block_visitor.bv.active_calls_map,
self.block_visitor.bv.cv.type_cache.clone(),
);
body_visitor.type_visitor_mut().actual_argument_types =
self.actual_argument_types.clone();
body_visitor.type_visitor_mut().generic_arguments = self.callee_generic_arguments;
body_visitor.type_visitor_mut().generic_argument_map =
self.callee_generic_argument_map.clone();
body_visitor.analyzing_static_var = self.block_visitor.bv.analyzing_static_var;
if let Some(cache) = &self.initial_type_cache {
for (p, t) in cache.iter() {
body_visitor
.type_visitor_mut()
.set_path_rustc_type(p.clone(), *t);
}
}
let elapsed_time = self.block_visitor.bv.start_instant.elapsed();
let mut summary = body_visitor.visit_body(self.function_constant_args);
trace!("summary {:?} {:?}", self.callee_def_id, summary);
if let Some(func_ref) = &self.callee_func_ref {
// If there is already a computed summary in the cache, we are in a recursive loop
// and hence have to join the summaries.
let previous_summary = self
.block_visitor
.bv
.cv
.summary_cache
.get_summary_for_call_site(func_ref, func_args, initial_type_cache);
if previous_summary.is_computed {
summary.join_side_effects(previous_summary)
}
// We cache the summary with call site details included so that
// cached summaries are specialized with respect to call site generic arguments and
// function constants arguments. Subsequent calls with the call site signature
// will not need to re-summarize the function, thus avoiding exponential blow up.
self.block_visitor
.bv
.cv
.summary_cache
.set_summary_for_call_site(
func_ref,
func_args,
initial_type_cache,
summary.clone(),
);
}
self.block_visitor.bv.start_instant = Instant::now() - elapsed_time;
return summary;
}
if !self.block_visitor.bv.tcx.is_static(self.callee_def_id) {
info!("function {:?} has no MIR", self.callee_def_id);
if let Some(fr) = &self.callee_func_ref {
info!(
"summary key {:?} with signature {:?}",
fr.summary_cache_key, fr.argument_type_key
);
}
}
Summary::default()
}
    /// If self.callee_def_id refers to a trait (virtual) method, this tries to resolve it to the
    /// def_id of the concrete method that implements the given virtual method and updates the
    /// callee state (def_id, function reference and generic arguments) accordingly.
#[logfn_inputs(TRACE)]
fn try_to_devirtualize(&mut self) {
if self
.block_visitor
.bv
.tcx
.is_mir_available(self.callee_def_id)
&& !utils::is_trait_method(self.callee_def_id, self.block_visitor.bv.tcx)
{
return;
}
if let Some(gen_args) = self.callee_generic_arguments {
// The parameter environment of the caller provides a resolution context for the callee.
let param_env = rustc_middle::ty::ParamEnv::reveal_all();
trace!(
"devirtualize resolving def_id {:?}: {:?}",
self.callee_def_id,
self.block_visitor.bv.tcx.type_of(self.callee_def_id)
);
trace!("devirtualize resolving func_ref {:?}", self.callee_func_ref,);
trace!("gen_args {:?}", gen_args);
if let Ok(Some(instance)) = rustc_middle::ty::Instance::resolve(
self.block_visitor.bv.tcx,
param_env,
self.callee_def_id,
gen_args,
) {
let resolved_def_id = instance.def.def_id();
self.callee_def_id = resolved_def_id;
let resolved_ty = self.block_visitor.bv.tcx.type_of(resolved_def_id);
let resolved_map = self.type_visitor().get_generic_arguments_map(
resolved_def_id,
instance.substs,
&[],
);
let specialized_resolved_ty = self
.type_visitor()
.specialize_generic_argument_type(resolved_ty, &resolved_map);
trace!(
"devirtualize resolved def_id {:?}: {:?}",
resolved_def_id,
specialized_resolved_ty
);
let func_const = self
.block_visitor
.visit_function_reference(
resolved_def_id,
specialized_resolved_ty,
Some(instance.substs),
)
.clone();
self.callee_func_ref = if let ConstantDomain::Function(fr) = &func_const {
self.callee_known_name = fr.known_name;
Some(fr.clone())
} else {
None
};
self.callee_fun_val = Rc::new(func_const.into());
self.callee_generic_arguments = Some(instance.substs);
self.callee_generic_argument_map = self.type_visitor().get_generic_arguments_map(
resolved_def_id,
instance.substs,
&self.actual_argument_types,
);
let tcx = self.block_visitor.bv.tcx;
if specialized_resolved_ty.is_closure() && tcx.is_mir_available(resolved_def_id) {
let mir = tcx.optimized_mir(resolved_def_id);
if self.actual_argument_types.len() + 1 == mir.arg_count {
// When the closure has no captured variables, the first argument is just the function pointer.
// Sadly, MIR omits this argument (because the call is via a trait), so we have to add it here.
self.actual_args
.insert(0, (Path::new_parameter(1), self.callee_fun_val.clone()));
self.actual_argument_types.insert(
0,
tcx.mk_mut_ref(tcx.lifetimes.re_static, specialized_resolved_ty),
);
}
}
} else {
info!(
"could not resolve function {:?}, {:?}, {:?}",
self.callee_def_id, param_env, gen_args,
)
}
}
}
/// Extract a list of function references from an environment of function constant arguments
#[logfn_inputs(TRACE)]
fn get_function_constant_signature(
&mut self,
func_args: &[(Rc<Path>, Ty<'tcx>, Rc<AbstractValue>)],
) -> Option<Rc<Vec<Rc<FunctionReference>>>> {
if func_args.is_empty() {
return None;
}
let vec: Vec<Rc<FunctionReference>> = func_args
.iter()
.filter_map(|(_, _, v)| self.block_visitor.get_func_ref(v))
.collect();
if vec.is_empty() {
return None;
}
Some(Rc::new(vec))
}
/// Returns a summary of the function to call, obtained from the summary cache.
#[logfn_inputs(TRACE)]
pub fn get_function_summary(&mut self) -> Option<Summary> {
self.try_to_devirtualize();
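        // Record call graph edges for this call site, labeling each edge with an actual
        // argument type (or with an empty label when there are no arguments).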
if self.actual_argument_types.is_empty() {
self.block_visitor.bv.cv.call_graph.add_edge(
self.block_visitor.bv.def_id,
self.callee_def_id,
"".to_string().into_boxed_str(),
);
} else {
for ty in self.actual_argument_types.iter() {
self.block_visitor.bv.cv.call_graph.add_edge(
self.block_visitor.bv.def_id,
self.callee_def_id,
ty.to_string().into_boxed_str(),
);
}
}
if let Some(func_ref) = &self.callee_func_ref.clone() {
// If the actual arguments include any function constants, collect them together
// and pass them to get_summary_for_function_constant so that their signatures
// can be included in the type specific key that is used to look up non generic
// predefined summaries.
let func_args = self.get_function_constant_signature(self.function_constant_args);
let initial_type_cache = self.initial_type_cache.clone();
let call_depth = *self
.block_visitor
.bv
.active_calls_map
.get(&func_ref.def_id.unwrap())
.unwrap_or(&0u64);
let result = self
.block_visitor
.bv
.cv
.summary_cache
.get_summary_for_call_site(func_ref, &func_args, &initial_type_cache)
.clone();
if result.is_computed || func_ref.def_id.is_none() {
return Some(result);
}
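            // Bound the depth of recursive re-summarization: beyond this depth we fall back to
            // an empty summary and rely on the joining and widening done by shallower calls.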
if call_depth < 3 {
let mut summary =
self.create_and_cache_function_summary(&func_args, &initial_type_cache);
if call_depth >= 1 {
summary.post_condition = None;
// Widen summary at call level 1 so that the level 0 call sees the widened values.
if call_depth == 1 {
summary.widen_side_effects();
}
self.block_visitor
.bv
.cv
.summary_cache
.set_summary_for_call_site(
func_ref,
&func_args,
&self.initial_type_cache,
summary.clone(),
);
}
return Some(summary);
} else {
// Probably a statically unbounded self recursive call. Use an empty summary and let
// earlier calls do the joining and widening required.
let mut summary = Summary::default();
summary
.side_effects
.push((Path::new_result(), Rc::new(abstract_value::BOTTOM)));
summary.is_computed = true;
self.block_visitor
.bv
.cv
.summary_cache
.set_summary_for_call_site(
func_ref,
&func_args,
&self.initial_type_cache,
summary.clone(),
);
return Some(summary);
}
}
None
}
/// If this call is to an implementation of the std::clone::Clone::clone trait method
/// then make sure any model fields and tag fields are copied to the result as well.
/// If there is no MIR implementation available for the clone method, then fall back to a
/// deep copy (after calling deal_with_missing_summary).
#[logfn_inputs(DEBUG)]
pub fn handle_clone(&mut self, summary: &Summary) {
if let Some((place, _)) = &self.destination {
checked_assume!(self.actual_args.len() == 1);
let target_type = ExpressionType::from(
self.type_visitor()
.get_dereferenced_type(self.actual_argument_types[0])
.kind(),
);
let source_path = Path::new_deref(
Path::get_as_path(self.actual_args[0].1.clone()),
target_type,
)
.canonicalize(&self.block_visitor.bv.current_environment);
let target_type = self
.type_visitor()
.get_rustc_place_type(place, self.block_visitor.bv.current_span);
let target_path = self.block_visitor.visit_rh_place(place);
if !summary.is_computed {
// Now just do a deep copy and carry on.
self.block_visitor.bv.copy_or_move_elements(
target_path,
source_path,
target_type,
false,
);
} else {
self.transfer_and_refine_into_current_environment(summary);
// Since the clone code is arbitrary it might not have copied model fields and tag fields.
// So just copy them again.
let value_map = self.block_visitor.bv.current_environment.value_map.clone();
for (path, value) in value_map.iter().filter(|(p, _)| {
if let PathEnum::QualifiedPath { selector, .. } = &p.value {
matches!(
**selector,
PathSelector::ModelField(..) | PathSelector::TagField
) && p.is_rooted_by(&source_path)
} else {
false
}
}) {
let target_path = path.replace_root(&source_path, target_path.clone());
self.block_visitor
.bv
.update_value_at(target_path, value.clone());
}
}
self.use_entry_condition_as_exit_condition();
} else {
assume_unreachable!();
}
}
/// If the current call is to a well known function for which we don't have a cached summary,
/// this function will update the environment as appropriate and return true. If the return
/// result is false, just carry on with the normal logic.
#[logfn_inputs(TRACE)]
pub fn handled_as_special_function_call(&mut self) -> bool {
match self.callee_known_name {
KnownNames::StdCloneClone => {
checked_assume!(self.actual_argument_types.len() == 1);
return self.handled_clone();
}
KnownNames::StdOpsFunctionFnCall
| KnownNames::StdOpsFunctionFnMutCallMut
| KnownNames::StdOpsFunctionFnOnceCallOnce
| KnownNames::StdSyncOnceCallOnce => {
self.inline_indirectly_called_function();
return true;
}
KnownNames::MiraiAbstractValue => {
checked_assume!(self.actual_args.len() == 1);
self.handle_abstract_value();
return true;
}
KnownNames::MiraiAddTag => {
checked_assume!(self.actual_args.len() == 1);
self.handle_add_tag();
return true;
}
KnownNames::MiraiAssume => {
checked_assume!(self.actual_args.len() == 1);
if self.block_visitor.bv.check_for_errors {
self.report_calls_to_special_functions();
}
self.handle_assume();
return true;
}
KnownNames::MiraiAssumePreconditions => {
checked_assume!(self.actual_args.is_empty());
self.block_visitor.bv.assume_preconditions_of_next_call = true;
return true;
}
KnownNames::MiraiDoesNotHaveTag => {
checked_assume!(self.actual_args.len() == 1);
self.handle_check_tag(false);
return true;
}
KnownNames::MiraiGetModelField => {
self.handle_get_model_field();
return true;
}
KnownNames::MiraiHasTag => {
checked_assume!(self.actual_args.len() == 1);
self.handle_check_tag(true);
return true;
}
KnownNames::MiraiPostcondition => {
checked_assume!(self.actual_args.len() == 3);
if self.block_visitor.bv.check_for_errors {
self.report_calls_to_special_functions();
}
self.handle_post_condition();
return true;
}
KnownNames::MiraiPreconditionStart => {
self.handle_precondition_start();
return true;
}
KnownNames::MiraiPrecondition => {
checked_assume!(self.actual_args.len() == 2);
self.handle_precondition();
self.handle_assume();
return true;
}
KnownNames::MiraiSetModelField => {
self.handle_set_model_field();
return true;
}
KnownNames::MiraiResult => {
if let Some((place, _)) = &self.destination {
let target_path = self.block_visitor.visit_rh_place(place);
let target_rustc_type = self
.type_visitor()
.get_rustc_place_type(place, self.block_visitor.bv.current_span);
let return_value_path = Path::new_result();
let return_value = self
.block_visitor
.bv
.lookup_path_and_refine_result(return_value_path, target_rustc_type);
self.block_visitor
.bv
.update_value_at(target_path, return_value);
} else {
assume_unreachable!();
}
self.use_entry_condition_as_exit_condition();
return true;
}
KnownNames::MiraiVerify => {
checked_assume!(self.actual_args.len() == 2);
if self.block_visitor.bv.check_for_errors {
self.report_calls_to_special_functions();
}
self.handle_assume();
return true;
}
KnownNames::RustDealloc => {
self.handle_rust_dealloc();
self.use_entry_condition_as_exit_condition();
return true;
}
KnownNames::StdFutureFromGenerator => {
checked_assume!(self.actual_args.len() == 1);
let generator_fun_val = self.actual_args[0].1.clone();
let generator_fun_ref = self
.block_visitor
.get_func_ref(&generator_fun_val)
.expect("a fun ref");
let generator_def_id = generator_fun_ref.def_id.expect("a def id");
let environment_before_call = self.block_visitor.bv.current_environment.clone();
let mut block_visitor = BlockVisitor::new(self.block_visitor.bv);
let mut generator_call_visitor = CallVisitor::new(
&mut block_visitor,
generator_def_id,
None,
None,
environment_before_call,
ConstantDomain::Function(generator_fun_ref),
);
self.block_visitor.bv.async_fn_summary =
generator_call_visitor.get_function_summary();
return true;
}
KnownNames::StdIntrinsicsCopy | KnownNames::StdIntrinsicsCopyNonOverlapping => {
self.handle_copy_non_overlapping();
return true;
}
KnownNames::StdIntrinsicsDiscriminantValue => {
self.handle_discriminant_value();
return true;
}
KnownNames::StdIntrinsicsTransmute => {
self.handle_transmute();
return true;
}
KnownNames::StdIntrinsicsWriteBytes => {
self.handle_write_bytes();
return true;
}
KnownNames::StdMemReplace => {
self.handle_mem_replace();
return true;
}
KnownNames::StdPtrSwapNonOverlapping => {
self.handle_swap_non_overlapping();
return true;
}
KnownNames::StdPanickingAssertFailed
| KnownNames::StdPanickingBeginPanic
| KnownNames::StdPanickingBeginPanicFmt => {
if self.block_visitor.bv.check_for_errors {
self.report_calls_to_special_functions();
}
if let Some((_, target)) = &self.destination {
self.block_visitor
.bv
.current_environment
.exit_conditions
.insert_mut(*target, abstract_value::FALSE.into());
}
return true;
}
_ => {
let result = self.try_to_inline_special_function();
if !result.is_bottom() {
if let Some((place, _)) = &self.destination {
let target_path = self.block_visitor.visit_lh_place(place);
self.block_visitor.bv.update_value_at(target_path, result);
self.use_entry_condition_as_exit_condition();
return true;
}
}
}
}
false
}
/// If the self parameter is &std::Option::None, the call to std::Clone::clone
/// can be handled without resolving the trait method to a concrete method.
#[logfn_inputs(TRACE)]
fn handled_clone(&mut self) -> bool {
precondition!(self.actual_argument_types.len() == 1);
if let TyKind::Ref(_, t, _) = self.actual_argument_types[0].kind() {
if let TyKind::Adt(def, substs) = t.kind() {
let variant_0 = VariantIdx::from_u32(0);
if Some(def.variants[variant_0].def_id)
== self.block_visitor.bv.tcx.lang_items().option_none_variant()
{
if let Some((place, _)) = &self.destination {
let target_path_discr =
Path::new_discriminant(self.block_visitor.visit_rh_place(place));
let arg0_discr_path = Path::new_discriminant(
Path::new_deref(
self.actual_args[0].0.clone(),
ExpressionType::NonPrimitive,
)
.canonicalize(&self.block_visitor.bv.current_environment),
);
let discr_ty = t.discriminant_ty(self.block_visitor.bv.tcx);
let discr_0_val = self.block_visitor.get_int_const_val(0, discr_ty);
let discr_val = self
.block_visitor
.bv
.lookup_path_and_refine_result(arg0_discr_path, discr_ty);
let is_zero = discr_val.equals(discr_0_val.clone());
let target_discr_value = match is_zero.as_bool_if_known() {
Some(false) => {
// Have to clone the Some(..) variant, let visit_caller take care of that
return false;
}
Some(true) => {
// Cloning is just copying the discriminant value
discr_0_val
}
None => {
// Might have to clone the Some(..) variant, so can't be handled here,
if let Some(promotable_is_zero) =
is_zero.extract_promotable_disjuncts(false)
{
// The caller might be able to avoid the diagnostic because it
// knows the actual argument whereas here we only know the type.
let specialized_substs = self.type_visitor().specialize_substs(
substs,
&self.callee_generic_argument_map,
);
if !utils::are_concrete(specialized_substs) {
// The clone method will not resolve, but we don't want visit_caller
// to issue a diagnostic because is_zero might refine to true
// further up the call stack. We deal with this by adding a
// precondition to the current function requiring that the
                                    // caller (or one of its callers) must ensure that is_zero will be true
// at runtime when this call is issued.
let precondition = Precondition {
condition: promotable_is_zero,
message: Rc::from("incomplete analysis of call because of failure to resolve std::Clone::clone method"),
provenance: None,
spans: vec![self.block_visitor.bv.current_span.source_callsite()],
};
self.block_visitor.bv.preconditions.push(precondition);
discr_0_val
} else {
// let visit_call issue a diagnostic
return false;
}
} else {
// let visit_call resolve the clone method and deal with it
return false;
}
}
};
self.block_visitor
.bv
.update_value_at(target_path_discr, target_discr_value);
self.use_entry_condition_as_exit_condition();
return true;
} else {
assume_unreachable!();
}
}
}
}
false
}
/// Use this for terminators that deterministically transfer control to a single successor block.
/// Such blocks, obviously, do not alter their entry path condition.
#[logfn_inputs(TRACE)]
fn use_entry_condition_as_exit_condition(&mut self) {
if let Some((_, target)) = &self.destination {
let exit_condition = self
.block_visitor
.bv
.current_environment
.entry_condition
.clone();
self.block_visitor
.bv
.current_environment
.exit_conditions
.insert_mut(*target, exit_condition);
}
}
/// If the function being called is a special function like mirai_annotations.mirai_verify or
/// std.panicking.begin_panic then report a diagnostic or create a precondition as appropriate.
#[logfn_inputs(TRACE)]
fn report_calls_to_special_functions(&mut self) {
precondition!(self.block_visitor.bv.check_for_errors);
match self.callee_known_name {
KnownNames::MiraiAssume => {
assume!(self.actual_args.len() == 1);
let (_, cond) = &self.actual_args[0];
let (cond_as_bool, entry_cond_as_bool) = self
.block_visitor
.bv
.check_condition_value_and_reachability(cond);
// If we never get here, rather call verify_unreachable!()
if !entry_cond_as_bool.unwrap_or(true) {
let span = self.block_visitor.bv.current_span.source_callsite();
let message =
"this is unreachable, mark it as such by using the verify_unreachable! macro";
let warning = self
.block_visitor
.bv
.cv
.session
.struct_span_warn(span, message);
self.block_visitor.bv.emit_diagnostic(warning);
return;
}
// If the condition is always true, this assumption is redundant. If false, the
// assumption is ignored. Otherwise, no diagnostics are emitted.
let message = if cond_as_bool == Some(true) {
"assumption is provably true and can be deleted"
} else if cond_as_bool == Some(false) {
"assumption is provably false and it will be ignored"
} else {
return;
};
let span = self.block_visitor.bv.current_span.source_callsite();
let warning = self
.block_visitor
.bv
.cv
.session
.struct_span_warn(span, message);
self.block_visitor.bv.emit_diagnostic(warning);
}
KnownNames::MiraiPostcondition => {
let actual_args = self.actual_args.clone();
assume!(actual_args.len() == 3); // The type checker ensures this.
let (_, assumption) = &actual_args[1];
let (_, cond) = &actual_args[0];
if !assumption.as_bool_if_known().unwrap_or(false) {
// Not an assumed post condition, so check the condition and only add this to
// the summary if it is reachable and true.
let message =
self.coerce_to_string(&Path::get_as_path(actual_args[2].1.clone()));
if self
.block_visitor
.check_special_function_condition(
cond,
message.as_ref(),
KnownNames::MiraiPostcondition,
)
.is_none()
{
self.block_visitor.try_extend_post_condition(cond);
}
} else {
self.block_visitor.try_extend_post_condition(cond);
}
}
KnownNames::MiraiVerify => {
let actual_args = self.actual_args.clone();
assume!(actual_args.len() == 2); // The type checker ensures this.
let (_, cond) = &actual_args[0];
let message = self.coerce_to_string(&Path::get_as_path(actual_args[1].1.clone()));
self.block_visitor.check_special_function_condition(
cond,
message.as_ref(),
KnownNames::MiraiVerify,
);
}
KnownNames::StdPanickingAssertFailed
| KnownNames::StdPanickingBeginPanic
| KnownNames::StdPanickingBeginPanicFmt => {
assume!(!self.actual_args.is_empty()); // The type checker ensures this.
let mut path_cond = self.block_visitor.might_be_reachable();
if !path_cond.unwrap_or(true) {
// We never get to this call, so nothing to report.
return;
}
let msg = match self.callee_known_name {
KnownNames::StdPanickingAssertFailed => Rc::from("assertion failed"),
KnownNames::StdPanickingBeginPanic => {
self.coerce_to_string(&Path::get_as_path(self.actual_args[0].1.clone()))
}
_ => {
let arguments_struct_path =
Path::get_as_path(self.actual_args[0].1.clone());
let pieces_path_fat = Path::new_field(arguments_struct_path, 0)
.canonicalize(&self.block_visitor.bv.current_environment);
let pieces_path_thin = Path::new_field(pieces_path_fat, 0);
let index = Rc::new(0u128.into());
let piece0_path_fat = Path::new_index(pieces_path_thin, index)
.canonicalize(&self.block_visitor.bv.current_environment);
self.coerce_to_string(&piece0_path_fat)
}
};
if msg.contains("entered unreachable code")
|| msg.contains("not yet implemented")
|| msg.starts_with("unrecoverable: ")
{
// We treat unreachable!() as an assumption rather than an assertion to prove.
                    // unimplemented!() is unlikely to be a programmer mistake, so no need to fixate on that either.
                    // unrecoverable! is a way for the programmer to indicate that termination is not a mistake.
return;
} else if path_cond.is_none() && msg.as_ref() == "statement is reachable" {
// verify_unreachable should always complain if possibly reachable
// and the current function is public or root.
path_cond = Some(true);
};
let span = self.block_visitor.bv.current_span.source_callsite();
if path_cond.unwrap_or(false)
&& self.block_visitor.bv.function_being_analyzed_is_root()
{
// We always get to this call and we have to assume that the function will
// get called, so keep the message certain.
// Don't, however, complain about panics in the standard contract summaries
if std::env::var("MIRAI_START_FRESH").is_err() {
let warning = self
.block_visitor
.bv
.cv
.session
.struct_span_warn(span, msg.as_ref());
self.block_visitor.bv.emit_diagnostic(warning);
} else {
// If we see an unconditional panic inside a standard contract summary,
// make it into an unsatisfiable precondition.
let precondition = Precondition {
condition: Rc::new(abstract_value::FALSE),
message: msg,
provenance: None,
spans: vec![],
};
self.block_visitor.bv.preconditions.push(precondition);
}
} else {
// We might get to this call, depending on the state at the call site.
//
if msg.contains("Post-condition of ") || msg.contains("Invariant of ") {
// Dealing with contracts crate
if self.block_visitor.bv.function_being_analyzed_is_root() {
let msg = msg.replace(" violated", " possibly violated");
let warning = self
.block_visitor
.bv
.cv
.session
.struct_span_warn(span, msg.as_ref());
self.block_visitor.bv.emit_diagnostic(warning);
}
return;
}
// In the case when an assert macro has been called, the inverse of the assertion
// was conjoined into the entry condition and this condition was simplified.
// We therefore cannot distinguish the case of maybe reaching a definitely
// false assertion from the case of definitely reaching a maybe false assertion.
//
// Since the assert and panic macros are commonly used to create preconditions
// it would be very inconvenient if this possibly false assertion were reported
// as a problem since there would be no way to shut it up. We therefore do not
// report this and instead insist that anyone who wants to have MIRAI check
// their assertions should use the mirai_annotations::verify! macro instead.
//
// We **do** have to push a precondition since this is the probable intent.
if let Some(promotable_entry_condition) = self
.block_visitor
.bv
.current_environment
.entry_condition
.extract_promotable_conjuncts(false)
{
let condition = promotable_entry_condition.logical_not();
let precondition = Precondition {
condition,
message: msg,
provenance: None,
spans: if self.block_visitor.bv.def_id.is_local() {
vec![span]
} else {
vec![] // The span is likely inside a standard macro, i.e. panic! etc.
},
};
self.block_visitor.bv.preconditions.push(precondition);
} else {
// If the assertion cannot be promoted because the caller cannot
// satisfy it (because it contains a reference to local variable),
// then we need to produce a diagnostic after all, but only if this
// a local function (i.e. a function in the crate being analyzed).
if self.block_visitor.bv.def_id.is_local() {
let warning = self
.block_visitor
.bv
.cv
.session
.struct_span_warn(span, msg.as_ref());
self.block_visitor.bv.emit_diagnostic(warning);
} else {
// Since the assertion occurs in code that is being used rather than
// analyzed, we'll assume that the code is correct and the analyzer
// discovered a false positive.
}
}
}
}
_ => assume_unreachable!(),
}
}
/// Provides special handling of functions that have no MIR bodies or that need to access
/// internal MIRAI state in ways that cannot be expressed in normal Rust and therefore
/// cannot be summarized in the standard_contracts crate.
/// Returns the result of the call, or BOTTOM if the function to call is not a known
/// special function.
#[allow(clippy::cognitive_complexity)]
#[logfn_inputs(TRACE)]
fn try_to_inline_special_function(&mut self) -> Rc<AbstractValue> {
match self.callee_known_name {
KnownNames::RustAlloc => self.handle_rust_alloc(),
KnownNames::RustAllocZeroed => self.handle_rust_alloc_zeroed(),
KnownNames::RustRealloc => self.handle_rust_realloc(),
KnownNames::StdIntrinsicsArithOffset => self.handle_arith_offset(),
KnownNames::StdIntrinsicsBitreverse
| KnownNames::StdIntrinsicsBswap
| KnownNames::StdIntrinsicsCtlz
| KnownNames::StdIntrinsicsCtpop
| KnownNames::StdIntrinsicsCttz => {
checked_assume!(self.actual_args.len() == 1);
let arg_type = ExpressionType::from(self.actual_argument_types[0].kind());
let bit_length = arg_type.bit_length();
self.actual_args[0]
.1
.intrinsic_bit_vector_unary(bit_length, self.callee_known_name)
}
KnownNames::StdIntrinsicsCtlzNonzero | KnownNames::StdIntrinsicsCttzNonzero => {
checked_assume!(self.actual_args.len() == 1);
if self.block_visitor.bv.check_for_errors {
let non_zero = self.actual_args[0].1.not_equals(Rc::new(0u128.into()));
if let Some(warning) = self.block_visitor.check_special_function_condition(
&non_zero,
"argument is zero",
self.callee_known_name,
) {
// The condition may be reachable and false. Promote it to a precondition if possible.
match (
self.block_visitor
.bv
.current_environment
.entry_condition
.extract_promotable_conjuncts(false),
non_zero.extract_promotable_disjuncts(false),
) {
(Some(promotable_entry_condition), Some(promotable_non_zero))
if self.block_visitor.bv.preconditions.len()
< k_limits::MAX_INFERRED_PRECONDITIONS =>
{
let condition = promotable_entry_condition
.logical_not()
.or(promotable_non_zero);
let precondition = Precondition {
condition,
message: warning,
provenance: None,
spans: vec![self.block_visitor.bv.current_span],
};
self.block_visitor.bv.preconditions.push(precondition);
}
_ => {
let warning = self.block_visitor.bv.cv.session.struct_span_warn(
self.block_visitor.bv.current_span,
warning.as_ref(),
);
self.block_visitor.bv.emit_diagnostic(warning);
}
}
}
}
let arg_type = ExpressionType::from(self.actual_argument_types[0].kind());
let bit_length = arg_type.bit_length();
self.actual_args[0]
.1
.intrinsic_bit_vector_unary(bit_length, self.callee_known_name)
}
KnownNames::StdIntrinsicsCeilf32
| KnownNames::StdIntrinsicsCeilf64
| KnownNames::StdIntrinsicsCosf32
| KnownNames::StdIntrinsicsCosf64
| KnownNames::StdIntrinsicsExp2f32
| KnownNames::StdIntrinsicsExp2f64
| KnownNames::StdIntrinsicsExpf32
| KnownNames::StdIntrinsicsExpf64
| KnownNames::StdIntrinsicsFabsf32
| KnownNames::StdIntrinsicsFabsf64
| KnownNames::StdIntrinsicsFloorf32
| KnownNames::StdIntrinsicsFloorf64
| KnownNames::StdIntrinsicsLog10f32
| KnownNames::StdIntrinsicsLog10f64
| KnownNames::StdIntrinsicsLog2f32
| KnownNames::StdIntrinsicsLog2f64
| KnownNames::StdIntrinsicsLogf32
| KnownNames::StdIntrinsicsLogf64
| KnownNames::StdIntrinsicsNearbyintf32
| KnownNames::StdIntrinsicsNearbyintf64
| KnownNames::StdIntrinsicsRintf32
| KnownNames::StdIntrinsicsRintf64
| KnownNames::StdIntrinsicsRoundf32
| KnownNames::StdIntrinsicsRoundf64
| KnownNames::StdIntrinsicsSinf32
| KnownNames::StdIntrinsicsSinf64
| KnownNames::StdIntrinsicsSqrtf32
| KnownNames::StdIntrinsicsSqrtf64
| KnownNames::StdIntrinsicsTruncf32
| KnownNames::StdIntrinsicsTruncf64 => {
checked_assume!(self.actual_args.len() == 1);
self.actual_args[0]
.1
.intrinsic_floating_point_unary(self.callee_known_name)
}
KnownNames::StdIntrinsicsCopysignf32
| KnownNames::StdIntrinsicsCopysignf64
| KnownNames::StdIntrinsicsFaddFast
| KnownNames::StdIntrinsicsFdivFast
| KnownNames::StdIntrinsicsFmulFast
| KnownNames::StdIntrinsicsFremFast
| KnownNames::StdIntrinsicsFsubFast
| KnownNames::StdIntrinsicsMaxnumf32
| KnownNames::StdIntrinsicsMaxnumf64
| KnownNames::StdIntrinsicsMinnumf32
| KnownNames::StdIntrinsicsMinnumf64
| KnownNames::StdIntrinsicsPowf32
| KnownNames::StdIntrinsicsPowf64
| KnownNames::StdIntrinsicsPowif32
| KnownNames::StdIntrinsicsPowif64 => {
checked_assume!(self.actual_args.len() == 2);
self.actual_args[0]
.1
.intrinsic_binary(self.actual_args[1].1.clone(), self.callee_known_name)
}
KnownNames::StdIntrinsicsMinAlignOfVal => self.handle_min_align_of_val(),
KnownNames::StdIntrinsicsMulWithOverflow => self.handle_checked_binary_operation(),
KnownNames::StdIntrinsicsOffset => self.handle_offset(),
KnownNames::StdIntrinsicsRawEq => self.handle_raw_eq(),
KnownNames::StdIntrinsicsSizeOf => self.handle_size_of(),
KnownNames::StdIntrinsicsSizeOfVal => self.handle_size_of_val(),
KnownNames::StdSliceCmpMemcmp => self.handle_memcmp(),
_ => abstract_value::BOTTOM.into(),
}
}
/// Fn::call, FnMut::call_mut, FnOnce::call_once all receive two arguments:
/// 1. A function pointer or closure instance to call.
/// 2. A tuple of argument values for the call.
/// The tuple is unpacked and the callee is then invoked with its normal function signature.
/// In the case of calling a closure, the closure signature includes the closure as the first argument.
///
/// Sync::Once::call_once receives two arguments
/// 1. A self pointer to the Once object
/// 2. The closure instance to call.
///
/// All of this happens in code that is not encoded as MIR, so MIRAI needs built in support for it.
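    /// A rough sketch (illustrative only, not from the test suite) of the kind of call this handles:
    /// ```ignore
    /// fn apply<F: Fn(i32, i32) -> i32>(f: F) -> i32 {
    ///     // The call below is lowered to `Fn::call(&f, (2, 3))`: the `(2, 3)` tuple is
    ///     // unpacked here and `&f` is prepended as the closure's self argument.
    ///     f(2, 3)
    /// }
    ///
    /// fn main() {
    ///     assert_eq!(apply(|x, y| x + y), 5);
    /// }
    /// ```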
#[logfn_inputs(TRACE)]
fn inline_indirectly_called_function(&mut self) {
checked_assume!(self.actual_args.len() == 2);
trace!("self.actual_args {:?}", self.actual_args);
trace!(
"self.actual_argument_types {:?}",
self.actual_argument_types
);
trace!(
"self.function_constant_args {:?}",
self.function_constant_args
);
// Get the function to call (it is either a function pointer or a closure)
let callee = if self.callee_known_name == KnownNames::StdSyncOnceCallOnce {
self.actual_args[1].1.clone()
} else {
self.actual_args[0].1.clone()
};
// Get the path of the tuple containing the arguments.
let callee_arg_array_path = self.actual_args[1].0.clone();
// Unpack the arguments. We use the generic arguments of the caller as a proxy for the callee function signature.
let generic_argument_types: Vec<Ty<'tcx>> = self
.callee_generic_arguments
.expect("call_once, etc. are generic")
.as_ref()
.iter()
.map(|gen_arg| gen_arg.expect_ty())
.collect();
let mut actual_argument_types: Vec<Ty<'tcx>>;
if self.callee_known_name == KnownNames::StdSyncOnceCallOnce {
actual_argument_types = vec![];
} else {
checked_assume!(generic_argument_types.len() == 2);
if let TyKind::Tuple(tuple_types) = generic_argument_types[1].kind() {
actual_argument_types = tuple_types
.iter()
.map(|gen_arg| gen_arg.expect_ty())
.collect();
} else {
assume_unreachable!("expected second type argument to be a tuple type");
}
}
let mut actual_args: Vec<(Rc<Path>, Rc<AbstractValue>)> = actual_argument_types
.iter()
.enumerate()
.map(|(i, t)| {
let arg_path = Path::new_field(callee_arg_array_path.clone(), i);
let arg_val = self
.block_visitor
.bv
.lookup_path_and_refine_result(arg_path.clone(), t);
(arg_path, arg_val)
})
.collect();
// Prepend the closure (if there is one) to the unpacked arguments vector.
// Also update the Self parameter in the arguments map.
let mut closure_ty = if self.callee_known_name == KnownNames::StdSyncOnceCallOnce {
self.actual_argument_types[1]
} else {
self.actual_argument_types[0]
};
let closure_ref_ty;
if let TyKind::Ref(_, ty, _) = closure_ty.kind() {
closure_ref_ty = closure_ty;
closure_ty = ty;
} else {
let tcx = self.block_visitor.bv.tcx;
closure_ref_ty = tcx.mk_mut_ref(tcx.lifetimes.re_static, closure_ty);
}
let mut argument_map = self.callee_generic_argument_map.clone();
if closure_ty.is_closure() {
if self.callee_known_name != KnownNames::StdSyncOnceCallOnce {
let closure_path = self.actual_args[0].0.clone();
let closure_reference = AbstractValue::make_reference(closure_path);
actual_args.insert(
0,
(
Path::get_as_path(closure_reference.clone()),
closure_reference,
),
);
actual_argument_types.insert(0, closure_ref_ty);
}
if let TyKind::Closure(def_id, substs) = closure_ty.kind() {
argument_map = self.type_visitor().get_generic_arguments_map(
*def_id,
substs.as_closure().substs,
&[],
);
}
}
let function_constant_args = self
.block_visitor
.get_function_constant_args(&actual_args, &actual_argument_types);
let callee_func_ref = self.block_visitor.get_func_ref(&callee);
if let Some(func_ref) = &callee_func_ref {
let func_const = ConstantDomain::Function(func_ref.clone());
let def_id = func_ref.def_id.expect("defined when used here");
if !closure_ty.is_closure() && self.block_visitor.bv.tcx.is_closure(def_id) {
// The function appears to be a closure with no captures, so provide the function pointer as the closure state
actual_args.insert(0, self.actual_args[0].clone());
actual_argument_types.insert(0, closure_ref_ty);
}
let generic_arguments = self.block_visitor.bv.cv.substs_cache.get(&def_id).cloned();
if let Some(substs) = generic_arguments {
argument_map = self.type_visitor().get_generic_arguments_map(
def_id,
substs,
&actual_argument_types,
)
}
let environment_before_call = self.block_visitor.bv.current_environment.clone();
let mut block_visitor = BlockVisitor::new(self.block_visitor.bv);
let mut indirect_call_visitor = CallVisitor::new(
&mut block_visitor,
def_id,
generic_arguments,
argument_map,
environment_before_call,
func_const,
);
indirect_call_visitor.actual_args = actual_args;
indirect_call_visitor.actual_argument_types = actual_argument_types;
indirect_call_visitor.function_constant_args = &function_constant_args;
indirect_call_visitor.callee_fun_val = callee.clone();
indirect_call_visitor.callee_known_name = KnownNames::None;
indirect_call_visitor.destination = self.destination;
let summary = indirect_call_visitor.get_function_summary();
if let Some(summary) = summary {
if summary.is_computed {
indirect_call_visitor.transfer_and_refine_into_current_environment(&summary);
}
if summary.is_incomplete
&& self
.block_visitor
.bv
.already_reported_errors_for_call_to
.insert(callee)
{
let saved_callee_def_id = self.callee_def_id;
self.callee_def_id = def_id;
self.report_incomplete_summary();
self.callee_def_id = saved_callee_def_id;
}
return;
}
};
if self
.block_visitor
.bv
.already_reported_errors_for_call_to
.insert(callee.clone())
{
debug!("unknown callee {:?}", callee);
self.block_visitor.report_missing_summary();
}
}
/// Replace the call result with an abstract value of the same type as the
/// destination place.
#[logfn_inputs(TRACE)]
fn handle_abstract_value(&mut self) {
if let Some((place, target)) = &self.destination {
let path = self.block_visitor.visit_rh_place(place);
let expression_type = self
.type_visitor()
.get_place_type(place, self.block_visitor.bv.current_span);
let abstract_value = AbstractValue::make_typed_unknown(expression_type, path.clone());
self.block_visitor.bv.update_value_at(path, abstract_value);
let exit_condition = self
.block_visitor
.bv
.current_environment
.entry_condition
.clone();
self.block_visitor
.bv
.current_environment
.exit_conditions
.insert_mut(*target, exit_condition);
} else {
assume_unreachable!();
}
}
/// Attach a tag to the first and only value in actual_args.
/// The tag type is indicated by a generic argument.
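    /// A minimal source-level sketch (illustrative; it assumes the tag-kind declaration
    /// pattern and macros documented for the `mirai_annotations` crate):
    /// ```ignore
    /// use mirai_annotations::*;
    ///
    /// struct SecretTaintKind<const MASK: TagPropagationSet> {}
    /// type SecretTaint = SecretTaintKind<TAG_PROPAGATION_ALL>;
    ///
    /// fn read_password(pw: &String) {
    ///     add_tag!(pw, SecretTaint); // reaches MIRAI as a call to MiraiAddTag
    /// }
    /// ```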
#[logfn_inputs(TRACE)]
fn handle_add_tag(&mut self) {
precondition!(self.actual_args.len() == 1);
if let Some(tag) = self.extract_tag_kind_and_propagation_set() {
let (source_path, source_rustc_type) = self.deref_tag_source();
trace!("MiraiAddTag: tagging {:?} with {:?}", source_path, tag);
// Check if the tagged value has a pointer type (e.g., a reference).
            // Emit a warning message if so.
if self.block_visitor.bv.check_for_errors && source_rustc_type.is_any_ptr() {
let warning = self.block_visitor.bv.cv.session.struct_span_err(
self.block_visitor.bv.current_span,
"the macro add_tag! expects its argument to be a reference to a non-reference value",
);
self.block_visitor.bv.emit_diagnostic(warning);
}
// Augment the tags associated at the source with a new tag.
self.block_visitor
.bv
.attach_tag_to_elements(tag, source_path, source_rustc_type);
}
// Update exit conditions.
self.use_entry_condition_as_exit_condition();
}
/// Returns a canonicalized dereferenced path to the first argument, along with the dereferenced
/// rustc type. If the dereferenced argument is a slice pointer, or a box, then return the
/// thin pointer path to the dereferenced value. In the case of a box, the argument path will
/// be a reference to the box, so the dereferenced thin pointer path will be (*p).0.0.
#[logfn_inputs(TRACE)]
fn deref_tag_source(&mut self) -> (Rc<Path>, Ty<'tcx>) {
precondition!(self.actual_args.len() == 1);
let source_pointer_path = self.actual_args[0].0.clone();
let source_pointer_rustc_type = self.actual_argument_types[0];
let mut source_rustc_type = self
.type_visitor()
.get_dereferenced_type(source_pointer_rustc_type);
let target_type = ExpressionType::from(source_rustc_type.kind());
let source_thin_pointer_path = if source_rustc_type.is_box() {
source_rustc_type = source_rustc_type.boxed_ty();
let box_path = Path::new_deref(source_pointer_path, target_type.clone())
.canonicalize(&self.block_visitor.bv.current_environment);
Path::new_field(Path::new_field(box_path, 0), 0)
} else if self
.type_visitor()
.is_slice_pointer(source_pointer_rustc_type.kind())
{
Path::new_field(source_pointer_path, 0)
} else {
source_pointer_path
};
let deref_path = Path::new_deref(source_thin_pointer_path, target_type)
.canonicalize(&self.block_visitor.bv.current_environment);
(deref_path, source_rustc_type)
}
/// Adds the first and only value in actual_args to the path condition of the destination.
/// No check is performed, since we get to assume this condition without proof.
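    /// A minimal sketch of the source-level usage, assuming the `assume!` macro from
    /// `mirai_annotations`:
    /// ```ignore
    /// use mirai_annotations::*;
    ///
    /// fn get(slice: &[i32], i: usize) -> i32 {
    ///     assume!(i < slice.len()); // conjoined into the path condition, without proof
    ///     slice[i]
    /// }
    /// ```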
#[logfn_inputs(TRACE)]
fn handle_assume(&mut self) {
precondition!(self.actual_args.len() == 1);
let assumed_condition = &self.actual_args[0].1;
        // If the assumed condition is provably false, ignore it; otherwise conjoin it
        // into the exit condition.
let exit_condition = if let Some(false) = assumed_condition.as_bool_if_known() {
self.block_visitor
.bv
.current_environment
.entry_condition
.clone()
} else {
// Give the assumed condition priority over the existing conjuncts when the and expression
// size overflows.
let assumed_condition =
AbstractValue::make_from(assumed_condition.expression.clone(), 0);
self.block_visitor
.bv
.current_environment
.entry_condition
.and(assumed_condition)
};
if let Some((_, target)) = &self.destination {
self.block_visitor
.bv
.current_environment
.exit_conditions
.insert_mut(*target, exit_condition);
} else {
assume_unreachable!();
}
if let Some(cleanup_target) = self.cleanup {
self.block_visitor
.bv
.current_environment
.exit_conditions
.insert_mut(cleanup_target, abstract_value::FALSE.into());
}
}
/// Check if a tag has been attached to the first and only value in actual_args.
/// The tag type is indicated by a generic argument.
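    /// A minimal sketch of the source-level usage (illustrative; it assumes the `verify!` and
    /// `does_not_have_tag!` macros from `mirai_annotations` and a `SecretTaint` tag type
    /// declared as in the tag documentation):
    /// ```ignore
    /// use mirai_annotations::*;
    ///
    /// fn log_message(msg: &String) {
    ///     verify!(does_not_have_tag!(msg, SecretTaint)); // reaches MIRAI as MiraiCheckTag
    /// }
    /// ```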
#[logfn_inputs(TRACE)]
fn handle_check_tag(&mut self, checking_presence: bool) {
precondition!(self.actual_args.len() == 1);
let result: Option<Rc<AbstractValue>>;
if let Some(tag) = self.extract_tag_kind_and_propagation_set() {
let (source_path, source_rustc_type) = self.deref_tag_source();
trace!(
"MiraiCheckTag: checking if {:?} has {}been tagged with {:?}",
source_path,
(if checking_presence { "" } else { "never " }),
tag,
);
// Check if the tagged value has a pointer type (e.g., a reference).
// Emit a warning message if so.
if self.block_visitor.bv.check_for_errors && source_rustc_type.is_any_ptr() {
let warning = self.block_visitor.bv.cv.session.struct_span_warn(
self.block_visitor.bv.current_span,
format!(
"the macro {} expects its first argument to be a reference to a non-reference value",
if checking_presence { "has_tag! "} else { "does_not_have_tag!" },
).as_str(),
);
self.block_visitor.bv.emit_diagnostic(warning);
}
// Get the value to check for the presence or absence of the tag
let (tag_field_path, tag_field_value) = self.get_possibly_tagged_value(
tag,
checking_presence,
source_path.clone(),
source_rustc_type,
);
// Decide the result of has_tag! or does_not_have_tag!.
let mut check_result =
AbstractValue::make_tag_check(tag_field_value, tag, checking_presence);
// If the tag can be propagated through sub-components we need to check the tag on the
// values that can contain source_path as a sub-component.
// Operationally, source_path is a qualified path and we check if any of its prefixes
// has the tag (when checking_presence = true), or if all of its prefixes does not have
// the tag (when checking_presence = false).
if tag.is_propagated_by(TagPropagation::SubComponent) {
let mut path_prefix = &tag_field_path;
while let PathEnum::QualifiedPath { qualifier, .. } = &path_prefix.value {
path_prefix = qualifier;
let path_prefix_rustc_type = self
.type_visitor()
.get_path_rustc_type(path_prefix, self.block_visitor.bv.current_span);
if !path_prefix_rustc_type.is_scalar() {
let tag_field_value = self
.block_visitor
.bv
.extract_tag_field_of_non_scalar_value_at(
path_prefix,
path_prefix_rustc_type,
)
.1;
if checking_presence {
// We are checking presence of a tag. It is equivalent to *any* prefix having the tag.
// Thus we use a logical or.
check_result = check_result.or(AbstractValue::make_tag_check(
tag_field_value,
tag,
checking_presence,
));
// Exits the loop if check_result is already true.
if check_result.as_bool_if_known().unwrap_or(false) {
break;
}
} else {
// We are checking absence of a tag. It is equivalent to *all* prefixes not having the tag.
// Thus we use a logical and.
check_result = check_result.and(AbstractValue::make_tag_check(
tag_field_value,
tag,
checking_presence,
));
// Exits the loop if check_result is already false.
if !check_result.as_bool_if_known().unwrap_or(true) {
break;
}
}
}
}
}
if tag.is_propagated_by(TagPropagation::SuperComponent) {
let value_map = self.block_visitor.bv.current_environment.value_map.clone();
for (_, value) in value_map
.iter()
.filter(|(p, _)| p.is_rooted_by(&source_path))
{
let mut value = value.clone();
if let Expression::Reference(p) = &value.expression {
if let PathEnum::HeapBlock { .. } = &p.value {
let layout_field = Path::new_layout(p.clone());
let (_, tag_field_value) = self
.block_visitor
.bv
.extract_tag_field_of_non_scalar_value_at(
&layout_field,
self.block_visitor.bv.tcx.types.trait_object_dummy_self,
);
value = tag_field_value.clone();
}
}
if checking_presence {
// We are checking presence of a tag. It is equivalent to *any* prefix having the tag.
// Thus we use a logical or.
check_result = check_result.or(AbstractValue::make_tag_check(
value,
tag,
checking_presence,
));
// Exits the loop if check_result is already true.
if check_result.as_bool_if_known().unwrap_or(false) {
break;
}
} else {
// We are checking absence of a tag. It is equivalent to *all* prefixes not having the tag.
// Thus we use a logical and.
check_result = check_result.and(AbstractValue::make_tag_check(
value,
tag,
checking_presence,
));
// Exits the loop if check_result is already false.
if !check_result.as_bool_if_known().unwrap_or(true) {
break;
}
}
}
}
result = Some(check_result);
} else {
result = None;
}
// Return the abstract result and update exit conditions.
let destination = &self.destination;
if let Some((place, _)) = destination {
let target_path = self.block_visitor.visit_rh_place(place);
self.block_visitor.bv.update_value_at(
target_path.clone(),
result.unwrap_or_else(|| {
AbstractValue::make_typed_unknown(ExpressionType::Bool, target_path)
}),
);
} else {
assume_unreachable!("expected the function call has a destination");
}
self.use_entry_condition_as_exit_condition();
}
/// Get the possibly tagged value associated with source_path.
    /// If the value at source path is a scalar value, it will just be that value.
/// If the value at source path is a structured value, it will be the value of its $tag field.
/// If the value at source path is a structure value that does not have a $tag field, but
/// the tag propagates to super components, it will be the tag field of a component of the
/// structure, if there is one.
#[logfn_inputs(TRACE)]
fn get_possibly_tagged_value(
&mut self,
tag: Tag,
checking_presence: bool,
source_path: Rc<Path>,
source_rustc_type: Ty<'tcx>,
) -> (Rc<Path>, Rc<AbstractValue>) {
if tag.is_propagated_by(TagPropagation::SuperComponent) {
match &source_path.value {
PathEnum::Computed { value } => match &value.expression {
Expression::ConditionalExpression {
condition,
consequent,
alternate,
} => {
let consequent_tag_value = self
.get_possibly_tagged_value(
tag,
checking_presence,
Path::new_computed(consequent.clone()),
source_rustc_type,
)
.1;
let alternate_tag_value = self
.get_possibly_tagged_value(
tag,
checking_presence,
Path::new_computed(alternate.clone()),
source_rustc_type,
)
.1;
return (
source_path.clone(),
condition
.conditional_expression(consequent_tag_value, alternate_tag_value),
);
}
Expression::Join { left, right, path } => {
let left_tag_value = self
.get_possibly_tagged_value(
tag,
checking_presence,
Path::new_computed(left.clone()),
source_rustc_type,
)
.1;
let right_tag_value = self
.get_possibly_tagged_value(
tag,
checking_presence,
Path::new_computed(right.clone()),
source_rustc_type,
)
.1;
let tag_path = Path::new_tag_field(path.clone());
return (
source_path.clone(),
left_tag_value.join(right_tag_value, &tag_path),
);
}
Expression::Offset { left, .. } => {
let left_tag_value = self
.get_possibly_tagged_value(
tag,
checking_presence,
Path::new_computed(left.clone()),
source_rustc_type,
)
.1;
return (source_path.clone(), left_tag_value);
}
Expression::Reference(path)
| Expression::Variable { path, .. }
| Expression::InitialParameterValue { path, .. } => {
return self.get_possibly_tagged_value(
tag,
checking_presence,
path.clone(),
source_rustc_type,
);
}
Expression::WidenedJoin { operand, path } => {
let operand_tag_value = self
.get_possibly_tagged_value(
tag,
checking_presence,
Path::new_computed(operand.clone()),
source_rustc_type,
)
.1;
let tag_path = Path::new_tag_field(path.clone());
return (tag_path.clone(), operand_tag_value.widen(&tag_path));
}
_ => {}
},
PathEnum::HeapBlock { .. } => {
let layout_field = Path::new_layout(source_path.clone());
let (_, tag_field_value) = self
.block_visitor
.bv
.extract_tag_field_of_non_scalar_value_at(
&layout_field,
self.block_visitor.bv.tcx.types.trait_object_dummy_self,
);
return (layout_field, tag_field_value);
}
PathEnum::Offset { value, .. } => {
if let Expression::Offset { left, .. } = &value.expression {
return self.get_possibly_tagged_value(
tag,
checking_presence,
Path::new_computed(left.clone()),
source_rustc_type,
);
}
}
PathEnum::QualifiedPath {
qualifier,
selector,
..
} if **selector == PathSelector::Deref => {
let val_at = self
.block_visitor
.bv
.current_environment
.value_at(qualifier)
.cloned();
return if let Some(value) = val_at {
match &value.expression {
Expression::Variable { path, .. }
| Expression::InitialParameterValue { path, .. } => {
let ty = self
.type_visitor()
.get_path_rustc_type(path, self.block_visitor.bv.current_span);
self.get_possibly_tagged_value(
tag,
checking_presence,
path.clone(),
ty,
)
}
Expression::Offset { left, .. } => {
let target_type = ExpressionType::from(source_rustc_type.kind());
let deref_value = left.dereference(target_type);
self.get_possibly_tagged_value(
tag,
checking_presence,
Path::new_computed(deref_value),
source_rustc_type,
)
}
_ => {
let target_type = ExpressionType::from(source_rustc_type.kind());
let deref_value = value.dereference(target_type);
self.get_possibly_tagged_value(
tag,
checking_presence,
Path::new_computed(deref_value),
source_rustc_type,
)
}
}
} else {
let ty = self
.type_visitor()
.get_path_rustc_type(qualifier, self.block_visitor.bv.current_span);
self.get_possibly_tagged_value(
tag,
checking_presence,
qualifier.clone(),
ty,
)
};
}
_ => {
debug!("path val {:?}", source_path.value);
}
}
}
// If the value located at source_path has sub-components, extract its tag field.
// Otherwise, the source value is a scalar, i.e., tags are associated with it directly,
// so we use the value itself as the tag field value.
if !source_rustc_type.is_scalar() {
self.block_visitor
.bv
.extract_tag_field_of_non_scalar_value_at(&source_path, source_rustc_type)
} else {
(
source_path.clone(),
self.block_visitor
.bv
.lookup_path_and_refine_result(source_path, source_rustc_type),
)
}
}
/// Update the state so that the call result is the value of the model field (or the default
/// value if there is no field).
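    /// A minimal sketch of the source-level usage (illustrative; `Queue` and its `len` model
    /// field are hypothetical and the macro shape follows the `mirai_annotations` documentation):
    /// ```ignore
    /// use mirai_annotations::*;
    ///
    /// fn is_empty(q: &Queue) -> bool {
    ///     // Reads the abstract `len` model field, defaulting to 0 if it was never set.
    ///     get_model_field!(q, len, 0) == 0
    /// }
    /// ```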
#[logfn_inputs(TRACE)]
fn handle_get_model_field(&mut self) {
let destination = self.destination;
if let Some((place, _)) = &destination {
let target_type = self
.type_visitor()
.get_rustc_place_type(place, self.block_visitor.bv.current_span);
checked_assume!(self.actual_args.len() == 3);
            // The current value, if any, of the model field is a set of (path, value) pairs
// where each path is rooted by qualifier.model_field(..)
let mut qualifier = Path::get_as_path(self.actual_args[0].1.clone());
if matches!(&self.actual_argument_types[0].kind(), TyKind::Ref { .. }) {
let target_type = ExpressionType::from(
self.type_visitor()
.get_dereferenced_type(self.actual_argument_types[0])
.kind(),
);
qualifier = Path::new_deref(qualifier, target_type);
}
let field_name =
self.coerce_to_string(&Path::get_as_path(self.actual_args[1].1.clone()));
let source_path = Path::new_model_field(qualifier, field_name)
.canonicalize(&self.block_visitor.bv.current_environment);
let target_path = self.block_visitor.visit_rh_place(place);
if self
.block_visitor
.bv
.current_environment
.value_at(&source_path)
.is_some()
{
// Move the model field (path, val) pairs to the target (i.e. the place where
                // the return value of a call to the mirai_get_model_field function would go if
                // it were a normal call).
self.block_visitor.bv.copy_or_move_elements(
target_path,
source_path,
target_type,
true,
);
} else {
// If there is no value for the model field in the environment, we should
// use the default value, but only if the qualifier is not rooted in a parameter
// value since only the caller will know what the values of the fields are.
match &self.actual_args[0].1.expression {
Expression::Reference(path)
| Expression::InitialParameterValue { path, .. }
| Expression::Variable { path, .. }
if path.is_rooted_by_parameter() =>
{
//todo: if the default value is a non primitive then we lose the structure
// using the code below. That is wrong. Generalize the default field.
let rval = AbstractValue::make_from(
Expression::UnknownModelField {
path: source_path,
default: self.actual_args[2].1.clone(),
},
1,
);
self.block_visitor.bv.update_value_at(target_path, rval);
}
_ => {
let source_path = Path::get_as_path(self.actual_args[2].1.clone());
self.block_visitor.bv.copy_or_move_elements(
target_path,
source_path,
target_type,
true,
);
}
}
}
self.use_entry_condition_as_exit_condition();
} else {
assume_unreachable!();
}
}
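    /// Conjoins the first argument of the call (the condition) into the exit condition of the
    /// target block, so that it is assumed to hold after the call returns.
    /// A minimal sketch of the source-level usage, assuming the `postcondition!` macro from
    /// `mirai_annotations`:
    /// ```ignore
    /// use mirai_annotations::*;
    ///
    /// fn clamp_to_zero(x: i32) -> i32 {
    ///     let result = if x < 0 { 0 } else { x };
    ///     postcondition!(result >= 0);
    ///     result
    /// }
    /// ```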
fn handle_post_condition(&mut self) {
precondition!(self.actual_args.len() == 3);
let condition = self.actual_args[0].1.clone();
let exit_condition = self
.block_visitor
.bv
.current_environment
.entry_condition
.and(condition);
if let Some((_, target)) = &self.destination {
self.block_visitor
.bv
.current_environment
.exit_conditions
.insert_mut(*target, exit_condition);
} else {
assume_unreachable!();
}
}
    /// It is bad style for a precondition to be reached conditionally, since that condition
    /// should itself be part of the precondition.
#[logfn_inputs(TRACE)]
fn handle_precondition_start(&mut self) {
if self.block_visitor.bv.check_for_errors
&& self.block_visitor.bv.check_for_unconditional_precondition
&& !self
.block_visitor
.bv
.current_environment
.entry_condition
.as_bool_if_known()
.unwrap_or(false)
{
let span = self.block_visitor.bv.current_span;
let warning = self
.block_visitor
.bv
.cv
.session
.struct_span_warn(span, "preconditions should be reached unconditionally");
self.block_visitor.bv.emit_diagnostic(warning);
self.block_visitor.bv.check_for_unconditional_precondition = false;
}
let exit_condition = self
.block_visitor
.bv
.current_environment
.entry_condition
.clone();
if let Some((_, target)) = &self.destination {
self.block_visitor
.bv
.current_environment
.exit_conditions
.insert_mut(*target, exit_condition);
} else {
assume_unreachable!();
}
}
    /// Adds the condition (the first of the two values in actual_args) to the current list of preconditions.
/// No check is performed, since we get to assume the caller has verified this condition.
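    /// A minimal sketch of the source-level usage, assuming the two-argument form of the
    /// `precondition!` macro from `mirai_annotations`:
    /// ```ignore
    /// use mirai_annotations::*;
    ///
    /// fn divide(x: u64, y: u64) -> u64 {
    ///     precondition!(y != 0, "y must not be zero");
    ///     x / y
    /// }
    /// ```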
#[logfn_inputs(TRACE)]
fn handle_precondition(&mut self) {
precondition!(self.actual_args.len() == 2);
if self.block_visitor.bv.check_for_errors {
let condition = self.actual_args[0].1.clone();
//todo: give diagnostic if the condition contains a local variable.
let message = self.coerce_to_string(&Path::get_as_path(self.actual_args[1].1.clone()));
let precondition = Precondition {
condition,
message,
provenance: None,
spans: vec![self.block_visitor.bv.current_span],
};
self.block_visitor.bv.preconditions.push(precondition);
}
}
/// Update the state to reflect the assignment of the model field.
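    /// A minimal sketch of the source-level usage (illustrative; `Queue` and its `len` model
    /// field are hypothetical and the macro shape follows the `mirai_annotations` documentation):
    /// ```ignore
    /// use mirai_annotations::*;
    ///
    /// fn push(q: &mut Queue, value: i32) {
    ///     // ... store the value ...
    ///     set_model_field!(q, len, get_model_field!(q, len, 0) + 1);
    /// }
    /// ```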
#[logfn_inputs(TRACE)]
fn handle_set_model_field(&mut self) {
checked_assume!(self.actual_args.len() == 3);
let destination = self.destination;
if let Some((_, target)) = &destination {
let mut qualifier = Path::get_as_path(self.actual_args[0].1.clone());
if matches!(&self.actual_argument_types[0].kind(), TyKind::Ref { .. }) {
let target_type = ExpressionType::from(
self.type_visitor()
.get_dereferenced_type(self.actual_argument_types[0])
.kind(),
);
qualifier = Path::new_deref(qualifier, target_type);
}
let field_name =
self.coerce_to_string(&Path::get_as_path(self.actual_args[1].1.clone()));
let target_path = Path::new_model_field(qualifier, field_name)
.canonicalize(&self.block_visitor.bv.current_environment);
let source_path = Path::get_as_path(self.actual_args[2].1.clone());
let target_type = self.actual_argument_types[2];
self.block_visitor.bv.copy_or_move_elements(
target_path,
source_path,
target_type,
true,
);
let exit_condition = self
.block_visitor
.bv
.current_environment
.entry_condition
.clone();
self.block_visitor
.bv
.current_environment
.exit_conditions
.insert_mut(*target, exit_condition);
} else {
assume_unreachable!();
}
}
/// Removes the heap block and all paths rooted in it from the current environment.
#[logfn_inputs(TRACE)]
fn handle_rust_dealloc(&mut self) -> Rc<AbstractValue> {
checked_assume!(self.actual_args.len() == 3);
        // The current environment is that of the caller, but the caller is a standard
// library function and has no interesting state to purge.
// The layout path inserted below will become a side effect of the caller and when that
// side effect is refined by the caller's caller, the refinement will do the purge if the
// qualifier of the path is a heap block path.
// Get path to the heap block to deallocate
let heap_block_path = Path::get_as_path(self.actual_args[0].1.clone());
// Create a layout
let length = self.actual_args[1].1.clone();
let alignment = self.actual_args[2].1.clone();
let layout = AbstractValue::make_from(
Expression::HeapBlockLayout {
length,
alignment,
source: LayoutSource::DeAlloc,
},
1,
);
// Get a layout path and update the environment
let layout_path = Path::new_layout(heap_block_path)
.canonicalize(&self.block_visitor.bv.current_environment);
self.block_visitor.bv.update_value_at(layout_path, layout);
// Signal to the caller that there is no return result
abstract_value::BOTTOM.into()
}
/// Copies a slice of elements from the source to the destination.
#[logfn_inputs(TRACE)]
fn handle_copy_non_overlapping(&mut self) {
checked_assume!(self.actual_args.len() == 3);
let source_path = Path::new_deref(
Path::get_as_path(self.actual_args[0].1.clone()),
ExpressionType::NonPrimitive,
);
let target_root = Path::new_deref(
Path::get_as_path(self.actual_args[1].1.clone()),
ExpressionType::NonPrimitive,
);
let count = self.actual_args[2].1.clone();
let target_path = Path::new_slice(target_root, count);
let collection_type = self.actual_argument_types[0];
self.block_visitor.bv.copy_or_move_elements(
target_path,
source_path,
collection_type,
false,
);
self.use_entry_condition_as_exit_condition();
}
/// Returns the value of the discriminant for the variant in 'v',
/// cast to a `u64`; if `T` has no discriminant, returns 0.
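    /// For example (illustrative), given `enum E { A, B }` the discriminant value of `&E::B`
    /// is 1, while for a struct or primitive type argument the result is 0.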
#[logfn_inputs(TRACE)]
fn handle_discriminant_value(&mut self) {
checked_assume!(self.actual_args.len() == 1);
if let Some((place, _)) = &self.destination {
let target_type = ExpressionType::from(
self.type_visitor()
.get_dereferenced_type(self.actual_argument_types[0])
.kind(),
);
let discriminant_path = Path::new_discriminant(Path::new_deref(
Path::get_as_path(self.actual_args[0].1.clone()),
target_type,
))
.canonicalize(&self.block_visitor.bv.current_environment);
let mut discriminant_value = self.block_visitor.bv.lookup_path_and_refine_result(
discriminant_path,
self.block_visitor.bv.tcx.types.u128,
);
// If `T` has no discriminant, return 0.
match self.callee_generic_arguments {
None => assume_unreachable!(
"expected discriminant_value function call to have a generic argument"
),
Some(rustc_gen_args) => {
checked_assume!(rustc_gen_args.len() == 1);
match rustc_gen_args[0].unpack() {
GenericArgKind::Type(ty) => match ty.kind() {
TyKind::Adt(def, _) if def.is_enum() => {}
TyKind::Generator(..) => {}
_ => {
discriminant_value = Rc::new(ConstantDomain::U128(0).into());
}
},
_ => {
// The rust type checker should ensure that the generic argument is a type.
assume_unreachable!(
"expected the generic argument of discriminant_value function calls to be a type"
);
}
}
}
}
let target_path = self.block_visitor.visit_rh_place(place);
self.block_visitor
.bv
.update_value_at(target_path, discriminant_value);
}
self.use_entry_condition_as_exit_condition();
}
/// Swaps a slice of elements from the source to the destination.
#[logfn_inputs(TRACE)]
fn handle_swap_non_overlapping(&mut self) {
checked_assume!(self.actual_args.len() == 3);
let ty = self.actual_argument_types[0];
let target_root = Path::new_deref(
Path::get_as_path(self.actual_args[0].1.clone()),
ExpressionType::NonPrimitive,
);
let source_root = Path::new_deref(
Path::get_as_path(self.actual_args[1].1.clone()),
ExpressionType::NonPrimitive,
);
let count = self.actual_args[2].1.clone();
let source_slice = Path::new_slice(source_root.clone(), count.clone());
let target_slice = Path::new_slice(target_root.clone(), count.clone());
let temp_root = Path::new_local(999_999, 0);
let temp_slice = Path::new_slice(temp_root.clone(), count);
self.block_visitor
.bv
.copy_or_move_elements(temp_slice, target_root, ty, true);
self.block_visitor
.bv
.copy_or_move_elements(target_slice, source_root, ty, true);
self.block_visitor
.bv
.copy_or_move_elements(source_slice, temp_root, ty, true);
self.use_entry_condition_as_exit_condition();
}
/// Returns a new heap memory block with the given byte length.
#[logfn_inputs(TRACE)]
fn handle_rust_alloc(&mut self) -> Rc<AbstractValue> {
checked_assume!(self.actual_args.len() == 2);
let length = self.actual_args[0].1.clone();
let alignment = self.actual_args[1].1.clone();
let tcx = self.block_visitor.bv.tcx;
let byte_slice = tcx.mk_slice(tcx.types.u8);
let heap_path = Path::get_as_path(
self.block_visitor
.bv
.get_new_heap_block(length, alignment, false, byte_slice),
);
AbstractValue::make_reference(heap_path)
}
/// Returns a new heap memory block with the given byte length and with the zeroed flag set.
#[logfn_inputs(TRACE)]
fn handle_rust_alloc_zeroed(&mut self) -> Rc<AbstractValue> {
checked_assume!(self.actual_args.len() == 2);
let length = self.actual_args[0].1.clone();
let alignment = self.actual_args[1].1.clone();
let tcx = self.block_visitor.bv.tcx;
let byte_slice = tcx.mk_slice(tcx.types.u8);
let heap_path = Path::get_as_path(
self.block_visitor
.bv
.get_new_heap_block(length, alignment, true, byte_slice),
);
AbstractValue::make_reference(heap_path)
}
/// Sets the length of the heap block to a new value and removes index paths as necessary
    /// if the new length is known and less than the old length.
#[logfn_inputs(TRACE)]
fn handle_rust_realloc(&mut self) -> Rc<AbstractValue> {
checked_assume!(self.actual_args.len() == 4);
// Get path to the heap block to reallocate
let target_type = ExpressionType::from(
self.type_visitor()
.get_dereferenced_type(self.actual_argument_types[0])
.kind(),
);
let heap_block_path = Path::new_deref(
Path::get_as_path(self.actual_args[0].1.clone()),
target_type,
);
// Create a layout
let length = self.actual_args[1].1.clone();
let alignment = self.actual_args[2].1.clone();
let new_length = self.actual_args[3].1.clone();
        // We need this to check for consistency between the realloc layout arg and the
// initial alloc layout.
let layout_param = AbstractValue::make_from(
Expression::HeapBlockLayout {
length,
alignment: alignment.clone(),
source: LayoutSource::ReAlloc,
},
1,
);
// We need this to keep track of the new length
let new_length_layout = AbstractValue::make_from(
Expression::HeapBlockLayout {
length: new_length,
alignment,
source: LayoutSource::ReAlloc,
},
1,
);
// Get a layout path and update the environment
let layout_path = Path::new_layout(heap_block_path)
.canonicalize(&self.block_visitor.bv.current_environment);
self.block_visitor
.bv
.update_value_at(layout_path.clone(), new_length_layout);
let layout_path2 = Path::new_layout(layout_path);
self.block_visitor
.bv
.update_value_at(layout_path2, layout_param);
// Return the original heap block reference as the result
self.actual_args[0].1.clone()
}
/// Set the call result to an offset derived from the arguments. Does no checking.
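    /// For example (illustrative), for a `*const u32` base pointer and an offset of 3 the result
    /// is the base address plus 12 bytes, i.e. 3 * size_of::<u32>().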
#[logfn_inputs(TRACE)]
fn handle_arith_offset(&mut self) -> Rc<AbstractValue> {
checked_assume!(self.actual_args.len() == 2);
let base_val = self.actual_args[0].1.clone();
let offset_val = self.actual_args[1].1.clone();
let offset_scale = self.handle_size_of().cast(ExpressionType::Isize);
let offset_in_bytes = offset_val.multiply(offset_scale);
base_val.offset(offset_in_bytes)
}
/// Update the state to reflect a call to an intrinsic binary operation that returns a tuple
/// of an operation result, modulo its max value, and a flag that indicates if the max value
/// was exceeded.
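    /// For example (illustrative), a checked `u8` multiplication of 200 by 2 produces the pair
    /// `(144, true)`, since 400 mod 256 == 144 and the operation overflowed.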
#[logfn_inputs(TRACE)]
fn handle_checked_binary_operation(&mut self) -> Rc<AbstractValue> {
checked_assume!(self.actual_args.len() == 2);
if let Some((target_place, _)) = &self.destination {
let bin_op = match self.callee_known_name {
KnownNames::StdIntrinsicsMulWithOverflow => mir::BinOp::Mul,
_ => assume_unreachable!(),
};
let target_path = self.block_visitor.visit_rh_place(target_place);
let path0 = Path::new_field(target_path.clone(), 0);
let path1 = Path::new_field(target_path.clone(), 1);
let target_type = self
.type_visitor()
.get_target_path_type(&path0, self.block_visitor.bv.current_span);
let left = self.actual_args[0].1.clone();
let right = self.actual_args[1].1.clone();
let modulo = target_type.modulo_value();
let (result, overflow_flag) =
BlockVisitor::do_checked_binary_op(bin_op, target_type.clone(), left, right);
let (modulo_result, overflow_flag) = if !modulo.is_bottom() {
(result.remainder(target_type.modulo_value()), overflow_flag)
} else {
// todo: figure out an expression that represents the truncated overflow of a
// signed operation.
let unknown_typed_value =
AbstractValue::make_typed_unknown(target_type.clone(), path0.clone());
(
overflow_flag.conditional_expression(unknown_typed_value, result),
overflow_flag,
)
};
self.block_visitor.bv.update_value_at(path0, modulo_result);
self.block_visitor.bv.update_value_at(path1, overflow_flag);
AbstractValue::make_typed_unknown(target_type, target_path)
} else {
assume_unreachable!();
}
}
/// Set the call result to an offset derived from the arguments.
/// Checks that the resulting offset is either in bounds or one
/// byte past the end of an allocated object.
#[logfn_inputs(TRACE)]
fn handle_offset(&mut self) -> Rc<AbstractValue> {
checked_assume!(self.actual_args.len() == 2);
let base_val = self.actual_args[0].1.clone();
let offset_val = self.actual_args[1].1.clone();
let offset_scale = self.handle_size_of().cast(ExpressionType::Isize);
let offset_in_bytes = offset_val.multiply(offset_scale);
let result = base_val.offset(offset_in_bytes);
if self.block_visitor.bv.check_for_errors
&& self.block_visitor.bv.function_being_analyzed_is_root()
{
self.block_visitor.bv.check_offset(&result)
}
//todo: if the function is not root, promote this to a precondition
result
}
/// Moves `source` into the referenced `dest`, returning the previous `dest` value.
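    /// A minimal sketch of the call being modeled:
    /// ```ignore
    /// let mut x = 1;
    /// let old = std::mem::replace(&mut x, 5);
    /// assert_eq!(old, 1);
    /// assert_eq!(x, 5);
    /// ```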
#[logfn_inputs(TRACE)]
fn handle_mem_replace(&mut self) {
checked_assume!(self.actual_args.len() == 2);
let target_type = ExpressionType::from(
self.type_visitor()
.get_dereferenced_type(self.actual_argument_types[0])
.kind(),
);
let dest_path = Path::new_deref(
Path::get_as_path(self.actual_args[0].1.clone()),
target_type,
)
.canonicalize(&self.block_visitor.bv.current_environment);
let source_path = &Path::get_as_path(self.actual_args[1].1.clone());
if let Some((place, _)) = &self.destination {
let target_path = self.block_visitor.visit_rh_place(place);
let root_rustc_type = self
.type_visitor()
.get_rustc_place_type(place, self.block_visitor.bv.current_span);
// Return the old value of dest_path
self.block_visitor.bv.copy_or_move_elements(
target_path,
dest_path.clone(),
root_rustc_type,
true,
);
// Move value at source path into dest path
self.block_visitor.bv.copy_or_move_elements(
dest_path,
source_path.clone(),
root_rustc_type,
true,
);
}
self.use_entry_condition_as_exit_condition();
}
/// Gets the size in bytes of the type parameter T of the std::mem::size_of<T> function.
/// Returns an unknown value of type u128 if T is not a concrete type.
#[logfn_inputs(TRACE)]
fn handle_size_of(&mut self) -> Rc<AbstractValue> {
let sym = rustc_span::Symbol::intern("T");
let t = (self.callee_generic_argument_map.as_ref())
.expect("std::intrinsics::size_of must be called with generic arguments")
.get(&sym)
.expect("std::intrinsics::size_of must have generic argument T")
.expect_ty();
let param_env = self.block_visitor.bv.tcx.param_env(self.callee_def_id);
if let Ok(ty_and_layout) = self.block_visitor.bv.tcx.layout_of(param_env.and(t)) {
if !ty_and_layout.is_unsized() {
return Rc::new((ty_and_layout.layout.size.bytes() as u128).into());
}
}
let path = self.block_visitor.visit_rh_place(
&self
.destination
.expect("std::intrinsics::size_of should have a destination")
.0,
);
AbstractValue::make_typed_unknown(ExpressionType::U128, path)
}
/// Determines whether the raw bytes of the two values are equal.
fn handle_raw_eq(&mut self) -> Rc<AbstractValue> {
checked_assume!(self.actual_args.len() == 2);
let left_val = self.actual_args[0].1.clone();
let right_val = self.actual_args[1].1.clone();
let len_val = self.handle_size_of();
let zero = Rc::new(ConstantDomain::U128(0).into());
AbstractValue::make_memcmp(left_val, right_val, len_val).equals(zero)
}
    /// Calls the implementation-provided `memcmp`.
///
/// Interprets the data as u8.
///
/// Returns 0 for equal, < 0 for less than and > 0 for greater than.
fn handle_memcmp(&mut self) -> Rc<AbstractValue> {
checked_assume!(self.actual_args.len() == 3);
let left_val = self.actual_args[0].1.clone();
let right_val = self.actual_args[1].1.clone();
let len_val = self.actual_args[2].1.clone();
AbstractValue::make_memcmp(left_val, right_val, len_val)
}
/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to.
///
/// Every reference to a value of the type `T` must be a multiple of this number.
fn handle_min_align_of_val(&mut self) -> Rc<AbstractValue> {
let param_env = self.block_visitor.bv.tcx.param_env(self.callee_def_id);
checked_assume!(self.actual_argument_types.len() == 1);
let t = self
.type_visitor()
.get_dereferenced_type(self.actual_argument_types[0]);
if let Ok(ty_and_layout) = self.block_visitor.bv.tcx.layout_of(param_env.and(t)) {
return Rc::new((ty_and_layout.layout.align.abi.bytes() as u128).into());
}
// todo: need an expression that resolves to the value size once the value is known (typically after call site refinement).
let path = self.block_visitor.visit_rh_place(
&self
.destination
.expect("std::intrinsics::min_align_of_val should have a destination")
.0,
);
AbstractValue::make_typed_unknown(ExpressionType::U128, path)
}
/// Returns the size of the pointed-to value in bytes.
///
/// This is usually the same as `size_of::<T>()`. However, when `T` *has* no
/// statically-known size, e.g., a slice [`[T]`][slice] or a [trait object],
/// then `size_of_val` can be used to get the dynamically-known size.
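    /// For example, for a `&[u16]` slice with three elements the result is 6 bytes, computed
    /// below as the slice length times the statically-known element size.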
#[logfn_inputs(TRACE)]
fn handle_size_of_val(&mut self) -> Rc<AbstractValue> {
let param_env = self.block_visitor.bv.tcx.param_env(self.callee_def_id);
checked_assume!(self.actual_argument_types.len() == 1);
let t = self.actual_argument_types[0];
checked_assume!(self.actual_args.len() == 1);
let val = &self.actual_args[0].1;
if matches!(val.expression, Expression::HeapBlock { .. }) {
// If the value is heap allocated, we can get its size from the layout path
let heap_path = Path::get_as_path(val.clone());
let layout_path = Path::new_layout(heap_path);
let layout_val = self.block_visitor.bv.lookup_path_and_refine_result(
layout_path,
ExpressionType::NonPrimitive.as_rustc_type(self.block_visitor.bv.tcx),
);
if let Expression::HeapBlockLayout { length, .. } = &layout_val.expression {
return length.clone();
}
} else if self.type_visitor().is_slice_pointer(t.kind()) {
let elem_t = self.type_visitor().get_element_type(t);
if let Ok(ty_and_layout) = self.block_visitor.bv.tcx.layout_of(param_env.and(elem_t)) {
if !ty_and_layout.is_unsized() {
let elem_size_val: Rc<AbstractValue> =
Rc::new((ty_and_layout.layout.size.bytes() as u128).into());
let length_path =
Path::new_length(Path::get_as_path(self.actual_args[0].1.clone()));
let len_val = self.block_visitor.bv.lookup_path_and_refine_result(
length_path,
ExpressionType::Usize.as_rustc_type(self.block_visitor.bv.tcx),
);
return len_val.multiply(elem_size_val);
}
}
}
let sym = rustc_span::Symbol::intern("T");
let t = (self.callee_generic_argument_map.as_ref())
.expect("std::intrinsics::size_of_val must be called with generic arguments")
.get(&sym)
.expect("std::intrinsics::size_of_val must have generic argument T")
.expect_ty();
if let Ok(ty_and_layout) = self.block_visitor.bv.tcx.layout_of(param_env.and(t)) {
if !ty_and_layout.is_unsized() {
return Rc::new((ty_and_layout.layout.size.bytes() as u128).into());
}
}
// todo: need an expression that resolves to the value size once the value is known (typically after call site refinement).
let path = self.block_visitor.visit_rh_place(
&self
.destination
.expect("std::intrinsics::size_of_value should have a destination")
.0,
);
AbstractValue::make_typed_unknown(ExpressionType::U128, path)
}
/// Reinterprets the bits of a value of one type as another type.
///
/// Both types must have the same size. Neither the original, nor the result,
/// may be an [invalid value](../../nomicon/what-unsafe-does.html).
///
/// `transmute` is semantically equivalent to a bitwise move of one type
/// into another. It copies the bits from the source value into the
/// destination value, then forgets the original. It's equivalent to C's
/// `memcpy` under the hood, just like `transmute_copy`.
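    /// A minimal sketch of the call being modeled:
    /// ```ignore
    /// let bits: u32 = unsafe { std::mem::transmute(1.0f32) };
    /// assert_eq!(bits, 0x3f80_0000);
    /// ```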
#[logfn_inputs(TRACE)]
fn handle_transmute(&mut self) {
checked_assume!(self.actual_args.len() == 1);
let source_path = Path::get_as_path(self.actual_args[0].1.clone());
let source_rustc_type = self
.callee_generic_arguments
.expect("rustc type error")
.get(0)
.expect("rustc type error")
.expect_ty();
if let Some((place, _)) = &self.destination {
let target_path = self.block_visitor.visit_rh_place(place);
let target_rustc_type = self
.type_visitor()
.get_rustc_place_type(place, self.block_visitor.bv.current_span);
self.block_visitor.bv.copy_and_transmute(
source_path,
source_rustc_type,
target_path,
target_rustc_type,
);
}
self.use_entry_condition_as_exit_condition();
}
#[logfn_inputs(TRACE)]
fn handle_write_bytes(&mut self) {
checked_assume!(self.actual_args.len() == 3);
let target_type = ExpressionType::from(
self.type_visitor()
.get_dereferenced_type(self.actual_argument_types[0])
.kind(),
);
let dest_path = Path::new_deref(self.actual_args[0].0.clone(), target_type)
.canonicalize(&self.block_visitor.bv.current_environment);
let dest_type = self.actual_argument_types[0];
let source_path = self.actual_args[1]
.0
.canonicalize(&self.block_visitor.bv.current_environment);
let byte_value = &self.actual_args[1].1;
let count_value = self.actual_args[2].1.clone();
let elem_type = self
.callee_generic_arguments
.expect("write_bytes<T>")
.get(0)
.expect("write_bytes<T>")
.expect_ty();
let mut elem_size = self.type_visitor().get_type_size(elem_type);
fn repeated_bytes(mut elem_size: u64, byte_value: &Rc<AbstractValue>) -> Rc<AbstractValue> {
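            // For example, with elem_size == 4 and byte_value == 0xAB this builds 0xABAB_ABAB.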
let const_8: Rc<AbstractValue> = Rc::new(8u128.into());
let mut source_value = byte_value.clone();
while elem_size > 1 {
source_value = source_value
.shift_left(const_8.clone())
.bit_or(byte_value.clone());
elem_size -= 1;
}
source_value
}
if elem_type.is_primitive() {
let dest_pattern = Path::new_slice(dest_path, count_value);
let source_value = repeated_bytes(elem_size, byte_value);
self.block_visitor
.bv
.update_value_at(dest_pattern, source_value);
} else if let Expression::CompileTimeConstant(ConstantDomain::U128(count)) =
&count_value.expression
{
if let TyKind::Adt(..) | TyKind::Tuple(..) = &elem_type.kind() {
for i in 0..(*count as usize) {
let dest_field = Path::new_field(dest_path.clone(), i);
let field_type = self
.type_visitor()
.get_path_rustc_type(&dest_field, self.block_visitor.bv.current_span);
let field_size = self.type_visitor().get_type_size(field_type);
elem_size -= field_size;
let field_value = repeated_bytes(field_size, byte_value);
self.block_visitor
.bv
.update_value_at(dest_field, field_value);
if elem_size == 0 {
break;
}
}
} else {
if *count > 1 {
warn!(
"unhandled call to write_bytes<{:?}>({:?}: {:?}, {:?}, {:?})",
elem_type,
self.actual_args[0],
dest_type,
self.actual_args[1],
self.actual_args[2]
);
}
self.block_visitor.bv.copy_or_move_elements(
dest_path,
source_path,
elem_type,
false,
);
}
} else {
warn!(
"unhandled call to write_bytes at {:?}",
self.block_visitor.bv.current_span
);
info!("elem_type {:?}", elem_type);
info!("dest {:?}", self.actual_args[0]);
info!("dest_type {:?}", dest_type);
info!("val {:?}", self.actual_args[1]);
info!("count {:?}", self.actual_args[2]);
}
self.use_entry_condition_as_exit_condition();
}
    /// Give a diagnostic depending on the diag_level option.
#[logfn_inputs(TRACE)]
pub fn report_incomplete_summary(&mut self) {
if self.block_visitor.might_be_reachable().unwrap_or(true) {
if let Some(promotable_entry_condition) = self
.block_visitor
.bv
.current_environment
.entry_condition
.extract_promotable_conjuncts(false)
{
if promotable_entry_condition.as_bool_if_known().is_none() {
let precondition = Precondition {
condition: promotable_entry_condition.logical_not(),
message: Rc::from("incomplete analysis of call because of failure to resolve a nested call"),
provenance: None,
spans: vec![self.block_visitor.bv.current_span.source_callsite()],
};
self.block_visitor.bv.preconditions.push(precondition);
return;
}
}
self.block_visitor.bv.analysis_is_incomplete = true;
// If the callee is local, there will already be a diagnostic about the incomplete summary.
if !self.callee_def_id.is_local()
&& self.block_visitor.bv.cv.options.diag_level != DiagLevel::Default
{
let warning = self.block_visitor.bv.cv.session.struct_span_warn(
self.block_visitor.bv.current_span,
"the called function could not be completely analyzed",
);
self.block_visitor.bv.emit_diagnostic(warning);
}
let argument_type_hint = if let Some(func) = &self.callee_func_ref {
format!(" (foreign fn argument key: {})", func.argument_type_key)
} else {
"".to_string()
};
// todo: when a call site has an expression that does not result in a compile time
// constant function, perhaps construct a dummy function that is the join of the
// summaries of the function constants that might flow into the expression.
//todo: handle parameters that are arrays of functions
if self.block_visitor.bv.def_id.is_local() && !self.callee_def_id.is_local() {
info!(
"function {} can't be reliably analyzed because it calls function {} which could not be summarized{}.",
utils::summary_key_str(self.block_visitor.bv.tcx, self.block_visitor.bv.def_id),
utils::summary_key_str(self.block_visitor.bv.tcx, self.callee_def_id),
argument_type_hint,
);
} else {
debug!(
"function {} can't be reliably analyzed because it calls function {} which could not be summarized{}.",
utils::summary_key_str(self.block_visitor.bv.tcx, self.block_visitor.bv.def_id),
utils::summary_key_str(self.block_visitor.bv.tcx, self.callee_def_id),
argument_type_hint,
);
}
debug!("def_id {:?}", self.callee_def_id);
}
}
/// Refines the summary using the call arguments and local environment and transfers
/// the side effects of the summary into the current environment, while also checking
    /// preconditions and adding the post conditions to the exit condition guarding the post call target block.
#[logfn_inputs(TRACE)]
pub fn transfer_and_refine_into_current_environment(&mut self, function_summary: &Summary) {
debug!("def_id {:?}", self.callee_def_id);
debug!("summary {:?}", function_summary);
debug!("pre env {:?}", self.block_visitor.bv.current_environment);
debug!(
"target {:?} arguments {:?}",
self.destination, self.actual_args
);
self.check_preconditions_if_necessary(function_summary);
self.transfer_and_refine_normal_return_state(function_summary);
self.add_post_condition_to_exit_conditions(function_summary);
debug!("post env {:?}", self.block_visitor.bv.current_environment);
}
/// If we are checking for errors and have not assumed the preconditions of the called function
/// and we are not in angelic mode and have not already reported an error for this call,
/// then check the preconditions and report any conditions that are not known to hold at this point.
#[logfn_inputs(TRACE)]
pub fn check_preconditions_if_necessary(&mut self, function_summary: &Summary) {
if self.block_visitor.bv.check_for_errors
&& self
.block_visitor
.bv
.current_environment
.entry_condition
.as_bool_if_known()
.unwrap_or(true)
&& !self.block_visitor.bv.assume_preconditions_of_next_call
&& !self
.block_visitor
.bv
.already_reported_errors_for_call_to
.contains(&self.callee_fun_val)
{
self.check_function_preconditions(function_summary);
} else {
self.block_visitor.bv.assume_preconditions_of_next_call = false;
}
}
/// Checks if the preconditions obtained from the summary of the function being called
/// are met by the current state and arguments of the calling function.
/// Preconditions that are definitely false and reachable cause diagnostic messages.
/// Preconditions that are maybe false become preconditions of the calling function
/// unless the calling function is an analysis root, in which case a diagnostic message is issued.
#[logfn_inputs(TRACE)]
fn check_function_preconditions(&mut self, function_summary: &Summary) {
verify!(self.block_visitor.bv.check_for_errors);
// A precondition can refer to the result if the precondition prevents the result expression
// from overflowing.
let result = self
.destination
.map(|(r, _)| self.block_visitor.visit_rh_place(&r));
for precondition in &function_summary.preconditions {
let mut refined_condition = precondition.condition.refine_parameters_and_paths(
&self.actual_args,
&result,
&self.environment_before_call,
&self.block_visitor.bv.current_environment,
self.block_visitor.bv.fresh_variable_offset,
);
if self
.block_visitor
.bv
.current_environment
.entry_condition
.as_bool_if_known()
.is_none()
{
refined_condition = refined_condition.refine_with(
&self.block_visitor.bv.current_environment.entry_condition,
0,
);
}
let (refined_precondition_as_bool, entry_cond_as_bool) = self
.block_visitor
.bv
.check_condition_value_and_reachability(&refined_condition);
if refined_precondition_as_bool.unwrap_or(false) {
// The precondition is definitely true.
continue;
};
if !entry_cond_as_bool.unwrap_or(true) {
// The call is unreachable, so the precondition does not matter
continue;
}
let warn;
if !refined_precondition_as_bool.unwrap_or(true) {
// The precondition is definitely false.
if entry_cond_as_bool.unwrap_or(false) {
// We always get to this call
self.issue_diagnostic_for_call(precondition, &refined_condition, false);
return;
} else {
// Promote the precondition, but be assertive.
// When the caller fails to meet the precondition, failure is certain.
warn = false;
}
} else {
warn = true;
}
// If the current function is not an analysis root, promote the precondition, subject to a k-limit.
if (!self.block_visitor.bv.function_being_analyzed_is_root()
|| self.block_visitor.bv.cv.options.diag_level == DiagLevel::Default)
&& self.block_visitor.bv.preconditions.len() < k_limits::MAX_INFERRED_PRECONDITIONS
{
// Promote the callee precondition to a precondition of the current function.
// Unless, of course, if the precondition is already a precondition of the
// current function.
let seen_precondition = self.block_visitor.bv.preconditions.iter().any(|pc| {
pc.spans.last() == precondition.spans.last()
|| pc.provenance == precondition.provenance
});
if seen_precondition {
continue;
}
let promoted_condition = match (
self.block_visitor
.bv
.current_environment
.entry_condition
.extract_promotable_conjuncts(false),
refined_condition.extract_promotable_disjuncts(false),
) {
(Some(promotable_entry_condition), Some(promotable_condition)) => {
promotable_entry_condition
.logical_not()
.or(promotable_condition)
}
(Some(promotable_entry_condition), None) => {
promotable_entry_condition.logical_not()
}
(None, Some(promotable_condition)) => promotable_condition,
_ => Rc::new(abstract_value::FALSE),
};
if promoted_condition.as_bool_if_known().is_none() {
let mut stacked_spans = vec![self.block_visitor.bv.current_span];
stacked_spans.append(&mut precondition.spans.clone());
let promoted_precondition = Precondition {
condition: promoted_condition,
message: precondition.message.clone(),
provenance: precondition.provenance.clone(),
spans: stacked_spans,
};
self.block_visitor
.bv
.preconditions
.push(promoted_precondition);
continue;
}
}
// The precondition cannot be promoted, so the buck stops here.
if precondition
.message
.starts_with("incomplete analysis of call")
{
// If the precondition is not satisfied, the summary of the callee is incomplete
// and so should be the summary of this method.
self.block_visitor.bv.analysis_is_incomplete = true;
if self.block_visitor.bv.cv.options.diag_level == DiagLevel::Default {
// Don't give a diagnostic in default mode, since it is hard for casual users
// to do something about missing/incomplete summaries.
continue;
}
}
self.issue_diagnostic_for_call(precondition, &refined_condition, warn);
}
}
// Issue a diagnostic, but only if there isn't already a diagnostic for this
// function call.
#[logfn_inputs(TRACE)]
fn issue_diagnostic_for_call(
&mut self,
precondition: &Precondition,
condition: &Rc<AbstractValue>,
warn: bool,
) {
if self.block_visitor.bv.check_for_errors
&& !self
.block_visitor
.bv
.already_reported_errors_for_call_to
.contains(&self.callee_fun_val)
{
self.block_visitor
.emit_diagnostic_for_precondition(precondition, condition, warn);
self.block_visitor
.bv
.already_reported_errors_for_call_to
.insert(self.callee_fun_val.clone());
}
}
/// Updates the current state to reflect the effects of a normal return from the function call.
/// The paths and expressions of the side-effects are refined in the context of the pre-state
/// (the environment before the call executed), while the refined effects are applied to the
/// current state.
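    ///
    /// A minimal sketch of the effect at a call site (the functions are hypothetical; `verify!`
    /// is the annotation macro from `mirai_annotations`):
    ///
    /// ```ignore (illustrative sketch, not a doctest)
    /// fn callee(p: &mut i32) -> i32 { *p = 1; 2 } // summary: side effect *param_1 <- 1, result 2
    /// fn caller() {
    ///     let mut x = 0;
    ///     let r = callee(&mut x); // param_1 is refined to the caller's path for `x`
    ///     verify!(x == 1 && r == 2);
    /// }
    /// ```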
#[logfn_inputs(TRACE)]
pub fn transfer_and_refine_normal_return_state(&mut self, function_summary: &Summary) {
let destination = self.destination;
if let Some((place, _)) = &destination {
// Assign function result to place
let target_path = self.block_visitor.visit_rh_place(place);
let result_path = &Some(target_path.clone());
// If the summary has a concrete type for the return result, use that type rather
// than the possibly abstract type of the target path.
let result_type = self
.type_visitor()
.get_type_from_index(function_summary.return_type_index);
if !result_type.is_never() {
self.type_visitor_mut()
.set_path_rustc_type(target_path.clone(), result_type);
}
let return_value_path = Path::new_result();
let mut pre_environment = self.environment_before_call.clone();
// Transfer side effects
if function_summary.is_computed && !function_summary.is_incomplete {
// Effects on the heap
for (path, value) in function_summary.side_effects.iter() {
if path.is_rooted_by_non_local_structure() {
let rvalue = value.clone().refine_parameters_and_paths(
&self.actual_args,
result_path,
&self.environment_before_call,
&self.block_visitor.bv.current_environment,
self.block_visitor.bv.fresh_variable_offset,
);
let rpath = path.refine_parameters_and_paths(
&self.actual_args,
result_path,
&self.environment_before_call,
&self.block_visitor.bv.current_environment,
self.block_visitor.bv.fresh_variable_offset,
);
if rvalue.expression.infer_type() == ExpressionType::NonPrimitive {
let source_path = Path::get_as_path(rvalue.clone());
let source_type =
self.block_visitor.bv.type_visitor().get_path_rustc_type(
&source_path,
self.block_visitor.bv.current_span,
);
self.block_visitor.bv.copy_or_move_elements(
rpath.clone(),
source_path,
source_type,
false,
);
} else {
self.block_visitor
.bv
.update_value_at(rpath.clone(), rvalue.clone());
}
pre_environment.strong_update_value_at(rpath, rvalue);
}
check_for_early_return!(self.block_visitor.bv);
}
// Effects on the call result
self.block_visitor.bv.transfer_and_refine(
&function_summary.side_effects,
target_path,
&return_value_path,
result_path,
&self.actual_args,
&pre_environment,
);
// Effects on the call arguments
for (i, (target_path, _)) in self.actual_args.iter().enumerate() {
let parameter_path = Path::new_parameter(i + 1);
self.block_visitor.bv.transfer_and_refine(
&function_summary.side_effects,
target_path.clone(),
¶meter_path,
result_path,
&self.actual_args,
&pre_environment,
);
check_for_early_return!(self.block_visitor.bv);
}
} else {
// We don't know anything other than the return value type.
// We'll assume there were no side effects and no preconditions.
let args = self.actual_args.iter().map(|(_, a)| a.clone()).collect();
let result_type = self
.type_visitor()
.get_place_type(place, self.block_visitor.bv.current_span);
let result =
self.callee_fun_val
.uninterpreted_call(args, result_type, return_value_path);
self.block_visitor.bv.update_value_at(target_path, result);
}
}
}
/// If the function summary has a post condition, refine this and add it to the
/// exit conditions for the current block.
/// Note that this function has to be executed in the pre-state of the call.
    /// Any variables left in the post condition of the summary refer to its parameters
    /// and thus to the state of the current function as it was before making the
    /// call to the function that is summarized by function_summary.
#[logfn_inputs(TRACE)]
pub fn add_post_condition_to_exit_conditions(&mut self, function_summary: &Summary) {
let destination = self.destination;
if let Some((place, target)) = &destination {
let target_path = self.block_visitor.visit_lh_place(place);
let result_path = &Some(target_path);
let mut exit_condition = self
.block_visitor
.bv
.current_environment
.entry_condition
.clone();
if exit_condition.as_bool_if_known().unwrap_or(true) {
if let Some(post_condition) = &function_summary.post_condition {
let refined_post_condition = post_condition.refine_parameters_and_paths(
&self.actual_args,
result_path,
&self.environment_before_call,
&self.block_visitor.bv.current_environment,
self.block_visitor.bv.fresh_variable_offset,
);
trace!("refined post condition {:?}", refined_post_condition);
exit_condition = exit_condition.and(refined_post_condition);
trace!("post exit conditions {:?}", exit_condition);
}
}
self.block_visitor
.bv
.current_environment
.exit_conditions
.insert_mut(*target, exit_condition);
}
}
/// Extracts the string from an AbstractDomain that is required to be a reference to a string literal.
    /// This is the case for MIRAI helper functions that are hidden from the documentation
/// and that are required to be invoked via macros that ensure that the argument providing
/// this value is always a string literal.
#[logfn_inputs(TRACE)]
fn coerce_to_string(&mut self, path: &Rc<Path>) -> Rc<str> {
if let PathEnum::Computed { value } = &path.value {
if let Expression::Reference(path) = &value.expression {
if let PathEnum::Computed { value } = &path.value {
if let Expression::CompileTimeConstant(ConstantDomain::Str(s)) =
&value.expression
{
return s.clone();
}
}
}
} else if let Some(value) = self.block_visitor.bv.current_environment.value_at(path) {
if let Expression::Reference(path) = &value.expression {
if let PathEnum::Computed { value } = &path.value {
if let Expression::CompileTimeConstant(ConstantDomain::Str(s)) =
&value.expression
{
return s.clone();
}
}
}
}
if self.block_visitor.bv.check_for_errors {
let warning = self.block_visitor.bv.cv.session.struct_span_warn(
self.block_visitor.bv.current_span,
"this argument should be a string literal, do not call this function directly",
);
self.block_visitor.bv.emit_diagnostic(warning);
}
Rc::from("dummy argument")
}
    /// Extracts the tag kind and the propagation set from the generic arguments of the function
    /// call underlying `add_tag!` or `has_tag!`. The tag type should be the second generic
    /// argument of the current function call. The tag type itself should also be parameterized,
    /// and its first generic parameter should be a constant of type `TagPropagationSet`, which
    /// represents the propagation set. Returns a `Tag` (the tag type's def id paired with its
    /// propagation set) if the tag-related functions are called correctly; otherwise returns `None`.
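    ///
    /// For illustration only (the type names are hypothetical; the macros and
    /// `TAG_PROPAGATION_ALL` come from the `mirai_annotations` crate), a conforming tag type and
    /// call site look roughly like this:
    ///
    /// ```ignore (illustrative sketch, not a doctest)
    /// struct SecretTaintKind<const MASK: TagPropagationSet> {}
    /// type SecretTaint = SecretTaintKind<TAG_PROPAGATION_ALL>;
    /// add_tag!(&value, SecretTaint);
    /// verify!(has_tag!(&value, SecretTaint));
    /// ```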
#[logfn_inputs(TRACE)]
fn extract_tag_kind_and_propagation_set(&mut self) -> Option<Tag> {
precondition!(
self.callee_known_name == KnownNames::MiraiAddTag
|| self.callee_known_name == KnownNames::MiraiHasTag
|| self.callee_known_name == KnownNames::MiraiDoesNotHaveTag
);
match self.callee_generic_arguments {
None => assume_unreachable!(
"expected tag-related function calls to have two generic arguments"
),
Some(rustc_gen_args) => {
checked_assume!(rustc_gen_args.len() == 2);
// The second generic argument of the function call is the tag type.
let tag_rustc_type;
match rustc_gen_args[1].unpack() {
GenericArgKind::Type(rustc_type) => tag_rustc_type = rustc_type,
_ => {
// The rust type checker should ensure that the second generic argument is a type.
assume_unreachable!(
"expected the second generic argument of tag-related function calls to be a type"
);
}
}
// The tag type should be a generic ADT whose first parameter is a constant.
let tag_adt_def;
let tag_substs_ref;
match tag_rustc_type.kind() {
TyKind::Adt(adt_def, substs_ref) if substs_ref.len() > 0 => {
tag_adt_def = adt_def;
tag_substs_ref = substs_ref;
}
_ => {
if self.block_visitor.bv.check_for_errors {
let warning = self.block_visitor.bv.cv.session.struct_span_warn(
self.block_visitor.bv.current_span,
"the tag type should be a generic type whose first parameter is a constant of type TagPropagationSet",
);
self.block_visitor.bv.emit_diagnostic(warning);
}
return None;
}
}
// Extract the tag type's first parameter.
let tag_propagation_set_rustc_const;
match tag_substs_ref[0].unpack() {
GenericArgKind::Const(rustc_const)
if *rustc_const.ty.kind() == TyKind::Uint(UintTy::U128) =>
{
tag_propagation_set_rustc_const = rustc_const
}
_ => {
if self.block_visitor.bv.check_for_errors {
let warning = self.block_visitor.bv.cv.session.struct_span_warn(
self.block_visitor.bv.current_span,
"the first parameter of the tag type should have type TagPropagationSet"
);
self.block_visitor.bv.emit_diagnostic(warning);
}
return None;
}
}
// Analyze the tag type's first parameter to obtain a compile-time constant.
let tag_propagation_set_value = self
.block_visitor
.visit_const(tag_propagation_set_rustc_const);
if let Expression::CompileTimeConstant(ConstantDomain::U128(data)) =
&tag_propagation_set_value.expression
{
let tag = Tag {
def_id: tag_adt_def.did.into(),
prop_set: *data,
};
// Record the tag if it is the constant-time verification tag.
self.check_and_record_constant_time_verification_tag(tag_adt_def.did, &tag);
Some(tag)
} else {
// We have already checked that `tag_propagation_set_rustc_const.ty.kind()` is
// `TyKind::Uint(ast::UintTy::U128)`, so the extracted compile-time constant
// must be `ConstantDomain::U128(..)`.
assume_unreachable!(
"expected the constant generic arg to be a compile-time constant"
);
}
}
}
}
/// Check if `tag` whose def id is `tag_def_id` is the constant-time verification tag specified
/// by the user. If so, record the tag in the current crate visitor.
#[logfn_inputs(TRACE)]
fn check_and_record_constant_time_verification_tag(&mut self, tag_def_id: DefId, tag: &Tag) {
if self.block_visitor.bv.cv.constant_time_tag_cache.is_none() {
let matched = self
.block_visitor
.bv
.cv
.options
.constant_time_tag_name
.as_ref()
.map_or(false, |expected_tag_name| {
expected_tag_name.eq(&self.block_visitor.bv.tcx.def_path_str(tag_def_id))
});
if matched {
self.block_visitor.bv.cv.constant_time_tag_cache = Some(*tag);
}
}
}
}
| 46.525862 | 148 | 0.526568 |
1445cd35a1d72bee71983cc50a21157e3fdd4c6b | 6,630 | use makepad_render::*;
use crate::scrollbar::*;
#[derive(Clone)]
pub struct ScrollView{
pub view:View,
pub scroll_h:Option<ScrollBar>,
pub scroll_v:Option<ScrollBar>,
}
impl ScrollView{
pub fn new(cx: &mut Cx) -> Self {
Self {
view: View::new(cx),
scroll_h: Some(ScrollBar::new(cx)),
scroll_v: Some(ScrollBar {
smoothing: Some(0.15),
..ScrollBar::new(cx)
}),
}
}
pub fn proto_no_scroll(cx: &mut Cx) -> Self {
Self {
view: View::new(cx),
scroll_h: None,
scroll_v: None
}
}
pub fn begin_view(&mut self, cx: &mut Cx, layout: Layout) -> ViewRedraw {
self.view.begin_view(cx, layout)
}
pub fn view_will_redraw(&mut self, cx: &mut Cx)->bool{
self.view.view_will_redraw(cx)
}
pub fn handle_scroll_bars(&mut self, cx: &mut Cx, event: &mut Event) -> bool {
let mut ret_h = ScrollBarEvent::None;
let mut ret_v = ScrollBarEvent::None;
if let Some(scroll_h) = &mut self.scroll_h {
ret_h = scroll_h.handle_scroll_bar(cx, event);
}
if let Some(scroll_v) = &mut self.scroll_v {
ret_v = scroll_v.handle_scroll_bar(cx, event);
}
match ret_h {
ScrollBarEvent::None => (),
ScrollBarEvent::Scroll {scroll_pos, ..} => {
cx.set_view_scroll_x(self.view.view_id.unwrap(), scroll_pos);
},
_ => ()
};
match ret_v {
ScrollBarEvent::None => (),
ScrollBarEvent::Scroll {scroll_pos, ..} => {
cx.set_view_scroll_y(self.view.view_id.unwrap(), scroll_pos);
},
_ => ()
};
ret_h != ScrollBarEvent::None || ret_v != ScrollBarEvent::None
}
pub fn get_scroll_pos(&self, cx: &Cx) -> Vec2 {
if let Some(view_id) = self.view.view_id {
let cxview = &cx.views[view_id];
cxview.unsnapped_scroll
}
else {
Vec2::default()
}
}
pub fn set_scroll_pos(&mut self, cx: &mut Cx, pos: Vec2) -> bool {
let view_id = self.view.view_id.unwrap();
//let view_area = Area::DrawList(DrawListArea{draw_list_id:draw_list_id, redraw_id:cx.redraw_id});
let mut changed = false;
if let Some(scroll_h) = &mut self.scroll_h {
if scroll_h.set_scroll_pos(cx, pos.x) {
let scroll_pos = scroll_h.get_scroll_pos();
cx.set_view_scroll_x(view_id, scroll_pos);
changed = true;
}
}
if let Some(scroll_v) = &mut self.scroll_v {
if scroll_v.set_scroll_pos(cx, pos.y) {
let scroll_pos = scroll_v.get_scroll_pos();
cx.set_view_scroll_y(view_id, scroll_pos);
changed = true;
}
}
changed
}
pub fn set_scroll_view_total(&mut self, cx: &mut Cx, view_total: Vec2) {
if let Some(scroll_h) = &mut self.scroll_h {
scroll_h.set_scroll_view_total(cx, view_total.x)
}
if let Some(scroll_v) = &mut self.scroll_v {
scroll_v.set_scroll_view_total(cx, view_total.y)
}
}
pub fn get_scroll_view_total(&mut self) -> Vec2 {
Vec2 {
x: if let Some(scroll_h) = &mut self.scroll_h {
scroll_h.get_scroll_view_total()
}else {0.},
y: if let Some(scroll_v) = &mut self.scroll_v {
scroll_v.get_scroll_view_total()
}else {0.}
}
}
pub fn scroll_into_view(&mut self, cx: &mut Cx, rect: Rect) {
if let Some(scroll_h) = &mut self.scroll_h {
scroll_h.scroll_into_view(cx, rect.x, rect.w, true);
}
if let Some(scroll_v) = &mut self.scroll_v {
scroll_v.scroll_into_view(cx, rect.y, rect.h, true);
}
}
pub fn scroll_into_view_no_smooth(&mut self, cx: &mut Cx, rect: Rect) {
if let Some(scroll_h) = &mut self.scroll_h {
scroll_h.scroll_into_view(cx, rect.x, rect.w, false);
}
if let Some(scroll_v) = &mut self.scroll_v {
scroll_v.scroll_into_view(cx, rect.y, rect.h, false);
}
}
pub fn scroll_into_view_abs(&mut self, cx: &mut Cx, rect: Rect) {
let self_rect = self.get_rect(cx);
if let Some(scroll_h) = &mut self.scroll_h {
scroll_h.scroll_into_view(cx, rect.x - self_rect.x, rect.w, true);
}
if let Some(scroll_v) = &mut self.scroll_v {
scroll_v.scroll_into_view(cx, rect.y - self_rect.y, rect.h, true);
}
}
pub fn set_scroll_target(&mut self, cx: &mut Cx, pos: Vec2) {
if let Some(scroll_h) = &mut self.scroll_h {
scroll_h.set_scroll_target(cx, pos.x);
}
if let Some(scroll_v) = &mut self.scroll_v {
scroll_v.set_scroll_target(cx, pos.y);
}
}
pub fn end_view(&mut self, cx: &mut Cx) -> Area {
let view_id = self.view.view_id.unwrap();
let view_area = Area::View(ViewArea {view_id: view_id, redraw_id: cx.redraw_id});
// lets ask the turtle our actual bounds
let view_total = cx.get_turtle_bounds();
let mut rect_now = cx.get_turtle_rect();
if rect_now.h.is_nan() {
rect_now.h = view_total.y;
}
if rect_now.w.is_nan() {
rect_now.w = view_total.x;
}
if let Some(scroll_h) = &mut self.scroll_h {
let scroll_pos = scroll_h.draw_scroll_bar(cx, Axis::Horizontal, view_area, rect_now, view_total);
cx.set_view_scroll_x(view_id, scroll_pos);
}
if let Some(scroll_v) = &mut self.scroll_v {
//println!("SET SCROLLBAR {} {}", rect_now.h, view_total.y);
let scroll_pos = scroll_v.draw_scroll_bar(cx, Axis::Vertical, view_area, rect_now, view_total);
cx.set_view_scroll_y(view_id, scroll_pos);
}
let rect = cx.end_turtle(view_area);
let cxview = &mut cx.views[view_id];
cxview.rect = rect;
cx.view_stack.pop();
return view_area
}
pub fn get_rect(&mut self, cx: &Cx) -> Rect {
self.view.get_rect(cx)
}
pub fn redraw_view_area(&self, cx: &mut Cx) {
self.view.redraw_view_area(cx)
}
pub fn get_view_area(&self, cx: &Cx) -> Area {
self.view.get_view_area(cx)
}
}
| 32.985075 | 109 | 0.545852 |
f875b4a2ef7f1eebf2466f514fa4dd702097986b | 14,186 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use crate::FileChooser;
use crate::FileChooserAction;
use crate::FileFilter;
use crate::NativeDialog;
use crate::Widget;
use crate::Window;
use glib::object::Cast;
use glib::object::IsA;
use glib::object::ObjectType as ObjectType_;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::StaticType;
use glib::ToValue;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib::wrapper! {
#[doc(alias = "GtkFileChooserNative")]
pub struct FileChooserNative(Object<ffi::GtkFileChooserNative, ffi::GtkFileChooserNativeClass>) @extends NativeDialog, @implements FileChooser;
match fn {
type_ => || ffi::gtk_file_chooser_native_get_type(),
}
}
impl FileChooserNative {
#[doc(alias = "gtk_file_chooser_native_new")]
pub fn new<P: IsA<Window>>(
title: Option<&str>,
parent: Option<&P>,
action: FileChooserAction,
accept_label: Option<&str>,
cancel_label: Option<&str>,
) -> FileChooserNative {
assert_initialized_main_thread!();
unsafe {
from_glib_full(ffi::gtk_file_chooser_native_new(
title.to_glib_none().0,
parent.map(|p| p.as_ref()).to_glib_none().0,
action.into_glib(),
accept_label.to_glib_none().0,
cancel_label.to_glib_none().0,
))
}
}
// rustdoc-stripper-ignore-next
/// Creates a new builder-pattern struct instance to construct [`FileChooserNative`] objects.
///
/// This method returns an instance of [`FileChooserNativeBuilder`] which can be used to create [`FileChooserNative`] objects.
pub fn builder() -> FileChooserNativeBuilder {
FileChooserNativeBuilder::default()
}
#[doc(alias = "gtk_file_chooser_native_get_accept_label")]
#[doc(alias = "get_accept_label")]
pub fn accept_label(&self) -> Option<glib::GString> {
unsafe {
from_glib_none(ffi::gtk_file_chooser_native_get_accept_label(
self.to_glib_none().0,
))
}
}
#[doc(alias = "gtk_file_chooser_native_get_cancel_label")]
#[doc(alias = "get_cancel_label")]
pub fn cancel_label(&self) -> Option<glib::GString> {
unsafe {
from_glib_none(ffi::gtk_file_chooser_native_get_cancel_label(
self.to_glib_none().0,
))
}
}
#[doc(alias = "gtk_file_chooser_native_set_accept_label")]
pub fn set_accept_label(&self, accept_label: Option<&str>) {
unsafe {
ffi::gtk_file_chooser_native_set_accept_label(
self.to_glib_none().0,
accept_label.to_glib_none().0,
);
}
}
#[doc(alias = "gtk_file_chooser_native_set_cancel_label")]
pub fn set_cancel_label(&self, cancel_label: Option<&str>) {
unsafe {
ffi::gtk_file_chooser_native_set_cancel_label(
self.to_glib_none().0,
cancel_label.to_glib_none().0,
);
}
}
#[doc(alias = "accept-label")]
pub fn get_property_accept_label(&self) -> Option<glib::GString> {
unsafe {
let mut value = glib::Value::from_type(<glib::GString as StaticType>::static_type());
glib::gobject_ffi::g_object_get_property(
self.as_ptr() as *mut glib::gobject_ffi::GObject,
b"accept-label\0".as_ptr() as *const _,
value.to_glib_none_mut().0,
);
value
.get()
.expect("Return Value for property `accept-label` getter")
}
}
#[doc(alias = "accept-label")]
pub fn set_property_accept_label(&self, accept_label: Option<&str>) {
unsafe {
glib::gobject_ffi::g_object_set_property(
self.as_ptr() as *mut glib::gobject_ffi::GObject,
b"accept-label\0".as_ptr() as *const _,
accept_label.to_value().to_glib_none().0,
);
}
}
#[doc(alias = "cancel-label")]
pub fn get_property_cancel_label(&self) -> Option<glib::GString> {
unsafe {
let mut value = glib::Value::from_type(<glib::GString as StaticType>::static_type());
glib::gobject_ffi::g_object_get_property(
self.as_ptr() as *mut glib::gobject_ffi::GObject,
b"cancel-label\0".as_ptr() as *const _,
value.to_glib_none_mut().0,
);
value
.get()
.expect("Return Value for property `cancel-label` getter")
}
}
#[doc(alias = "cancel-label")]
pub fn set_property_cancel_label(&self, cancel_label: Option<&str>) {
unsafe {
glib::gobject_ffi::g_object_set_property(
self.as_ptr() as *mut glib::gobject_ffi::GObject,
b"cancel-label\0".as_ptr() as *const _,
cancel_label.to_value().to_glib_none().0,
);
}
}
#[doc(alias = "accept-label")]
pub fn connect_accept_label_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_accept_label_trampoline<F: Fn(&FileChooserNative) + 'static>(
this: *mut ffi::GtkFileChooserNative,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::accept-label\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_accept_label_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[doc(alias = "cancel-label")]
pub fn connect_cancel_label_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_cancel_label_trampoline<F: Fn(&FileChooserNative) + 'static>(
this: *mut ffi::GtkFileChooserNative,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::cancel-label\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_cancel_label_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
#[derive(Clone, Default)]
// rustdoc-stripper-ignore-next
/// A [builder-pattern] type to construct [`FileChooserNative`] objects.
///
/// [builder-pattern]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html
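///
/// A minimal usage sketch (the labels and action chosen here are illustrative, and GTK is assumed
/// to have been initialized elsewhere):
///
/// ```no_run
/// use gtk::{FileChooserAction, FileChooserNative};
///
/// let chooser = FileChooserNative::builder()
///     .action(FileChooserAction::Open)
///     .accept_label("_Open")
///     .cancel_label("_Cancel")
///     .build();
/// ```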
pub struct FileChooserNativeBuilder {
accept_label: Option<String>,
cancel_label: Option<String>,
#[cfg(any(feature = "v3_20", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))]
modal: Option<bool>,
#[cfg(any(feature = "v3_20", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))]
title: Option<String>,
#[cfg(any(feature = "v3_20", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))]
transient_for: Option<Window>,
#[cfg(any(feature = "v3_20", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))]
visible: Option<bool>,
action: Option<FileChooserAction>,
create_folders: Option<bool>,
do_overwrite_confirmation: Option<bool>,
extra_widget: Option<Widget>,
filter: Option<FileFilter>,
local_only: Option<bool>,
preview_widget: Option<Widget>,
preview_widget_active: Option<bool>,
select_multiple: Option<bool>,
show_hidden: Option<bool>,
use_preview_label: Option<bool>,
}
impl FileChooserNativeBuilder {
// rustdoc-stripper-ignore-next
/// Create a new [`FileChooserNativeBuilder`].
pub fn new() -> Self {
Self::default()
}
// rustdoc-stripper-ignore-next
/// Build the [`FileChooserNative`].
pub fn build(self) -> FileChooserNative {
let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
if let Some(ref accept_label) = self.accept_label {
properties.push(("accept-label", accept_label));
}
if let Some(ref cancel_label) = self.cancel_label {
properties.push(("cancel-label", cancel_label));
}
#[cfg(any(feature = "v3_20", feature = "dox"))]
if let Some(ref modal) = self.modal {
properties.push(("modal", modal));
}
#[cfg(any(feature = "v3_20", feature = "dox"))]
if let Some(ref title) = self.title {
properties.push(("title", title));
}
#[cfg(any(feature = "v3_20", feature = "dox"))]
if let Some(ref transient_for) = self.transient_for {
properties.push(("transient-for", transient_for));
}
#[cfg(any(feature = "v3_20", feature = "dox"))]
if let Some(ref visible) = self.visible {
properties.push(("visible", visible));
}
if let Some(ref action) = self.action {
properties.push(("action", action));
}
if let Some(ref create_folders) = self.create_folders {
properties.push(("create-folders", create_folders));
}
if let Some(ref do_overwrite_confirmation) = self.do_overwrite_confirmation {
properties.push(("do-overwrite-confirmation", do_overwrite_confirmation));
}
if let Some(ref extra_widget) = self.extra_widget {
properties.push(("extra-widget", extra_widget));
}
if let Some(ref filter) = self.filter {
properties.push(("filter", filter));
}
if let Some(ref local_only) = self.local_only {
properties.push(("local-only", local_only));
}
if let Some(ref preview_widget) = self.preview_widget {
properties.push(("preview-widget", preview_widget));
}
if let Some(ref preview_widget_active) = self.preview_widget_active {
properties.push(("preview-widget-active", preview_widget_active));
}
if let Some(ref select_multiple) = self.select_multiple {
properties.push(("select-multiple", select_multiple));
}
if let Some(ref show_hidden) = self.show_hidden {
properties.push(("show-hidden", show_hidden));
}
if let Some(ref use_preview_label) = self.use_preview_label {
properties.push(("use-preview-label", use_preview_label));
}
glib::Object::new::<FileChooserNative>(&properties)
.expect("Failed to create an instance of FileChooserNative")
}
pub fn accept_label(mut self, accept_label: &str) -> Self {
self.accept_label = Some(accept_label.to_string());
self
}
pub fn cancel_label(mut self, cancel_label: &str) -> Self {
self.cancel_label = Some(cancel_label.to_string());
self
}
#[cfg(any(feature = "v3_20", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))]
pub fn modal(mut self, modal: bool) -> Self {
self.modal = Some(modal);
self
}
#[cfg(any(feature = "v3_20", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))]
pub fn title(mut self, title: &str) -> Self {
self.title = Some(title.to_string());
self
}
#[cfg(any(feature = "v3_20", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))]
pub fn transient_for<P: IsA<Window>>(mut self, transient_for: &P) -> Self {
self.transient_for = Some(transient_for.clone().upcast());
self
}
#[cfg(any(feature = "v3_20", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))]
pub fn visible(mut self, visible: bool) -> Self {
self.visible = Some(visible);
self
}
pub fn action(mut self, action: FileChooserAction) -> Self {
self.action = Some(action);
self
}
pub fn create_folders(mut self, create_folders: bool) -> Self {
self.create_folders = Some(create_folders);
self
}
pub fn do_overwrite_confirmation(mut self, do_overwrite_confirmation: bool) -> Self {
self.do_overwrite_confirmation = Some(do_overwrite_confirmation);
self
}
pub fn extra_widget<P: IsA<Widget>>(mut self, extra_widget: &P) -> Self {
self.extra_widget = Some(extra_widget.clone().upcast());
self
}
pub fn filter(mut self, filter: &FileFilter) -> Self {
self.filter = Some(filter.clone());
self
}
pub fn local_only(mut self, local_only: bool) -> Self {
self.local_only = Some(local_only);
self
}
pub fn preview_widget<P: IsA<Widget>>(mut self, preview_widget: &P) -> Self {
self.preview_widget = Some(preview_widget.clone().upcast());
self
}
pub fn preview_widget_active(mut self, preview_widget_active: bool) -> Self {
self.preview_widget_active = Some(preview_widget_active);
self
}
pub fn select_multiple(mut self, select_multiple: bool) -> Self {
self.select_multiple = Some(select_multiple);
self
}
pub fn show_hidden(mut self, show_hidden: bool) -> Self {
self.show_hidden = Some(show_hidden);
self
}
pub fn use_preview_label(mut self, use_preview_label: bool) -> Self {
self.use_preview_label = Some(use_preview_label);
self
}
}
impl fmt::Display for FileChooserNative {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("FileChooserNative")
}
}
| 35.288557 | 147 | 0.587833 |
deb073b804e6583d6bcffe109cfd9ec6393a2243 | 7,259 | use io::{Ready, Readiness};
use reactor::{self, Source};
use mio::{self, Evented, EventSet, SetReadiness, Poll, PollOpt, Token};
use futures::{Future, Task};
use std::io;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
/// Processes multiple Futures and return their completed values in order they
/// were pushed into the queue.
///
/// Currently, only one `Future` is polled at a time, but it should be possible
/// to configure the number of futures concurrently polled, which would
/// increase parallelism.
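///
/// A rough usage sketch (the queued futures and the readiness-driven event loop are assumed to
/// exist elsewhere; only the methods defined below are used):
///
/// ```ignore (illustrative sketch, not a doctest)
/// let mut queue = AwaitQueue::with_capacity(8)?;
/// queue.push(some_future);
/// queue.push(another_future);
/// // Later, when the queue's source becomes readable:
/// while let Some(result) = queue.poll() {
///     handle(result); // results arrive in push order
/// }
/// ```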
pub struct AwaitQueue<T: Future> {
// TODO: Make this better
next_val: Arc<Mutex<Option<Result<T::Item, T::Error>>>>,
val: Option<Result<T::Item, T::Error>>,
task: Option<Task>,
remaining: VecDeque<T>,
in_flight: bool,
source: Source,
registration: Registration,
}
// TODO: Extract
struct Registration {
inner: RefCell<Option<(mio::Registration, SetReadiness)>>,
}
impl<T> AwaitQueue<T>
where T: Future
{
/// Create an `AwaitQueue` with an initial capacity of `n`
pub fn with_capacity(n: usize) -> io::Result<AwaitQueue<T>> {
let registration = Registration {
inner: RefCell::new(None),
};
let source = try!(reactor::register_source(®istration, Ready::readable()));
Ok(AwaitQueue {
next_val: Arc::new(Mutex::new(None)),
val: None,
task: None,
remaining: VecDeque::with_capacity(n),
in_flight: false,
source: source,
registration: registration,
})
}
/// Return the number of queued futures.
pub fn len(&self) -> usize {
let mut len = self.remaining.len();
if self.in_flight {
len += 1;
}
len
}
/// Returns true if there are no queued futures.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Push a new future for processing.
pub fn push(&mut self, future: T) {
trace!("AwaitQueue::push");
if self.in_flight {
self.remaining.push_back(future);
return;
}
self.schedule_future(future, true);
}
    /// Push a new future for processing, attempting to resolve it immediately.
    ///
    /// Returns the future's result if no future is currently in flight and the
    /// pushed future is already ready.
pub fn push_poll(&mut self, future: T) -> Option<Result<T::Item, T::Error>> {
trace!("AwaitQueue::push_poll");
if self.in_flight {
self.remaining.push_back(future);
return None;
}
if self.schedule_future(future, false) {
// the value can be read from immediately and no further work is
// needed
self.in_flight = false;
self.val.take()
} else {
None
}
}
/// Poll for the next completed value.
///
/// If the future at the head of the `AwaitQueue` is complete, the result
/// will be returned and the next future will begin being processed. If the
/// future at the head of the queue is not ready, `None` is returned.
pub fn poll(&mut self) -> Option<Result<T::Item, T::Error>> {
trace!("AwaitQueue::poll");
if !self.is_readable() {
return None;
}
let v = self.val.take().or_else(|| {
self.next_val.lock().unwrap().take()
});
// The queue is not going to be readable anymore
self.source.unset_readable();
if let Some(v) = v {
trace!("AwaitQueue::poll; -> Got value");
// No futures are in flight
self.in_flight = false;
// Unset the `Evented` readiness
self.registration.set_readiness().unwrap()
.set_readiness(EventSet::none()).unwrap();
// Track progress at the source
self.source.advance();
// Schedule the next future
self.schedule_next();
// Return the value
return Some(v);
}
None
}
fn schedule_next(&mut self) {
if let Some(future) = self.remaining.pop_front() {
self.schedule_future(future, true);
}
}
fn schedule_future(&mut self, mut f: T, notify: bool) -> bool {
use futures::Poll;
trace!("AwaitQueue::schedule_future");
let mut task = self.task.take().unwrap_or_else(Task::new);
self.in_flight = true;
match f.poll(&mut task) {
Poll::Ok(v) => {
trace!("AwaitQueue::schedule_future -> done immediately");
self.task = Some(task);
self.val = Some(Ok(v));
if notify {
self.registration.set_readiness().unwrap().set_readiness(EventSet::readable()).unwrap();
}
true
}
Poll::Err(e) => {
self.task = Some(task);
self.val = Some(Err(e));
if notify {
self.registration.set_readiness().unwrap().set_readiness(EventSet::readable()).unwrap();
}
true
}
Poll::NotReady => {
let set_readiness = self.registration.set_readiness().unwrap();
let dst = self.next_val.clone();
let f = f.then(move |res| {
trace!(" future received value");
*dst.lock().unwrap() = Some(res);
set_readiness.set_readiness(EventSet::readable()).unwrap();
Ok(())
});
task.run(Box::new(f));
false
}
}
}
}
impl<T: Future> Readiness for AwaitQueue<T> {
fn is_readable(&self) -> bool {
self.val.is_some() || self.source.is_readable()
}
fn is_writable(&self) -> bool {
false
}
}
impl Registration {
fn set_readiness(&self) -> Option<SetReadiness> {
let inner = self.inner.borrow();
match *inner {
Some((_, ref set_readiness)) => Some(set_readiness.clone()),
_ => None,
}
}
}
impl Evented for Registration {
fn register(&self, poll: &Poll, token: Token, interest: EventSet, opts: PollOpt) -> io::Result<()> {
let mut inner = self.inner.borrow_mut();
if inner.is_some() {
return Err(io::Error::new(io::ErrorKind::Other, "already registered"));
}
let mio = mio::Registration::new(poll, token, interest, opts);
*inner = Some(mio);
Ok(())
}
fn reregister(&self, poll: &Poll, token: Token, interest: EventSet, opts: PollOpt) -> io::Result<()> {
let inner = self.inner.borrow();
match *inner {
Some((ref r, _)) => r.update(poll, token, interest, opts),
_ => Err(io::Error::new(io::ErrorKind::Other, "not registered")),
}
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
let inner = self.inner.borrow();
match *inner {
Some((ref r, _)) => r.deregister(poll),
_ => Err(io::Error::new(io::ErrorKind::Other, "not registered")),
}
}
}
| 29.036 | 108 | 0.539055 |
f5a95d7004bffdbdc2ad3097d65c5f489eda7573 | 60,996 | //! A classic liveness analysis based on dataflow over the AST. Computes,
//! for each local variable in a function, whether that variable is live
//! at a given point. Program execution points are identified by their
//! IDs.
//!
//! # Basic idea
//!
//! The basic model is that each local variable is assigned an index. We
//! represent sets of local variables using a vector indexed by this
//! index. The value in the vector is either 0, indicating the variable
//! is dead, or the ID of an expression that uses the variable.
//!
//! We conceptually walk over the AST in reverse execution order. If we
//! find a use of a variable, we add it to the set of live variables. If
//! we find an assignment to a variable, we remove it from the set of live
//! variables. When we have to merge two flows, we take the union of
//! those two flows -- if the variable is live on both paths, we simply
//! pick one ID. In the event of loops, we continue doing this until a
//! fixed point is reached.
//!
//! ## Checking initialization
//!
//! At the function entry point, all variables must be dead. If this is
//! not the case, we can report an error using the ID found in the set of
//! live variables, which identifies a use of the variable which is not
//! dominated by an assignment.
//!
//! ## Checking moves
//!
//! After each explicit move, the variable must be dead.
//!
//! ## Computing last uses
//!
//! Any use of the variable where the variable is dead afterwards is a
//! last use.
//!
//! # Implementation details
//!
//! The actual implementation contains two (nested) walks over the AST.
//! The outer walk has the job of building up the ir_maps instance for the
//! enclosing function. On the way down the tree, it identifies those AST
//! nodes and variable IDs that will be needed for the liveness analysis
//! and assigns them contiguous IDs. The liveness ID for an AST node is
//! called a `live_node` (it's a newtype'd `u32`) and the ID for a variable
//! is called a `variable` (another newtype'd `u32`).
//!
//! On the way back up the tree, as we are about to exit from a function
//! declaration we allocate a `liveness` instance. Now that we know
//! precisely how many nodes and variables we need, we can allocate all
//! the various arrays that we will need to precisely the right size. We then
//! perform the actual propagation on the `liveness` instance.
//!
//! This propagation is encoded in the various `propagate_through_*()`
//! methods. It effectively does a reverse walk of the AST; whenever we
//! reach a loop node, we iterate until a fixed point is reached.
//!
//! ## The `RWU` struct
//!
//! At each live node `N`, we track three pieces of information for each
//! variable `V` (these are encapsulated in the `RWU` struct):
//!
//! - `reader`: the `LiveNode` ID of some node which will read the value
//! that `V` holds on entry to `N`. Formally: a node `M` such
//! that there exists a path `P` from `N` to `M` where `P` does not
//! write `V`. If the `reader` is `invalid_node()`, then the current
//! value will never be read (the variable is dead, essentially).
//!
//! - `writer`: the `LiveNode` ID of some node which will write the
//! variable `V` and which is reachable from `N`. Formally: a node `M`
//! such that there exists a path `P` from `N` to `M` and `M` writes
//! `V`. If the `writer` is `invalid_node()`, then there is no writer
//! of `V` that follows `N`.
//!
//! - `used`: a boolean value indicating whether `V` is *used*. We
//! distinguish a *read* from a *use* in that a *use* is some read that
//! is not just used to generate a new value. For example, `x += 1` is
//! a read but not a use. This is used to generate better warnings.
//!
//! ## Special Variables
//!
//! We generate various special variables for various, well, special purposes.
//! These are described in the `specials` struct:
//!
//! - `exit_ln`: a live node that is generated to represent every 'exit' from
//! the function, whether it be by explicit return, panic, or other means.
//!
//! - `fallthrough_ln`: a live node that represents a fallthrough
//!
//! - `clean_exit_var`: a synthetic variable that is only 'read' from the
//! fallthrough node. It is only live if the function could converge
//! via means other than an explicit `return` expression. That is, it is
//! only dead if the end of the function's block can never be reached.
//! It is the responsibility of typeck to ensure that there are no
//! `return` expressions in a function declared as diverging.
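//!
//! As a small illustrative example (not a doctest), the comments below describe
//! what the analysis records for the reads and writes of `x`:
//!
//! ```ignore (illustrative)
//! fn f() {
//!     let mut x = 0;     // definition with an initializer: a write to `x`
//!     x += 1;            // reads and writes `x`, but does not count as a "use"
//!     println!("{}", x); // reads and uses `x`; this is also its last use,
//!                        // since `x` is dead afterwards
//! }
//! ```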
use self::LoopKind::*;
use self::LiveNodeKind::*;
use self::VarKind::*;
use crate::hir::def::*;
use crate::hir::Node;
use crate::ty::{self, TyCtxt};
use crate::ty::query::Providers;
use crate::lint;
use crate::util::nodemap::{HirIdMap, HirIdSet};
use errors::Applicability;
use std::collections::{BTreeMap, VecDeque};
use std::{fmt, u32};
use std::io::prelude::*;
use std::io;
use std::rc::Rc;
use syntax::ast::{self, NodeId};
use syntax::ptr::P;
use syntax::symbol::keywords;
use syntax_pos::Span;
use crate::hir;
use crate::hir::{Expr, HirId};
use crate::hir::def_id::DefId;
use crate::hir::intravisit::{self, Visitor, FnKind, NestedVisitorMap};
/// For use with `propagate_through_loop`.
enum LoopKind<'a> {
/// An endless `loop` loop.
LoopLoop,
/// A `while` loop, with the given expression as condition.
WhileLoop(&'a Expr),
}
#[derive(Copy, Clone, PartialEq)]
struct Variable(u32);
#[derive(Copy, Clone, PartialEq)]
struct LiveNode(u32);
impl Variable {
fn get(&self) -> usize { self.0 as usize }
}
impl LiveNode {
fn get(&self) -> usize { self.0 as usize }
}
#[derive(Copy, Clone, PartialEq, Debug)]
enum LiveNodeKind {
FreeVarNode(Span),
ExprNode(Span),
VarDefNode(Span),
ExitNode
}
fn live_node_kind_to_string(lnk: LiveNodeKind, tcx: TyCtxt<'_, '_, '_>) -> String {
let cm = tcx.sess.source_map();
match lnk {
FreeVarNode(s) => {
format!("Free var node [{}]", cm.span_to_string(s))
}
ExprNode(s) => {
format!("Expr node [{}]", cm.span_to_string(s))
}
VarDefNode(s) => {
format!("Var def node [{}]", cm.span_to_string(s))
}
ExitNode => "Exit node".to_owned(),
}
}
impl<'a, 'tcx> Visitor<'tcx> for IrMaps<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::OnlyBodies(&self.tcx.hir())
}
fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx hir::FnDecl,
b: hir::BodyId, s: Span, id: HirId) {
visit_fn(self, fk, fd, b, s, id);
}
fn visit_local(&mut self, l: &'tcx hir::Local) { visit_local(self, l); }
fn visit_expr(&mut self, ex: &'tcx Expr) { visit_expr(self, ex); }
fn visit_arm(&mut self, a: &'tcx hir::Arm) { visit_arm(self, a); }
}
fn check_mod_liveness<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, module_def_id: DefId) {
tcx.hir().visit_item_likes_in_module(module_def_id, &mut IrMaps::new(tcx).as_deep_visitor());
}
pub fn provide(providers: &mut Providers<'_>) {
*providers = Providers {
check_mod_liveness,
..*providers
};
}
impl fmt::Debug for LiveNode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "ln({})", self.get())
}
}
impl fmt::Debug for Variable {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "v({})", self.get())
}
}
// ______________________________________________________________________
// Creating ir_maps
//
// This is the first pass and the one that drives the main
// computation. It walks up and down the IR once. On the way down,
// we count for each function the number of variables as well as
// liveness nodes. A liveness node is basically an expression or
// capture clause that does something of interest: either it has
// interesting control flow or it uses/defines a local variable.
//
// On the way back up, at each function node we create liveness sets
// (we now know precisely how big to make our various vectors and so
// forth) and then do the data-flow propagation to compute the set
// of live variables at each program point.
//
// Finally, we run back over the IR one last time and, using the
// computed liveness, check various safety conditions. For example,
// there must be no live nodes at the definition site for a variable
// unless it has an initializer. Similarly, each non-mutable local
// variable must not be assigned if there is some successor
// assignment. And so forth.
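//
// As an orientation aid only, a condensed sketch of the driver defined in
// `visit_fn` below:
//
//     let mut fn_maps = IrMaps::new(tcx);          // pass 1: number live nodes and variables
//     intravisit::walk_fn(&mut fn_maps, ..);
//     let mut lsets = Liveness::new(&mut fn_maps, body_id);
//     let entry_ln = lsets.compute(&body.value);   // pass 2: fixed-point propagation
//     lsets.visit_body(body);                      // final walk: report errors and warnings
//     lsets.warn_about_unused_args(body, entry_ln);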
impl LiveNode {
fn is_valid(&self) -> bool {
self.0 != u32::MAX
}
}
fn invalid_node() -> LiveNode { LiveNode(u32::MAX) }
struct CaptureInfo {
ln: LiveNode,
var_hid: HirId
}
#[derive(Copy, Clone, Debug)]
struct LocalInfo {
id: HirId,
name: ast::Name,
is_shorthand: bool,
}
#[derive(Copy, Clone, Debug)]
enum VarKind {
Arg(HirId, ast::Name),
Local(LocalInfo),
CleanExit
}
struct IrMaps<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
num_live_nodes: usize,
num_vars: usize,
live_node_map: HirIdMap<LiveNode>,
variable_map: HirIdMap<Variable>,
capture_info_map: HirIdMap<Rc<Vec<CaptureInfo>>>,
var_kinds: Vec<VarKind>,
lnks: Vec<LiveNodeKind>,
}
impl<'a, 'tcx> IrMaps<'a, 'tcx> {
fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> IrMaps<'a, 'tcx> {
IrMaps {
tcx,
num_live_nodes: 0,
num_vars: 0,
live_node_map: HirIdMap::default(),
variable_map: HirIdMap::default(),
capture_info_map: Default::default(),
var_kinds: Vec::new(),
lnks: Vec::new(),
}
}
fn add_live_node(&mut self, lnk: LiveNodeKind) -> LiveNode {
let ln = LiveNode(self.num_live_nodes as u32);
self.lnks.push(lnk);
self.num_live_nodes += 1;
debug!("{:?} is of kind {}", ln,
live_node_kind_to_string(lnk, self.tcx));
ln
}
fn add_live_node_for_node(&mut self, hir_id: HirId, lnk: LiveNodeKind) {
let ln = self.add_live_node(lnk);
self.live_node_map.insert(hir_id, ln);
debug!("{:?} is node {:?}", ln, hir_id);
}
fn add_variable(&mut self, vk: VarKind) -> Variable {
let v = Variable(self.num_vars as u32);
self.var_kinds.push(vk);
self.num_vars += 1;
match vk {
Local(LocalInfo { id: node_id, .. }) | Arg(node_id, _) => {
self.variable_map.insert(node_id, v);
},
CleanExit => {}
}
debug!("{:?} is {:?}", v, vk);
v
}
fn variable(&self, hir_id: HirId, span: Span) -> Variable {
match self.variable_map.get(&hir_id) {
Some(&var) => var,
None => {
span_bug!(span, "no variable registered for id {:?}", hir_id);
}
}
}
fn variable_name(&self, var: Variable) -> String {
match self.var_kinds[var.get()] {
Local(LocalInfo { name, .. }) | Arg(_, name) => {
name.to_string()
},
CleanExit => "<clean-exit>".to_owned()
}
}
fn variable_is_shorthand(&self, var: Variable) -> bool {
match self.var_kinds[var.get()] {
Local(LocalInfo { is_shorthand, .. }) => is_shorthand,
Arg(..) | CleanExit => false
}
}
fn set_captures(&mut self, hir_id: HirId, cs: Vec<CaptureInfo>) {
self.capture_info_map.insert(hir_id, Rc::new(cs));
}
fn lnk(&self, ln: LiveNode) -> LiveNodeKind {
self.lnks[ln.get()]
}
}
fn visit_fn<'a, 'tcx: 'a>(ir: &mut IrMaps<'a, 'tcx>,
fk: FnKind<'tcx>,
decl: &'tcx hir::FnDecl,
body_id: hir::BodyId,
sp: Span,
id: hir::HirId) {
debug!("visit_fn");
// swap in a new set of IR maps for this function body:
let mut fn_maps = IrMaps::new(ir.tcx);
// Don't run unused pass for #[derive()]
if let FnKind::Method(..) = fk {
let parent = ir.tcx.hir().get_parent_item(id);
if let Some(Node::Item(i)) = ir.tcx.hir().find_by_hir_id(parent) {
if i.attrs.iter().any(|a| a.check_name("automatically_derived")) {
return;
}
}
}
debug!("creating fn_maps: {:?}", &fn_maps as *const IrMaps<'_, '_>);
let body = ir.tcx.hir().body(body_id);
for arg in &body.arguments {
let is_shorthand = match arg.pat.node {
crate::hir::PatKind::Struct(..) => true,
_ => false,
};
arg.pat.each_binding(|_bm, hir_id, _x, ident| {
debug!("adding argument {:?}", hir_id);
let var = if is_shorthand {
Local(LocalInfo {
id: hir_id,
name: ident.name,
is_shorthand: true,
})
} else {
Arg(hir_id, ident.name)
};
fn_maps.add_variable(var);
})
};
// gather up the various local variables, significant expressions,
// and so forth:
intravisit::walk_fn(&mut fn_maps, fk, decl, body_id, sp, id);
// compute liveness
let mut lsets = Liveness::new(&mut fn_maps, body_id);
let entry_ln = lsets.compute(&body.value);
// check for various error conditions
lsets.visit_body(body);
lsets.warn_about_unused_args(body, entry_ln);
}
fn add_from_pat<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, pat: &P<hir::Pat>) {
// For struct patterns, take note of which fields used shorthand
// (`x` rather than `x: x`).
let mut shorthand_field_ids = HirIdSet::default();
let mut pats = VecDeque::new();
pats.push_back(pat);
while let Some(pat) = pats.pop_front() {
use crate::hir::PatKind::*;
match pat.node {
Binding(_, _, _, ref inner_pat) => {
pats.extend(inner_pat.iter());
}
Struct(_, ref fields, _) => {
for field in fields {
if field.node.is_shorthand {
shorthand_field_ids.insert(field.node.pat.hir_id);
}
}
}
Ref(ref inner_pat, _) |
Box(ref inner_pat) => {
pats.push_back(inner_pat);
}
TupleStruct(_, ref inner_pats, _) |
Tuple(ref inner_pats, _) => {
pats.extend(inner_pats.iter());
}
Slice(ref pre_pats, ref inner_pat, ref post_pats) => {
pats.extend(pre_pats.iter());
pats.extend(inner_pat.iter());
pats.extend(post_pats.iter());
}
_ => {}
}
}
pat.each_binding(|_bm, hir_id, _sp, ident| {
ir.add_live_node_for_node(hir_id, VarDefNode(ident.span));
ir.add_variable(Local(LocalInfo {
id: hir_id,
name: ident.name,
is_shorthand: shorthand_field_ids.contains(&hir_id)
}));
});
}
fn visit_local<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, local: &'tcx hir::Local) {
add_from_pat(ir, &local.pat);
intravisit::walk_local(ir, local);
}
fn visit_arm<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, arm: &'tcx hir::Arm) {
for pat in &arm.pats {
add_from_pat(ir, pat);
}
intravisit::walk_arm(ir, arm);
}
fn visit_expr<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, expr: &'tcx Expr) {
match expr.node {
// live nodes required for uses or definitions of variables:
hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => {
debug!("expr {}: path that leads to {:?}", expr.hir_id, path.def);
if let Def::Local(..) = path.def {
ir.add_live_node_for_node(expr.hir_id, ExprNode(expr.span));
}
intravisit::walk_expr(ir, expr);
}
hir::ExprKind::Closure(..) => {
// Interesting control flow (for loops can contain labeled
// breaks or continues)
ir.add_live_node_for_node(expr.hir_id, ExprNode(expr.span));
// Make a live_node for each captured variable, with the span
// being the location that the variable is used. This results
// in better error messages than just pointing at the closure
// construction site.
let mut call_caps = Vec::new();
ir.tcx.with_freevars(expr.hir_id, |freevars| {
call_caps.extend(freevars.iter().filter_map(|fv| {
if let Def::Local(rv) = fv.def {
let fv_ln = ir.add_live_node(FreeVarNode(fv.span));
let var_hid = ir.tcx.hir().node_to_hir_id(rv);
Some(CaptureInfo { ln: fv_ln, var_hid })
} else {
None
}
}));
});
ir.set_captures(expr.hir_id, call_caps);
intravisit::walk_expr(ir, expr);
}
// live nodes required for interesting control flow:
hir::ExprKind::If(..) |
hir::ExprKind::Match(..) |
hir::ExprKind::While(..) |
hir::ExprKind::Loop(..) => {
ir.add_live_node_for_node(expr.hir_id, ExprNode(expr.span));
intravisit::walk_expr(ir, expr);
}
hir::ExprKind::Binary(op, ..) if op.node.is_lazy() => {
ir.add_live_node_for_node(expr.hir_id, ExprNode(expr.span));
intravisit::walk_expr(ir, expr);
}
// otherwise, live nodes are not required:
hir::ExprKind::Index(..) |
hir::ExprKind::Field(..) |
hir::ExprKind::Array(..) |
hir::ExprKind::Call(..) |
hir::ExprKind::MethodCall(..) |
hir::ExprKind::Tup(..) |
hir::ExprKind::Binary(..) |
hir::ExprKind::AddrOf(..) |
hir::ExprKind::Cast(..) |
hir::ExprKind::Unary(..) |
hir::ExprKind::Break(..) |
hir::ExprKind::Continue(_) |
hir::ExprKind::Lit(_) |
hir::ExprKind::Ret(..) |
hir::ExprKind::Block(..) |
hir::ExprKind::Assign(..) |
hir::ExprKind::AssignOp(..) |
hir::ExprKind::Struct(..) |
hir::ExprKind::Repeat(..) |
hir::ExprKind::InlineAsm(..) |
hir::ExprKind::Box(..) |
hir::ExprKind::Yield(..) |
hir::ExprKind::Type(..) |
hir::ExprKind::Err |
hir::ExprKind::Path(hir::QPath::TypeRelative(..)) => {
intravisit::walk_expr(ir, expr);
}
}
}
// ______________________________________________________________________
// Computing liveness sets
//
// Actually we compute just a bit more than just liveness, but we use
// the same basic propagation framework in all cases.
#[derive(Clone, Copy)]
struct RWU {
reader: LiveNode,
writer: LiveNode,
used: bool
}
/// Conceptually, this is like a `Vec<RWU>`. But the number of `RWU`s can get
/// very large, so it uses a more compact representation that takes advantage
/// of the fact that when the number of `RWU`s is large, most of them have an
/// invalid reader and an invalid writer.
struct RWUTable {
/// Each entry in `packed_rwus` is either INV_INV_FALSE, INV_INV_TRUE, or
/// an index into `unpacked_rwus`. In the common cases, this compacts the
/// 65 bits of data into 32; in the uncommon cases, it expands the 65 bits
    /// to 96.
///
/// More compact representations are possible -- e.g., use only 2 bits per
/// packed `RWU` and make the secondary table a HashMap that maps from
/// indices to `RWU`s -- but this one strikes a good balance between size
/// and speed.
packed_rwus: Vec<u32>,
unpacked_rwus: Vec<RWU>,
}
// A constant representing `RWU { reader: invalid_node(); writer: invalid_node(); used: false }`.
const INV_INV_FALSE: u32 = u32::MAX;
// A constant representing `RWU { reader: invalid_node(); writer: invalid_node(); used: true }`.
const INV_INV_TRUE: u32 = u32::MAX - 1;
impl RWUTable {
fn new(num_rwus: usize) -> RWUTable {
Self {
packed_rwus: vec![INV_INV_FALSE; num_rwus],
unpacked_rwus: vec![],
}
}
fn get(&self, idx: usize) -> RWU {
let packed_rwu = self.packed_rwus[idx];
match packed_rwu {
INV_INV_FALSE => RWU { reader: invalid_node(), writer: invalid_node(), used: false },
INV_INV_TRUE => RWU { reader: invalid_node(), writer: invalid_node(), used: true },
_ => self.unpacked_rwus[packed_rwu as usize],
}
}
fn get_reader(&self, idx: usize) -> LiveNode {
let packed_rwu = self.packed_rwus[idx];
match packed_rwu {
INV_INV_FALSE | INV_INV_TRUE => invalid_node(),
_ => self.unpacked_rwus[packed_rwu as usize].reader,
}
}
fn get_writer(&self, idx: usize) -> LiveNode {
let packed_rwu = self.packed_rwus[idx];
match packed_rwu {
INV_INV_FALSE | INV_INV_TRUE => invalid_node(),
_ => self.unpacked_rwus[packed_rwu as usize].writer,
}
}
fn get_used(&self, idx: usize) -> bool {
let packed_rwu = self.packed_rwus[idx];
match packed_rwu {
INV_INV_FALSE => false,
INV_INV_TRUE => true,
_ => self.unpacked_rwus[packed_rwu as usize].used,
}
}
#[inline]
fn copy_packed(&mut self, dst_idx: usize, src_idx: usize) {
self.packed_rwus[dst_idx] = self.packed_rwus[src_idx];
}
fn assign_unpacked(&mut self, idx: usize, rwu: RWU) {
if rwu.reader == invalid_node() && rwu.writer == invalid_node() {
// When we overwrite an indexing entry in `self.packed_rwus` with
// `INV_INV_{TRUE,FALSE}` we don't remove the corresponding entry
// from `self.unpacked_rwus`; it's not worth the effort, and we
// can't have entries shifting around anyway.
self.packed_rwus[idx] = if rwu.used {
INV_INV_TRUE
} else {
INV_INV_FALSE
}
} else {
// Add a new RWU to `unpacked_rwus` and make `packed_rwus[idx]`
// point to it.
self.packed_rwus[idx] = self.unpacked_rwus.len() as u32;
self.unpacked_rwus.push(rwu);
}
}
fn assign_inv_inv(&mut self, idx: usize) {
self.packed_rwus[idx] = if self.get_used(idx) {
INV_INV_TRUE
} else {
INV_INV_FALSE
};
}
}
#[derive(Copy, Clone)]
struct Specials {
exit_ln: LiveNode,
fallthrough_ln: LiveNode,
clean_exit_var: Variable
}
const ACC_READ: u32 = 1;
const ACC_WRITE: u32 = 2;
const ACC_USE: u32 = 4;
struct Liveness<'a, 'tcx: 'a> {
ir: &'a mut IrMaps<'a, 'tcx>,
tables: &'a ty::TypeckTables<'tcx>,
s: Specials,
successors: Vec<LiveNode>,
rwu_table: RWUTable,
// mappings from loop node ID to LiveNode
// ("break" label should map to loop node ID,
// it probably doesn't now)
break_ln: HirIdMap<LiveNode>,
cont_ln: HirIdMap<LiveNode>,
}
impl<'a, 'tcx> Liveness<'a, 'tcx> {
fn new(ir: &'a mut IrMaps<'a, 'tcx>, body: hir::BodyId) -> Liveness<'a, 'tcx> {
// Special nodes and variables:
// - exit_ln represents the end of the fn, either by return or panic
// - implicit_ret_var is a pseudo-variable that represents
// an implicit return
let specials = Specials {
exit_ln: ir.add_live_node(ExitNode),
fallthrough_ln: ir.add_live_node(ExitNode),
clean_exit_var: ir.add_variable(CleanExit)
};
let tables = ir.tcx.body_tables(body);
let num_live_nodes = ir.num_live_nodes;
let num_vars = ir.num_vars;
Liveness {
ir,
tables,
s: specials,
successors: vec![invalid_node(); num_live_nodes],
rwu_table: RWUTable::new(num_live_nodes * num_vars),
break_ln: Default::default(),
cont_ln: Default::default(),
}
}
fn live_node(&self, hir_id: HirId, span: Span) -> LiveNode {
match self.ir.live_node_map.get(&hir_id) {
Some(&ln) => ln,
None => {
// This must be a mismatch between the ir_map construction
// above and the propagation code below; the two sets of
// code have to agree about which AST nodes are worth
// creating liveness nodes for.
span_bug!(
span,
"no live node registered for node {:?}",
hir_id);
}
}
}
fn variable(&self, hir_id: HirId, span: Span) -> Variable {
self.ir.variable(hir_id, span)
}
fn pat_bindings<F>(&mut self, pat: &hir::Pat, mut f: F) where
F: FnMut(&mut Liveness<'a, 'tcx>, LiveNode, Variable, Span, HirId),
{
pat.each_binding(|_bm, hir_id, sp, n| {
let ln = self.live_node(hir_id, sp);
let var = self.variable(hir_id, n.span);
f(self, ln, var, n.span, hir_id);
})
}
fn arm_pats_bindings<F>(&mut self, pat: Option<&hir::Pat>, f: F) where
F: FnMut(&mut Liveness<'a, 'tcx>, LiveNode, Variable, Span, HirId),
{
if let Some(pat) = pat {
self.pat_bindings(pat, f);
}
}
fn define_bindings_in_pat(&mut self, pat: &hir::Pat, succ: LiveNode)
-> LiveNode {
self.define_bindings_in_arm_pats(Some(pat), succ)
}
fn define_bindings_in_arm_pats(&mut self, pat: Option<&hir::Pat>, succ: LiveNode)
-> LiveNode {
let mut succ = succ;
self.arm_pats_bindings(pat, |this, ln, var, _sp, _id| {
this.init_from_succ(ln, succ);
this.define(ln, var);
succ = ln;
});
succ
}
fn idx(&self, ln: LiveNode, var: Variable) -> usize {
ln.get() * self.ir.num_vars + var.get()
}
fn live_on_entry(&self, ln: LiveNode, var: Variable) -> Option<LiveNodeKind> {
assert!(ln.is_valid());
let reader = self.rwu_table.get_reader(self.idx(ln, var));
if reader.is_valid() { Some(self.ir.lnk(reader)) } else { None }
}
// Is this variable live on entry to any of its successor nodes?
fn live_on_exit(&self, ln: LiveNode, var: Variable)
-> Option<LiveNodeKind> {
let successor = self.successors[ln.get()];
self.live_on_entry(successor, var)
}
fn used_on_entry(&self, ln: LiveNode, var: Variable) -> bool {
assert!(ln.is_valid());
self.rwu_table.get_used(self.idx(ln, var))
}
fn assigned_on_entry(&self, ln: LiveNode, var: Variable)
-> Option<LiveNodeKind> {
assert!(ln.is_valid());
let writer = self.rwu_table.get_writer(self.idx(ln, var));
if writer.is_valid() { Some(self.ir.lnk(writer)) } else { None }
}
fn assigned_on_exit(&self, ln: LiveNode, var: Variable)
-> Option<LiveNodeKind> {
let successor = self.successors[ln.get()];
self.assigned_on_entry(successor, var)
}
fn indices2<F>(&mut self, ln: LiveNode, succ_ln: LiveNode, mut op: F) where
F: FnMut(&mut Liveness<'a, 'tcx>, usize, usize),
{
let node_base_idx = self.idx(ln, Variable(0));
let succ_base_idx = self.idx(succ_ln, Variable(0));
for var_idx in 0..self.ir.num_vars {
op(self, node_base_idx + var_idx, succ_base_idx + var_idx);
}
}
fn write_vars<F>(&self,
wr: &mut dyn Write,
ln: LiveNode,
mut test: F)
-> io::Result<()> where
F: FnMut(usize) -> LiveNode,
{
let node_base_idx = self.idx(ln, Variable(0));
for var_idx in 0..self.ir.num_vars {
let idx = node_base_idx + var_idx;
if test(idx).is_valid() {
write!(wr, " {:?}", Variable(var_idx as u32))?;
}
}
Ok(())
}
#[allow(unused_must_use)]
fn ln_str(&self, ln: LiveNode) -> String {
let mut wr = Vec::new();
{
let wr = &mut wr as &mut dyn Write;
write!(wr, "[ln({:?}) of kind {:?} reads", ln.get(), self.ir.lnk(ln));
self.write_vars(wr, ln, |idx| self.rwu_table.get_reader(idx));
write!(wr, " writes");
self.write_vars(wr, ln, |idx| self.rwu_table.get_writer(idx));
write!(wr, " precedes {:?}]", self.successors[ln.get()]);
}
String::from_utf8(wr).unwrap()
}
fn init_empty(&mut self, ln: LiveNode, succ_ln: LiveNode) {
self.successors[ln.get()] = succ_ln;
// It is not necessary to initialize the RWUs here because they are all
// set to INV_INV_FALSE when they are created, and the sets only grow
// during iterations.
}
fn init_from_succ(&mut self, ln: LiveNode, succ_ln: LiveNode) {
// more efficient version of init_empty() / merge_from_succ()
self.successors[ln.get()] = succ_ln;
self.indices2(ln, succ_ln, |this, idx, succ_idx| {
this.rwu_table.copy_packed(idx, succ_idx);
});
debug!("init_from_succ(ln={}, succ={})",
self.ln_str(ln), self.ln_str(succ_ln));
}
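// Join the successor's RWU facts into `ln`, returning true if anything changed.
// Loop handling relies on calling this repeatedly until a fixed point is reached
// (see `propagate_through_loop`).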
fn merge_from_succ(&mut self,
ln: LiveNode,
succ_ln: LiveNode,
first_merge: bool)
-> bool {
if ln == succ_ln { return false; }
let mut changed = false;
self.indices2(ln, succ_ln, |this, idx, succ_idx| {
let mut rwu = this.rwu_table.get(idx);
let succ_rwu = this.rwu_table.get(succ_idx);
if succ_rwu.reader.is_valid() && !rwu.reader.is_valid() {
rwu.reader = succ_rwu.reader;
changed = true
}
if succ_rwu.writer.is_valid() && !rwu.writer.is_valid() {
rwu.writer = succ_rwu.writer;
changed = true
}
if succ_rwu.used && !rwu.used {
rwu.used = true;
changed = true;
}
if changed {
this.rwu_table.assign_unpacked(idx, rwu);
}
});
debug!("merge_from_succ(ln={:?}, succ={}, first_merge={}, changed={})",
ln, self.ln_str(succ_ln), first_merge, changed);
return changed;
}
// Indicates that a local variable was *defined*; we know that no
// uses of the variable can precede the definition (resolve checks
// this) so we just clear out all the data.
fn define(&mut self, writer: LiveNode, var: Variable) {
let idx = self.idx(writer, var);
self.rwu_table.assign_inv_inv(idx);
debug!("{:?} defines {:?} (idx={}): {}", writer, var,
idx, self.ln_str(writer));
}
// Either read, write, or both depending on the acc bitset
fn acc(&mut self, ln: LiveNode, var: Variable, acc: u32) {
debug!("{:?} accesses[{:x}] {:?}: {}",
ln, acc, var, self.ln_str(ln));
let idx = self.idx(ln, var);
let mut rwu = self.rwu_table.get(idx);
if (acc & ACC_WRITE) != 0 {
rwu.reader = invalid_node();
rwu.writer = ln;
}
// Important: if we both read/write, must do read second
// or else the write will override.
if (acc & ACC_READ) != 0 {
rwu.reader = ln;
}
if (acc & ACC_USE) != 0 {
rwu.used = true;
}
self.rwu_table.assign_unpacked(idx, rwu);
}
fn compute(&mut self, body: &hir::Expr) -> LiveNode {
debug!("compute: using id for body, {}",
self.ir.tcx.hir().hir_to_pretty_string(body.hir_id));
// the fallthrough exit is only for those cases where we do not
// explicitly return:
let s = self.s;
self.init_from_succ(s.fallthrough_ln, s.exit_ln);
self.acc(s.fallthrough_ln, s.clean_exit_var, ACC_READ);
let entry_ln = self.propagate_through_expr(body, s.fallthrough_ln);
// hack to skip the loop unless debug! is enabled:
debug!("^^ liveness computation results for body {} (entry={:?})", {
for ln_idx in 0..self.ir.num_live_nodes {
debug!("{:?}", self.ln_str(LiveNode(ln_idx as u32)));
}
body.hir_id
},
entry_ln);
entry_ln
}
fn propagate_through_block(&mut self, blk: &hir::Block, succ: LiveNode)
-> LiveNode {
if blk.targeted_by_break {
self.break_ln.insert(blk.hir_id, succ);
}
let succ = self.propagate_through_opt_expr(blk.expr.as_ref().map(|e| &**e), succ);
blk.stmts.iter().rev().fold(succ, |succ, stmt| {
self.propagate_through_stmt(stmt, succ)
})
}
fn propagate_through_stmt(&mut self, stmt: &hir::Stmt, succ: LiveNode)
-> LiveNode {
match stmt.node {
hir::StmtKind::Local(ref local) => {
// Note: we mark the variable as defined regardless of whether
// there is an initializer. Initially I had thought to only mark
// the live variable as defined if it was initialized, and then we
// could check for uninit variables just by scanning what is live
// at the start of the function. But that doesn't work so well for
// immutable variables defined in a loop:
// loop { let x; x = 5; }
// because the "assignment" loops back around and generates an error.
//
// So now we just check that variables defined w/o an
// initializer are not live at the point of their
// initialization, which is mildly more complex than checking
// once at the func header but otherwise equivalent.
let succ = self.propagate_through_opt_expr(local.init.as_ref().map(|e| &**e), succ);
self.define_bindings_in_pat(&local.pat, succ)
}
hir::StmtKind::Item(..) => succ,
hir::StmtKind::Expr(ref expr) | hir::StmtKind::Semi(ref expr) => {
self.propagate_through_expr(&expr, succ)
}
}
}
fn propagate_through_exprs(&mut self, exprs: &[Expr], succ: LiveNode)
-> LiveNode {
exprs.iter().rev().fold(succ, |succ, expr| {
self.propagate_through_expr(&expr, succ)
})
}
fn propagate_through_opt_expr(&mut self,
opt_expr: Option<&Expr>,
succ: LiveNode)
-> LiveNode {
opt_expr.map_or(succ, |expr| self.propagate_through_expr(expr, succ))
}
fn propagate_through_expr(&mut self, expr: &Expr, succ: LiveNode)
-> LiveNode {
debug!("propagate_through_expr: {}", self.ir.tcx.hir().hir_to_pretty_string(expr.hir_id));
match expr.node {
// Interesting cases with control flow or which gen/kill
hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => {
self.access_path(expr.hir_id, path, succ, ACC_READ | ACC_USE)
}
hir::ExprKind::Field(ref e, _) => {
self.propagate_through_expr(&e, succ)
}
hir::ExprKind::Closure(..) => {
debug!("{} is an ExprKind::Closure",
self.ir.tcx.hir().hir_to_pretty_string(expr.hir_id));
// the construction of a closure itself is not important,
// but we have to consider the closed over variables.
let caps = self.ir.capture_info_map.get(&expr.hir_id).cloned().unwrap_or_else(||
span_bug!(expr.span, "no registered caps"));
caps.iter().rev().fold(succ, |succ, cap| {
self.init_from_succ(cap.ln, succ);
let var = self.variable(cap.var_hid, expr.span);
self.acc(cap.ln, var, ACC_READ | ACC_USE);
cap.ln
})
}
hir::ExprKind::If(ref cond, ref then, ref els) => {
//
// (cond)
// |
// v
// (expr)
// / \
// | |
// v v
// (then)(els)
// | |
// v v
// ( succ )
//
let else_ln = self.propagate_through_opt_expr(els.as_ref().map(|e| &**e), succ);
let then_ln = self.propagate_through_expr(&then, succ);
let ln = self.live_node(expr.hir_id, expr.span);
self.init_from_succ(ln, else_ln);
self.merge_from_succ(ln, then_ln, false);
self.propagate_through_expr(&cond, ln)
}
hir::ExprKind::While(ref cond, ref blk, _) => {
self.propagate_through_loop(expr, WhileLoop(&cond), &blk, succ)
}
// Note that labels have been resolved, so we don't need to look
// at the label ident
hir::ExprKind::Loop(ref blk, _, _) => {
self.propagate_through_loop(expr, LoopLoop, &blk, succ)
}
hir::ExprKind::Match(ref e, ref arms, _) => {
//
// (e)
// |
// v
// (expr)
// / | \
// | | |
// v v v
// (..arms..)
// | | |
// v v v
// ( succ )
//
//
let ln = self.live_node(expr.hir_id, expr.span);
self.init_empty(ln, succ);
let mut first_merge = true;
for arm in arms {
let body_succ = self.propagate_through_expr(&arm.body, succ);
let guard_succ = self.propagate_through_opt_expr(
arm.guard.as_ref().map(|hir::Guard::If(e)| &**e),
body_succ
);
// only consider the first pattern; any later patterns must have
// the same bindings, and we also consider the first pattern to be
// the "authoritative" set of ids
let arm_succ =
self.define_bindings_in_arm_pats(arm.pats.first().map(|p| &**p),
guard_succ);
self.merge_from_succ(ln, arm_succ, first_merge);
first_merge = false;
};
self.propagate_through_expr(&e, ln)
}
hir::ExprKind::Ret(ref o_e) => {
// ignore succ and subst exit_ln:
let exit_ln = self.s.exit_ln;
self.propagate_through_opt_expr(o_e.as_ref().map(|e| &**e), exit_ln)
}
hir::ExprKind::Break(label, ref opt_expr) => {
// Find which label this break jumps to
let target = match label.target_id {
Ok(hir_id) => self.break_ln.get(&hir_id),
Err(err) => span_bug!(expr.span, "loop scope error: {}", err),
}.cloned();
// Now that we know the label we're going to,
// look it up in the break loop nodes table
match target {
Some(b) => self.propagate_through_opt_expr(opt_expr.as_ref().map(|e| &**e), b),
None => span_bug!(expr.span, "break to unknown label")
}
}
hir::ExprKind::Continue(label) => {
// Find which label this expr continues to
let sc = label.target_id.unwrap_or_else(|err|
span_bug!(expr.span, "loop scope error: {}", err));
// Now that we know the label we're going to,
// look it up in the continue loop nodes table
self.cont_ln.get(&sc).cloned().unwrap_or_else(||
span_bug!(expr.span, "continue to unknown label"))
}
hir::ExprKind::Assign(ref l, ref r) => {
// see comment on places in
// propagate_through_place_components()
let succ = self.write_place(&l, succ, ACC_WRITE);
let succ = self.propagate_through_place_components(&l, succ);
self.propagate_through_expr(&r, succ)
}
hir::ExprKind::AssignOp(_, ref l, ref r) => {
// an overloaded assign op is like a method call
if self.tables.is_method_call(expr) {
let succ = self.propagate_through_expr(&l, succ);
self.propagate_through_expr(&r, succ)
} else {
// see comment on places in
// propagate_through_place_components()
let succ = self.write_place(&l, succ, ACC_WRITE|ACC_READ);
let succ = self.propagate_through_expr(&r, succ);
self.propagate_through_place_components(&l, succ)
}
}
// Uninteresting cases: just propagate in rev exec order
hir::ExprKind::Array(ref exprs) => {
self.propagate_through_exprs(exprs, succ)
}
hir::ExprKind::Struct(_, ref fields, ref with_expr) => {
let succ = self.propagate_through_opt_expr(with_expr.as_ref().map(|e| &**e), succ);
fields.iter().rev().fold(succ, |succ, field| {
self.propagate_through_expr(&field.expr, succ)
})
}
hir::ExprKind::Call(ref f, ref args) => {
let m = self.ir.tcx.hir().get_module_parent_by_hir_id(expr.hir_id);
let succ = if self.ir.tcx.is_ty_uninhabited_from(m, self.tables.expr_ty(expr)) {
self.s.exit_ln
} else {
succ
};
let succ = self.propagate_through_exprs(args, succ);
self.propagate_through_expr(&f, succ)
}
hir::ExprKind::MethodCall(.., ref args) => {
let m = self.ir.tcx.hir().get_module_parent_by_hir_id(expr.hir_id);
let succ = if self.ir.tcx.is_ty_uninhabited_from(m, self.tables.expr_ty(expr)) {
self.s.exit_ln
} else {
succ
};
self.propagate_through_exprs(args, succ)
}
hir::ExprKind::Tup(ref exprs) => {
self.propagate_through_exprs(exprs, succ)
}
hir::ExprKind::Binary(op, ref l, ref r) if op.node.is_lazy() => {
let r_succ = self.propagate_through_expr(&r, succ);
let ln = self.live_node(expr.hir_id, expr.span);
self.init_from_succ(ln, succ);
self.merge_from_succ(ln, r_succ, false);
self.propagate_through_expr(&l, ln)
}
hir::ExprKind::Index(ref l, ref r) |
hir::ExprKind::Binary(_, ref l, ref r) => {
let r_succ = self.propagate_through_expr(&r, succ);
self.propagate_through_expr(&l, r_succ)
}
hir::ExprKind::Box(ref e) |
hir::ExprKind::AddrOf(_, ref e) |
hir::ExprKind::Cast(ref e, _) |
hir::ExprKind::Type(ref e, _) |
hir::ExprKind::Unary(_, ref e) |
hir::ExprKind::Yield(ref e) |
hir::ExprKind::Repeat(ref e, _) => {
self.propagate_through_expr(&e, succ)
}
hir::ExprKind::InlineAsm(ref ia, ref outputs, ref inputs) => {
let succ = ia.outputs.iter().zip(outputs).rev().fold(succ, |succ, (o, output)| {
// see comment on places
// in propagate_through_place_components()
if o.is_indirect {
self.propagate_through_expr(output, succ)
} else {
let acc = if o.is_rw { ACC_WRITE|ACC_READ } else { ACC_WRITE };
let succ = self.write_place(output, succ, acc);
self.propagate_through_place_components(output, succ)
}});
// Inputs are executed first. Propagate last because of rev order
self.propagate_through_exprs(inputs, succ)
}
hir::ExprKind::Lit(..) | hir::ExprKind::Err |
hir::ExprKind::Path(hir::QPath::TypeRelative(..)) => {
succ
}
// Note that labels have been resolved, so we don't need to look
// at the label ident
hir::ExprKind::Block(ref blk, _) => {
self.propagate_through_block(&blk, succ)
}
}
}
fn propagate_through_place_components(&mut self,
expr: &Expr,
succ: LiveNode)
-> LiveNode {
// # Places
//
// In general, the full flow graph structure for an
// assignment/move/etc can be handled in one of two ways,
// depending on whether what is being assigned is a "tracked
// value" or not. A tracked value is basically a local
// variable or argument.
//
// The two kinds of graphs are:
//
// Tracked place Untracked place
// ----------------------++-----------------------
// ||
// | || |
// v || v
// (rvalue) || (rvalue)
// | || |
// v || v
// (write of place) || (place components)
// | || |
// v || v
// (succ) || (succ)
// ||
// ----------------------++-----------------------
//
// I will cover the two cases in turn:
//
// # Tracked places
//
// A tracked place is a local variable/argument `x`. In
// these cases, the link_node where the write occurs is linked
// to the node id of `x`. The `write_place()` routine generates
// the contents of this node. There are no subcomponents to
// consider.
//
// # Non-tracked places
//
// These are places like `x[5]` or `x.f`. In that case, we
// basically ignore the value which is written to but generate
// reads for the components---`x` in these two examples. The
// components reads are generated by
// `propagate_through_place_components()` (this fn).
//
// # Illegal places
//
// It is still possible to observe assignments to non-places;
// these errors are detected in the later pass borrowck. We
// just ignore such cases and treat them as reads.
match expr.node {
hir::ExprKind::Path(_) => succ,
hir::ExprKind::Field(ref e, _) => self.propagate_through_expr(&e, succ),
_ => self.propagate_through_expr(expr, succ)
}
}
// see comment on propagate_through_place()
fn write_place(&mut self, expr: &Expr, succ: LiveNode, acc: u32) -> LiveNode {
match expr.node {
hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => {
self.access_path(expr.hir_id, path, succ, acc)
}
// We do not track other places, so just propagate through
// to their subcomponents. Also, it may happen that
// non-places occur here, because those are detected in the
// later pass borrowck.
_ => succ
}
}
fn access_var(&mut self, hir_id: HirId, nid: NodeId, succ: LiveNode, acc: u32, span: Span)
-> LiveNode {
let ln = self.live_node(hir_id, span);
if acc != 0 {
self.init_from_succ(ln, succ);
let var_hid = self.ir.tcx.hir().node_to_hir_id(nid);
let var = self.variable(var_hid, span);
self.acc(ln, var, acc);
}
ln
}
fn access_path(&mut self, hir_id: HirId, path: &hir::Path, succ: LiveNode, acc: u32)
-> LiveNode {
match path.def {
Def::Local(nid) => {
self.access_var(hir_id, nid, succ, acc, path.span)
}
_ => succ
}
}
fn propagate_through_loop(&mut self,
expr: &Expr,
kind: LoopKind<'_>,
body: &hir::Block,
succ: LiveNode)
-> LiveNode {
/*
We model control flow like this:
(cond) <--+
| |
v |
+-- (expr) |
| | |
| v |
| (body) ---+
|
|
v
(succ)
*/
// first iteration:
let mut first_merge = true;
let ln = self.live_node(expr.hir_id, expr.span);
self.init_empty(ln, succ);
match kind {
LoopLoop => {}
_ => {
// If this is not a `loop` loop, then it's possible we bypass
// the body altogether. Otherwise, the only way is via a `break`
// in the loop body.
self.merge_from_succ(ln, succ, first_merge);
first_merge = false;
}
}
debug!("propagate_through_loop: using id for loop body {} {}",
expr.hir_id, self.ir.tcx.hir().hir_to_pretty_string(body.hir_id));
self.break_ln.insert(expr.hir_id, succ);
let cond_ln = match kind {
LoopLoop => ln,
WhileLoop(ref cond) => self.propagate_through_expr(&cond, ln),
};
self.cont_ln.insert(expr.hir_id, cond_ln);
let body_ln = self.propagate_through_block(body, cond_ln);
// repeat until fixed point is reached:
while self.merge_from_succ(ln, body_ln, first_merge) {
first_merge = false;
let new_cond_ln = match kind {
LoopLoop => ln,
WhileLoop(ref cond) => {
self.propagate_through_expr(&cond, ln)
}
};
assert_eq!(cond_ln, new_cond_ln);
assert_eq!(body_ln, self.propagate_through_block(body, cond_ln));
}
cond_ln
}
}
// _______________________________________________________________________
// Checking for error conditions
impl<'a, 'tcx> Visitor<'tcx> for Liveness<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::None
}
fn visit_local(&mut self, l: &'tcx hir::Local) {
check_local(self, l);
}
fn visit_expr(&mut self, ex: &'tcx Expr) {
check_expr(self, ex);
}
fn visit_arm(&mut self, a: &'tcx hir::Arm) {
check_arm(self, a);
}
}
fn check_local<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, local: &'tcx hir::Local) {
match local.init {
Some(_) => {
this.warn_about_unused_or_dead_vars_in_pat(&local.pat);
},
None => {
this.pat_bindings(&local.pat, |this, ln, var, sp, id| {
let span = local.pat.simple_ident().map_or(sp, |ident| ident.span);
this.warn_about_unused(vec![span], id, ln, var);
})
}
}
intravisit::walk_local(this, local);
}
fn check_arm<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, arm: &'tcx hir::Arm) {
// Only consider the variable from the first pattern; any later patterns must have
// the same bindings, and we also consider the first pattern to be the "authoritative" set of
// ids. However, we should take the spans of variables with the same name from the later
// patterns so the suggestions to prefix with underscores will apply to those too.
let mut vars: BTreeMap<String, (LiveNode, Variable, HirId, Vec<Span>)> = Default::default();
for pat in &arm.pats {
this.arm_pats_bindings(Some(&*pat), |this, ln, var, sp, id| {
let name = this.ir.variable_name(var);
vars.entry(name)
.and_modify(|(.., spans)| {
spans.push(sp);
})
.or_insert_with(|| {
(ln, var, id, vec![sp])
});
});
}
for (_, (ln, var, id, spans)) in vars {
this.warn_about_unused(spans, id, ln, var);
}
intravisit::walk_arm(this, arm);
}
fn check_expr<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, expr: &'tcx Expr) {
match expr.node {
hir::ExprKind::Assign(ref l, _) => {
this.check_place(&l);
intravisit::walk_expr(this, expr);
}
hir::ExprKind::AssignOp(_, ref l, _) => {
if !this.tables.is_method_call(expr) {
this.check_place(&l);
}
intravisit::walk_expr(this, expr);
}
hir::ExprKind::InlineAsm(ref ia, ref outputs, ref inputs) => {
for input in inputs {
this.visit_expr(input);
}
// Output operands must be places
for (o, output) in ia.outputs.iter().zip(outputs) {
if !o.is_indirect {
this.check_place(output);
}
this.visit_expr(output);
}
intravisit::walk_expr(this, expr);
}
// no correctness conditions related to liveness
hir::ExprKind::Call(..) | hir::ExprKind::MethodCall(..) | hir::ExprKind::If(..) |
hir::ExprKind::Match(..) | hir::ExprKind::While(..) | hir::ExprKind::Loop(..) |
hir::ExprKind::Index(..) | hir::ExprKind::Field(..) |
hir::ExprKind::Array(..) | hir::ExprKind::Tup(..) | hir::ExprKind::Binary(..) |
hir::ExprKind::Cast(..) | hir::ExprKind::Unary(..) | hir::ExprKind::Ret(..) |
hir::ExprKind::Break(..) | hir::ExprKind::Continue(..) | hir::ExprKind::Lit(_) |
hir::ExprKind::Block(..) | hir::ExprKind::AddrOf(..) |
hir::ExprKind::Struct(..) | hir::ExprKind::Repeat(..) |
hir::ExprKind::Closure(..) | hir::ExprKind::Path(_) | hir::ExprKind::Yield(..) |
hir::ExprKind::Box(..) | hir::ExprKind::Type(..) | hir::ExprKind::Err => {
intravisit::walk_expr(this, expr);
}
}
}
impl<'a, 'tcx> Liveness<'a, 'tcx> {
fn check_place(&mut self, expr: &'tcx Expr) {
match expr.node {
hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => {
if let Def::Local(nid) = path.def {
// Assignment to an immutable variable or argument: only legal
// if there is no later assignment. If this local is actually
// mutable, then check for a reassignment to flag the mutability
// as being used.
let ln = self.live_node(expr.hir_id, expr.span);
let var_hid = self.ir.tcx.hir().node_to_hir_id(nid);
let var = self.variable(var_hid, expr.span);
self.warn_about_dead_assign(expr.span, expr.hir_id, ln, var);
}
}
_ => {
// For other kinds of places, no checks are required,
// and any embedded expressions are actually rvalues
intravisit::walk_expr(self, expr);
}
}
}
fn should_warn(&self, var: Variable) -> Option<String> {
let name = self.ir.variable_name(var);
if name.is_empty() || name.as_bytes()[0] == b'_' {
None
} else {
Some(name)
}
}
fn warn_about_unused_args(&self, body: &hir::Body, entry_ln: LiveNode) {
for arg in &body.arguments {
arg.pat.each_binding(|_bm, hir_id, _, ident| {
let sp = ident.span;
let var = self.variable(hir_id, sp);
// Ignore unused self.
if ident.name != keywords::SelfLower.name() {
if !self.warn_about_unused(vec![sp], hir_id, entry_ln, var) {
if self.live_on_entry(entry_ln, var).is_none() {
self.report_dead_assign(hir_id, sp, var, true);
}
}
}
})
}
}
fn warn_about_unused_or_dead_vars_in_pat(&mut self, pat: &hir::Pat) {
self.pat_bindings(pat, |this, ln, var, sp, id| {
if !this.warn_about_unused(vec![sp], id, ln, var) {
this.warn_about_dead_assign(sp, id, ln, var);
}
})
}
fn warn_about_unused(&self,
spans: Vec<Span>,
hir_id: HirId,
ln: LiveNode,
var: Variable)
-> bool {
if !self.used_on_entry(ln, var) {
let r = self.should_warn(var);
if let Some(name) = r {
// annoying: for parameters in funcs like `fn(x: i32)
// {ret}`, there is only one node, so asking about
// assigned_on_exit() is not meaningful.
let is_assigned = if ln == self.s.exit_ln {
false
} else {
self.assigned_on_exit(ln, var).is_some()
};
if is_assigned {
self.ir.tcx.lint_hir_note(
lint::builtin::UNUSED_VARIABLES,
hir_id,
spans.clone(),
&format!("variable `{}` is assigned to, but never used", name),
&format!("consider using `_{}` instead", name),
);
} else if name != "self" {
let mut err = self.ir.tcx.struct_span_lint_hir(
lint::builtin::UNUSED_VARIABLES,
hir_id,
spans.clone(),
&format!("unused variable: `{}`", name),
);
if self.ir.variable_is_shorthand(var) {
err.multipart_suggestion(
"try ignoring the field",
spans.iter().map(|span| (*span, format!("{}: _", name))).collect(),
Applicability::MachineApplicable
);
} else {
err.multipart_suggestion(
"consider prefixing with an underscore",
spans.iter().map(|span| (*span, format!("_{}", name))).collect(),
Applicability::MachineApplicable,
);
}
err.emit()
}
}
true
} else {
false
}
}
fn warn_about_dead_assign(&self, sp: Span, hir_id: HirId, ln: LiveNode, var: Variable) {
if self.live_on_exit(ln, var).is_none() {
self.report_dead_assign(hir_id, sp, var, false);
}
}
fn report_dead_assign(&self, hir_id: HirId, sp: Span, var: Variable, is_argument: bool) {
if let Some(name) = self.should_warn(var) {
if is_argument {
self.ir.tcx.struct_span_lint_hir(lint::builtin::UNUSED_ASSIGNMENTS, hir_id, sp,
&format!("value passed to `{}` is never read", name))
.help("maybe it is overwritten before being read?")
.emit();
} else {
self.ir.tcx.struct_span_lint_hir(lint::builtin::UNUSED_ASSIGNMENTS, hir_id, sp,
&format!("value assigned to `{}` is never read", name))
.help("maybe it is overwritten before being read?")
.emit();
}
}
}
}
| 36.437276 | 100 | 0.532445 |
0e705a31ce132e9e3f7c67702896c96d97adecbe | 12,679 | // this file is auto-generated by hap-codegen
use serde::ser::{Serialize, Serializer};
/// HAP Service and Characteristic type representation.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum HapType {
Unknown,
AccessoryFlags,
Active,
ActiveIdentifier,
AdministratorOnlyAccess,
AirParticulateDensity,
AirParticulateSize,
AirQuality,
AudioFeedback,
BatteryLevel,
Brightness,
CarbonDioxideDetected,
CarbonDioxideLevel,
CarbonDioxidePeakLevel,
CarbonMonoxideDetected,
CarbonMonoxideLevel,
CarbonMonoxidePeakLevel,
ChargingState,
ClosedCaptions,
ConfiguredName,
DisplayOrder,
ColorTemperature,
ContactSensorState,
CoolingThresholdTemperature,
CurrentAirPurifierState,
CurrentAmbientLightLevel,
CurrentDoorState,
CurrentFanState,
CurrentHeaterCoolerState,
CurrentHeatingCoolingState,
CurrentHorizontalTiltAngle,
CurrentHumidifierDehumidifierState,
CurrentMediaState,
TargetMediaState,
CurrentPosition,
CurrentRelativeHumidity,
CurrentSlatState,
CurrentTemperature,
CurrentTiltAngle,
CurrentVerticalTiltAngle,
DigitalZoom,
FilterChangeIndication,
FilterLifeLevel,
FirmwareRevision,
HardwareRevision,
HeatingThresholdTemperature,
HoldPosition,
Hue,
Identify,
InputSourceType,
InputDeviceType,
Identifier,
CurrentVisibilityState,
TargetVisibilityState,
ImageMirroring,
ImageRotation,
InUse,
IsConfigured,
LeakDetected,
LockControlPoint,
LockCurrentState,
LockLastKnownAction,
LockManagementAutoSecurityTimeout,
LockPhysicalControls,
LockTargetState,
Logs,
Manufacturer,
Model,
MotionDetected,
Mute,
Name,
NightVision,
NitrogenDioxideDensity,
ObstructionDetected,
OccupancyDetected,
On,
OpticalZoom,
OutletInUse,
OzoneDensity,
PairSetup,
PairVerify,
PairingFeatures,
PairingPairings,
Pm10Density,
Pm2_5Density,
PositionState,
PictureMode,
PowerModeSelection,
ProgramMode,
ProgrammableSwitchEvent,
RemoteKey,
RelativeHumidityDehumidifierThreshold,
RelativeHumidityHumidifierThreshold,
RemainingDuration,
ResetFilterIndication,
RotationDirection,
RotationSpeed,
Saturation,
SecuritySystemAlarmType,
SecuritySystemCurrentState,
SecuritySystemTargetState,
SelectedRtpStreamConfiguration,
SerialNumber,
ServiceLabelIndex,
ServiceLabelNamespace,
SetDuration,
SetupEndpoints,
SlatType,
SleepDiscoveryMode,
SmokeDetected,
StatusActive,
StatusFault,
StatusJammed,
StatusLowBattery,
StatusTampered,
StreamingStatus,
SulphurDioxideDensity,
SupportedAudioStreamConfiguration,
SupportedRtpConfiguration,
SupportedVideoStreamConfiguration,
SwingMode,
TargetAirPurifierState,
TargetAirQuality,
TargetDoorState,
TargetFanState,
TargetHeaterCoolerState,
TargetHeatingCoolingState,
TargetHorizontalTiltAngle,
TargetHumidifierDehumidifierState,
TargetPosition,
TargetRelativeHumidity,
TargetSlatState,
TargetTemperature,
TargetTiltAngle,
TargetVerticalTiltAngle,
TemperatureDisplayUnits,
ValveType,
Version,
VocDensity,
Volume,
VolumeControlType,
VolumeSelector,
WaterLevel,
SupportedCameraRecordingConfiguration,
SupportedVideoRecordingConfiguration,
SupportedAudioRecordingConfiguration,
SelectedCameraRecordingConfiguration,
AccessoryInformation,
AirPurifier,
AirQualitySensor,
BatteryService,
CameraRtpStreamManagement,
CarbonDioxideSensor,
CarbonMonoxideSensor,
ContactSensor,
Door,
Doorbell,
Fan,
FanV2,
FilterMaintenance,
Faucet,
GarageDoorOpener,
HeaterCooler,
HumidifierDehumidifier,
HumiditySensor,
IrrigationSystem,
LeakSensor,
LightSensor,
Lightbulb,
LockManagement,
LockMechanism,
Microphone,
MotionSensor,
OccupancySensor,
Outlet,
SecuritySystem,
ServiceLabel,
Slat,
SmokeSensor,
Speaker,
StatelessProgrammableSwitch,
Switch,
TemperatureSensor,
Thermostat,
Valve,
Window,
WindowCovering,
Television,
InputSource,
}
impl ToString for HapType {
fn to_string(&self) -> String {
match self {
HapType::Unknown => "unknown".into(),
HapType::AccessoryFlags => "A6".into(),
HapType::Active => "B0".into(),
HapType::ActiveIdentifier => "E7".into(),
HapType::AdministratorOnlyAccess => "1".into(),
HapType::AirParticulateDensity => "64".into(),
HapType::AirParticulateSize => "65".into(),
HapType::AirQuality => "95".into(),
HapType::AudioFeedback => "5".into(),
HapType::BatteryLevel => "68".into(),
HapType::Brightness => "8".into(),
HapType::CarbonDioxideDetected => "92".into(),
HapType::CarbonDioxideLevel => "93".into(),
HapType::CarbonDioxidePeakLevel => "94".into(),
HapType::CarbonMonoxideDetected => "69".into(),
HapType::CarbonMonoxideLevel => "90".into(),
HapType::CarbonMonoxidePeakLevel => "91".into(),
HapType::ChargingState => "8F".into(),
HapType::ClosedCaptions => "DD".into(),
HapType::ConfiguredName => "E3".into(),
HapType::DisplayOrder => "136".into(),
HapType::ColorTemperature => "CE".into(),
HapType::ContactSensorState => "6A".into(),
HapType::CoolingThresholdTemperature => "D".into(),
HapType::CurrentAirPurifierState => "A9".into(),
HapType::CurrentAmbientLightLevel => "6B".into(),
HapType::CurrentDoorState => "E".into(),
HapType::CurrentFanState => "AF".into(),
HapType::CurrentHeaterCoolerState => "B1".into(),
HapType::CurrentHeatingCoolingState => "F".into(),
HapType::CurrentHorizontalTiltAngle => "6C".into(),
HapType::CurrentHumidifierDehumidifierState => "B3".into(),
HapType::CurrentMediaState => "E0".into(),
HapType::TargetMediaState => "137".into(),
HapType::CurrentPosition => "6D".into(),
HapType::CurrentRelativeHumidity => "10".into(),
HapType::CurrentSlatState => "AA".into(),
HapType::CurrentTemperature => "11".into(),
HapType::CurrentTiltAngle => "C1".into(),
HapType::CurrentVerticalTiltAngle => "6E".into(),
HapType::DigitalZoom => "11D".into(),
HapType::FilterChangeIndication => "AC".into(),
HapType::FilterLifeLevel => "AB".into(),
HapType::FirmwareRevision => "52".into(),
HapType::HardwareRevision => "53".into(),
HapType::HeatingThresholdTemperature => "12".into(),
HapType::HoldPosition => "6F".into(),
HapType::Hue => "13".into(),
HapType::Identify => "14".into(),
HapType::InputSourceType => "DB".into(),
HapType::InputDeviceType => "DC".into(),
HapType::Identifier => "E6".into(),
HapType::CurrentVisibilityState => "135".into(),
HapType::TargetVisibilityState => "134".into(),
HapType::ImageMirroring => "11F".into(),
HapType::ImageRotation => "11E".into(),
HapType::InUse => "D2".into(),
HapType::IsConfigured => "D6".into(),
HapType::LeakDetected => "70".into(),
HapType::LockControlPoint => "19".into(),
HapType::LockCurrentState => "1D".into(),
HapType::LockLastKnownAction => "1C".into(),
HapType::LockManagementAutoSecurityTimeout => "1A".into(),
HapType::LockPhysicalControls => "A7".into(),
HapType::LockTargetState => "1E".into(),
HapType::Logs => "1F".into(),
HapType::Manufacturer => "20".into(),
HapType::Model => "21".into(),
HapType::MotionDetected => "22".into(),
HapType::Mute => "11A".into(),
HapType::Name => "23".into(),
HapType::NightVision => "11B".into(),
HapType::NitrogenDioxideDensity => "C4".into(),
HapType::ObstructionDetected => "24".into(),
HapType::OccupancyDetected => "71".into(),
HapType::On => "25".into(),
HapType::OpticalZoom => "11C".into(),
HapType::OutletInUse => "26".into(),
HapType::OzoneDensity => "C3".into(),
HapType::PairSetup => "4C".into(),
HapType::PairVerify => "4E".into(),
HapType::PairingFeatures => "4F".into(),
HapType::PairingPairings => "50".into(),
HapType::Pm10Density => "C7".into(),
HapType::Pm2_5Density => "C6".into(),
HapType::PositionState => "72".into(),
HapType::PictureMode => "E2".into(),
HapType::PowerModeSelection => "DF".into(),
HapType::ProgramMode => "D1".into(),
HapType::ProgrammableSwitchEvent => "73".into(),
HapType::RemoteKey => "E1".into(),
HapType::RelativeHumidityDehumidifierThreshold => "C9".into(),
HapType::RelativeHumidityHumidifierThreshold => "CA".into(),
HapType::RemainingDuration => "D4".into(),
HapType::ResetFilterIndication => "AD".into(),
HapType::RotationDirection => "28".into(),
HapType::RotationSpeed => "29".into(),
HapType::Saturation => "2F".into(),
HapType::SecuritySystemAlarmType => "8E".into(),
HapType::SecuritySystemCurrentState => "66".into(),
HapType::SecuritySystemTargetState => "67".into(),
HapType::SelectedRtpStreamConfiguration => "117".into(),
HapType::SerialNumber => "30".into(),
HapType::ServiceLabelIndex => "CB".into(),
HapType::ServiceLabelNamespace => "CD".into(),
HapType::SetDuration => "D3".into(),
HapType::SetupEndpoints => "118".into(),
HapType::SlatType => "C0".into(),
HapType::SleepDiscoveryMode => "E8".into(),
HapType::SmokeDetected => "76".into(),
HapType::StatusActive => "75".into(),
HapType::StatusFault => "77".into(),
HapType::StatusJammed => "78".into(),
HapType::StatusLowBattery => "79".into(),
HapType::StatusTampered => "7A".into(),
HapType::StreamingStatus => "120".into(),
HapType::SulphurDioxideDensity => "C5".into(),
HapType::SupportedAudioStreamConfiguration => "115".into(),
HapType::SupportedRtpConfiguration => "116".into(),
HapType::SupportedVideoStreamConfiguration => "114".into(),
HapType::SwingMode => "B6".into(),
HapType::TargetAirPurifierState => "A8".into(),
HapType::TargetAirQuality => "AE".into(),
HapType::TargetDoorState => "32".into(),
HapType::TargetFanState => "BF".into(),
HapType::TargetHeaterCoolerState => "B2".into(),
HapType::TargetHeatingCoolingState => "33".into(),
HapType::TargetHorizontalTiltAngle => "7B".into(),
HapType::TargetHumidifierDehumidifierState => "B4".into(),
HapType::TargetPosition => "7C".into(),
HapType::TargetRelativeHumidity => "34".into(),
HapType::TargetSlatState => "BE".into(),
HapType::TargetTemperature => "35".into(),
HapType::TargetTiltAngle => "C2".into(),
HapType::TargetVerticalTiltAngle => "7D".into(),
HapType::TemperatureDisplayUnits => "36".into(),
HapType::ValveType => "D5".into(),
HapType::Version => "37".into(),
HapType::VocDensity => "C8".into(),
HapType::Volume => "119".into(),
HapType::VolumeControlType => "E9".into(),
HapType::VolumeSelector => "EA".into(),
HapType::WaterLevel => "B5".into(),
HapType::SupportedCameraRecordingConfiguration => "205".into(),
HapType::SupportedVideoRecordingConfiguration => "206".into(),
HapType::SupportedAudioRecordingConfiguration => "207".into(),
HapType::SelectedCameraRecordingConfiguration => "209".into(),
HapType::AccessoryInformation => "3E".into(),
HapType::AirPurifier => "BB".into(),
HapType::AirQualitySensor => "8D".into(),
HapType::BatteryService => "96".into(),
HapType::CameraRtpStreamManagement => "110".into(),
HapType::CarbonDioxideSensor => "97".into(),
HapType::CarbonMonoxideSensor => "7F".into(),
HapType::ContactSensor => "80".into(),
HapType::Door => "81".into(),
HapType::Doorbell => "121".into(),
HapType::Fan => "40".into(),
HapType::FanV2 => "B7".into(),
HapType::FilterMaintenance => "BA".into(),
HapType::Faucet => "D7".into(),
HapType::GarageDoorOpener => "41".into(),
HapType::HeaterCooler => "BC".into(),
HapType::HumidifierDehumidifier => "BD".into(),
HapType::HumiditySensor => "82".into(),
HapType::IrrigationSystem => "CF".into(),
HapType::LeakSensor => "83".into(),
HapType::LightSensor => "84".into(),
HapType::Lightbulb => "43".into(),
HapType::LockManagement => "44".into(),
HapType::LockMechanism => "45".into(),
HapType::Microphone => "112".into(),
HapType::MotionSensor => "85".into(),
HapType::OccupancySensor => "86".into(),
HapType::Outlet => "47".into(),
HapType::SecuritySystem => "7E".into(),
HapType::ServiceLabel => "CC".into(),
HapType::Slat => "B9".into(),
HapType::SmokeSensor => "87".into(),
HapType::Speaker => "113".into(),
HapType::StatelessProgrammableSwitch => "89".into(),
HapType::Switch => "49".into(),
HapType::TemperatureSensor => "8A".into(),
HapType::Thermostat => "4A".into(),
HapType::Valve => "D0".into(),
HapType::Window => "8B".into(),
HapType::WindowCovering => "8C".into(),
HapType::Television => "D8".into(),
HapType::InputSource => "D9".into(),
}
}
}
impl Default for HapType {
fn default() -> HapType { HapType::Unknown }
}
impl Serialize for HapType {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
serializer.serialize_str(&self.to_string())
}
}
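// Example (sketch, assuming `serde_json` is available): serializing a `HapType`
// yields its short identifier string, e.g.
//     assert_eq!(serde_json::to_string(&HapType::On).unwrap(), "\"25\"");
//     assert_eq!(serde_json::to_string(&HapType::Unknown).unwrap(), "\"unknown\"");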
| 31.383663 | 82 | 0.700844 |
4bfaa4c5097aa29a73c450149649991571ff6af4 | 2,078 | /*
* Client Portal Web API
*
* Client Portal Web API
*
* OpenAPI spec version: 1.0.0
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*/
#[allow(unused_imports)]
use serde_json::Value;
#[derive(Debug, Serialize, Deserialize)]
pub struct Body {
#[serde(rename = "deviceId")]
device_id: Option<String>,
#[serde(rename = "devicename")]
devicename: Option<String>,
#[serde(rename = "enabled")]
enabled: Option<bool>,
#[serde(rename = "uiName")]
ui_name: Option<String>
}
impl Body {
pub fn new() -> Body {
Body {
device_id: None,
devicename: None,
enabled: None,
ui_name: None
}
}
pub fn set_device_id(&mut self, device_id: String) {
self.device_id = Some(device_id);
}
pub fn with_device_id(mut self, device_id: String) -> Body {
self.device_id = Some(device_id);
self
}
pub fn device_id(&self) -> Option<&String> {
self.device_id.as_ref()
}
pub fn reset_device_id(&mut self) {
self.device_id = None;
}
pub fn set_devicename(&mut self, devicename: String) {
self.devicename = Some(devicename);
}
pub fn with_devicename(mut self, devicename: String) -> Body {
self.devicename = Some(devicename);
self
}
pub fn devicename(&self) -> Option<&String> {
self.devicename.as_ref()
}
pub fn reset_devicename(&mut self) {
self.devicename = None;
}
pub fn set_enabled(&mut self, enabled: bool) {
self.enabled = Some(enabled);
}
pub fn with_enabled(mut self, enabled: bool) -> Body {
self.enabled = Some(enabled);
self
}
pub fn enabled(&self) -> Option<&bool> {
self.enabled.as_ref()
}
pub fn reset_enabled(&mut self) {
self.enabled = None;
}
pub fn set_ui_name(&mut self, ui_name: String) {
self.ui_name = Some(ui_name);
}
pub fn with_ui_name(mut self, ui_name: String) -> Body {
self.ui_name = Some(ui_name);
self
}
pub fn ui_name(&self) -> Option<&String> {
self.ui_name.as_ref()
}
pub fn reset_ui_name(&mut self) {
self.ui_name = None;
}
}
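// Example usage of the builder-style setters (illustrative values only):
//     let body = Body::new()
//         .with_device_id("device-1".to_string())
//         .with_enabled(true);
//     assert_eq!(body.enabled(), Some(&true));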
| 19.06422 | 67 | 0.630414 |
8f83a50a28444750185118a1d19cecbb549e1a61 | 2,092 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
account_address::AccountAddress,
chain_id::ChainId,
transaction::{RawTransaction, SignedTransaction, TransactionPayload},
};
use anyhow::Result;
use chrono::Utc;
use libra_crypto::{ed25519::*, test_utils::KeyPair, traits::SigningKey};
pub fn create_unsigned_txn(
payload: TransactionPayload,
sender_address: AccountAddress,
sender_sequence_number: u64,
max_gas_amount: u64,
gas_unit_price: u64,
gas_currency_code: String,
txn_expiration: i64, // for compatibility with UTC's timestamp.
chain_id: ChainId,
) -> RawTransaction {
RawTransaction::new(
sender_address,
sender_sequence_number,
payload,
max_gas_amount,
gas_unit_price,
gas_currency_code,
std::time::Duration::new((Utc::now().timestamp() + txn_expiration) as u64, 0),
chain_id,
)
}
pub trait TransactionSigner {
fn sign_txn(&self, raw_txn: RawTransaction) -> Result<SignedTransaction>;
}
/// Craft a transaction request.
pub fn create_user_txn<T: TransactionSigner + ?Sized>(
signer: &T,
payload: TransactionPayload,
sender_address: AccountAddress,
sender_sequence_number: u64,
max_gas_amount: u64,
gas_unit_price: u64,
gas_currency_code: String,
txn_expiration: i64, // for compatibility with UTC's timestamp.
chain_id: ChainId,
) -> Result<SignedTransaction> {
let raw_txn = create_unsigned_txn(
payload,
sender_address,
sender_sequence_number,
max_gas_amount,
gas_unit_price,
gas_currency_code,
txn_expiration,
chain_id,
);
signer.sign_txn(raw_txn)
}
impl TransactionSigner for KeyPair<Ed25519PrivateKey, Ed25519PublicKey> {
fn sign_txn(&self, raw_txn: RawTransaction) -> Result<SignedTransaction> {
let signature = self.private_key.sign(&raw_txn);
Ok(SignedTransaction::new(
raw_txn,
self.public_key.clone(),
signature,
))
}
}
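// Example flow (sketch; all argument values below are placeholders):
//     let signed = create_user_txn(
//         &key_pair, payload, sender_address, sequence_number,
//         1_000_000, 0, "LBR".to_string(), 600, chain_id,
//     )?;
// The `KeyPair` impl above signs the assembled `RawTransaction` with its Ed25519
// private key and wraps it into a `SignedTransaction`.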
| 28.27027 | 86 | 0.67782 |
ed667c1d7a8fbceb0d75af1efc419489b8f6bec2 | 7,462 | use proc_macro2;
use ast;
use attr;
use matcher;
use syn;
use syn::spanned::Spanned;
use utils;
pub fn derive(input: &ast::Input) -> proc_macro2::TokenStream {
let debug_trait_path = debug_trait_path();
let fmt_path = fmt_path();
let formatter = quote_spanned! {input.span=> __f};
let body = matcher::Matcher::new(matcher::BindingStyle::Ref, input.attrs.is_packed)
.with_field_filter(|f: &ast::Field| !f.attrs.ignore_debug())
.build_arms(input, "__arg", |_, _, arm_name, style, attrs, bis| {
let field_prints = bis.iter().filter_map(|bi| {
if bi.field.attrs.ignore_debug() {
return None;
}
if attrs.debug_transparent() {
return Some(quote_spanned! {arm_name.span()=>
#debug_trait_path::fmt(__arg_0, #formatter)
});
}
let arg_expr = &bi.expr;
let arg_ident = &bi.ident;
let dummy_debug = bi.field.attrs.debug_format_with().map(|format_fn| {
format_with(
bi.field,
&input.attrs.debug_bound(),
&arg_expr,
&arg_ident,
format_fn,
input.generics.clone(),
)
});
let expr = if bi.field.attrs.debug_format_with().is_some() {
quote_spanned! {arm_name.span()=>
&#arg_ident
}
} else {
quote_spanned! {arm_name.span()=>
&&#arg_expr
}
};
let builder = if let Some(ref name) = bi.field.ident {
let name = name.to_string();
quote_spanned! {arm_name.span()=>
#dummy_debug
let _ = __debug_trait_builder.field(#name, #expr);
}
} else {
quote_spanned! {arm_name.span()=>
#dummy_debug
let _ = __debug_trait_builder.field(#expr);
}
};
Some(builder)
});
let method = match style {
ast::Style::Struct => "debug_struct",
ast::Style::Tuple | ast::Style::Unit => "debug_tuple",
};
let method = syn::Ident::new(method, proc_macro2::Span::call_site());
if attrs.debug_transparent() {
quote_spanned! {arm_name.span()=>
#(#field_prints)*
}
} else {
let name = arm_name.to_string();
quote_spanned! {arm_name.span()=>
let mut __debug_trait_builder = #formatter.#method(#name);
#(#field_prints)*
__debug_trait_builder.finish()
}
}
});
let name = &input.ident;
let generics = utils::build_impl_generics(
input,
&debug_trait_path,
needs_debug_bound,
|field| field.debug_bound(),
|input| input.debug_bound(),
);
let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
// don't attach a span to prevent issue #58
let match_self = quote!(match *self);
quote_spanned! {input.span=>
#[allow(unused_qualifications)]
#[allow(clippy::unneeded_field_pattern)]
impl #impl_generics #debug_trait_path for #name #ty_generics #where_clause {
fn fmt(&self, #formatter: &mut #fmt_path::Formatter) -> #fmt_path::Result {
#match_self {
#body
}
}
}
}
}
fn needs_debug_bound(attrs: &attr::Field) -> bool {
!attrs.ignore_debug() && attrs.debug_bound().is_none()
}
/// Return the path of the `Debug` trait, that is `::std::fmt::Debug`.
fn debug_trait_path() -> syn::Path {
if cfg!(feature = "use_core") {
parse_quote!(::core::fmt::Debug)
} else {
parse_quote!(::std::fmt::Debug)
}
}
/// Return the path of the `fmt` module, that is `::std::fmt`.
fn fmt_path() -> syn::Path {
if cfg!(feature = "use_core") {
parse_quote!(::core::fmt)
} else {
parse_quote!(::std::fmt)
}
}
/// Return the path of the `PhantomData` type, that is `::std::marker::PhantomData`.
fn phantom_path() -> syn::Path {
if cfg!(feature = "use_core") {
parse_quote!(::core::marker::PhantomData)
} else {
parse_quote!(::std::marker::PhantomData)
}
}
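// `format_with` supports fields annotated with a custom formatting function
// (e.g. `#[derivative(Debug(format_with = "path"))]`): the field is wrapped in a local
// `Dummy` newtype whose `Debug` impl forwards to that function. Illustrative sketch of
// the generated code (names simplified):
//     struct Dummy<'_derivative, ...>(&'_derivative FieldTy, PhantomData<(...)>);
//     impl Debug for Dummy<'_derivative, ...> {
//         fn fmt(&self, __f: &mut fmt::Formatter) -> fmt::Result {
//             format_fn(self.0, __f)
//         }
//     }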
fn format_with(
f: &ast::Field,
bounds: &Option<&[syn::WherePredicate]>,
arg_expr: &proc_macro2::TokenStream,
arg_ident: &syn::Ident,
format_fn: &syn::Path,
mut generics: syn::Generics,
) -> proc_macro2::TokenStream {
let debug_trait_path = debug_trait_path();
let fmt_path = fmt_path();
let phantom_path = phantom_path();
generics
.make_where_clause()
.predicates
.extend(f.attrs.debug_bound().unwrap_or(&[]).iter().cloned());
generics
.params
.push(syn::GenericParam::Lifetime(syn::LifetimeDef::new(
parse_quote!('_derivative),
)));
let where_predicates = generics
.type_params()
.map(|ty| {
let mut bounds = syn::punctuated::Punctuated::new();
bounds.push(syn::TypeParamBound::Lifetime(syn::Lifetime::new(
"'_derivative",
proc_macro2::Span::call_site(),
)));
let path = syn::Path::from(syn::PathSegment::from(ty.ident.clone()));
syn::WherePredicate::Type(syn::PredicateType {
lifetimes: None,
bounded_ty: syn::Type::Path(syn::TypePath { qself: None, path }),
colon_token: Default::default(),
bounds,
})
})
.chain(bounds.iter().flat_map(|b| b.iter().cloned()))
.collect::<Vec<_>>();
generics
.make_where_clause()
.predicates
.extend(where_predicates);
let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
let ty = f.ty;
// Leave off the type parameter bounds, defaults, and attributes
let phantom = generics.type_params().map(|tp| &tp.ident);
let mut ctor_generics = generics.clone();
*ctor_generics
.lifetimes_mut()
.last()
.expect("There must be a '_derivative lifetime") = syn::LifetimeDef::new(parse_quote!('_));
let (_, ctor_ty_generics, _) = ctor_generics.split_for_impl();
let ctor_ty_generics = ctor_ty_generics.as_turbofish();
// don't attach a span to prevent issue #58
let match_self = quote!(match self.0);
quote_spanned!(format_fn.span()=>
let #arg_ident = {
struct Dummy #impl_generics (&'_derivative #ty, #phantom_path <(#(#phantom,)*)>) #where_clause;
impl #impl_generics #debug_trait_path for Dummy #ty_generics #where_clause {
fn fmt(&self, __f: &mut #fmt_path::Formatter) -> #fmt_path::Result {
#match_self {
this => #format_fn(this, __f)
}
}
}
Dummy #ctor_ty_generics (&&#arg_expr, #phantom_path)
};
)
}
| 33.017699 | 107 | 0.52104 |
2822d5469355e4cd72fc882b6148962aa3f3b1db | 7,450 | use broadcaster::BroadcastChannel;
use crate::sync::Mutex;
/// A barrier enables multiple tasks to synchronize the beginning
/// of some computation.
///
/// # Examples
///
/// ```
/// # async_std::task::block_on(async {
/// #
/// use async_std::sync::{Arc, Barrier};
/// use async_std::task;
///
/// let mut handles = Vec::with_capacity(10);
/// let barrier = Arc::new(Barrier::new(10));
/// for _ in 0..10 {
/// let c = barrier.clone();
/// // The same messages will be printed together.
/// // You will NOT see any interleaving.
/// handles.push(task::spawn(async move {
/// println!("before wait");
/// c.wait().await;
/// println!("after wait");
/// }));
/// }
/// // Wait for the other futures to finish.
/// for handle in handles {
/// handle.await;
/// }
/// # });
/// ```
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
#[derive(Debug)]
pub struct Barrier {
state: Mutex<BarrierState>,
wait: BroadcastChannel<(usize, usize)>,
n: usize,
}
// The inner state of a double barrier
#[derive(Debug)]
struct BarrierState {
waker: BroadcastChannel<(usize, usize)>,
count: usize,
generation_id: usize,
}
/// A `BarrierWaitResult` is returned by `wait` when all tasks in the `Barrier` have rendezvoused.
///
/// [`wait`]: struct.Barrier.html#method.wait
/// [`Barrier`]: struct.Barrier.html
///
/// # Examples
///
/// ```
/// use async_std::sync::Barrier;
///
/// let barrier = Barrier::new(1);
/// let barrier_wait_result = barrier.wait();
/// ```
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
#[derive(Debug, Clone)]
pub struct BarrierWaitResult(bool);
impl Barrier {
/// Creates a new barrier that can block a given number of tasks.
///
/// A barrier will block `n`-1 tasks which call [`wait`] and then wake up
/// all tasks at once when the `n`th task calls [`wait`].
///
/// [`wait`]: #method.wait
///
/// # Examples
///
/// ```
/// use async_std::sync::Barrier;
///
/// let barrier = Barrier::new(10);
/// ```
pub fn new(mut n: usize) -> Barrier {
let waker = BroadcastChannel::new();
let wait = waker.clone();
if n == 0 {
// if n is 0, it's not clear what behavior the user wants.
// in std::sync::Barrier, an n of 0 exhibits the same behavior as n == 1, where every
// .wait() immediately unblocks, so we adopt that here as well.
n = 1;
}
Barrier {
state: Mutex::new(BarrierState {
waker,
count: 0,
generation_id: 1,
}),
n,
wait,
}
}
/// Blocks the current task until all tasks have rendezvoused here.
///
/// Barriers are re-usable after all tasks have rendezvoused once, and can
/// be used continuously.
///
/// A single (arbitrary) task will receive a [`BarrierWaitResult`] that
/// returns `true` from [`is_leader`] when returning from this function, and
/// all other tasks will receive a result that will return `false` from
/// [`is_leader`].
///
/// [`BarrierWaitResult`]: struct.BarrierWaitResult.html
/// [`is_leader`]: struct.BarrierWaitResult.html#method.is_leader
///
/// # Examples
///
/// ```
/// # async_std::task::block_on(async {
/// #
/// use async_std::sync::{Arc, Barrier};
/// use async_std::task;
///
/// let mut handles = Vec::with_capacity(10);
/// let barrier = Arc::new(Barrier::new(10));
/// for _ in 0..10 {
/// let c = barrier.clone();
/// // The same messages will be printed together.
/// // You will NOT see any interleaving.
/// handles.push(task::spawn(async move {
/// println!("before wait");
/// c.wait().await;
/// println!("after wait");
/// }));
/// }
/// // Wait for the other futures to finish.
/// for handle in handles {
/// handle.await;
/// }
/// # });
/// ```
pub async fn wait(&self) -> BarrierWaitResult {
let mut lock = self.state.lock().await;
let local_gen = lock.generation_id;
lock.count += 1;
if lock.count < self.n {
let mut wait = self.wait.clone();
let mut generation_id = lock.generation_id;
let mut count = lock.count;
drop(lock);
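// Wait on the broadcast channel until the leader (the `n`th task) resets the
// barrier and broadcasts a new generation id, which makes the loop condition
// below false and ends this task's wait.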
while local_gen == generation_id && count < self.n {
let (g, c) = wait.recv().await.expect("sender has not been closed");
generation_id = g;
count = c;
}
BarrierWaitResult(false)
} else {
lock.count = 0;
lock.generation_id = lock.generation_id.wrapping_add(1);
lock.waker
.send(&(lock.generation_id, lock.count))
.await
.expect("there should be at least one receiver");
BarrierWaitResult(true)
}
}
}
impl BarrierWaitResult {
/// Returns `true` if this task from [`wait`] is the "leader task".
///
/// Only one task will have `true` returned from their result, all other
/// tasks will have `false` returned.
///
/// [`wait`]: struct.Barrier.html#method.wait
///
/// # Examples
///
/// ```
/// # async_std::task::block_on(async {
/// #
/// use async_std::sync::Barrier;
///
/// let barrier = Barrier::new(1);
/// let barrier_wait_result = barrier.wait().await;
/// println!("{:?}", barrier_wait_result.is_leader());
/// # });
/// ```
pub fn is_leader(&self) -> bool {
self.0
}
}
#[cfg(test)]
mod test {
use futures::channel::mpsc::unbounded;
use futures::sink::SinkExt;
use futures::stream::StreamExt;
use crate::sync::{Arc, Barrier};
use crate::task;
#[test]
fn test_barrier() {
// NOTE(dignifiedquire): Based on the test in std, I was seeing some
// race conditions, so running it in a loop to make sure things are
// solid.
for _ in 0..1_000 {
task::block_on(async move {
const N: usize = 10;
let barrier = Arc::new(Barrier::new(N));
let (tx, mut rx) = unbounded();
for _ in 0..N - 1 {
let c = barrier.clone();
let mut tx = tx.clone();
task::spawn(async move {
let res = c.wait().await;
tx.send(res.is_leader()).await.unwrap();
});
}
// At this point, all spawned threads should be blocked,
// so we shouldn't get anything from the port
let res = rx.try_next();
assert!(match res {
Err(_err) => true,
_ => false,
});
let mut leader_found = barrier.wait().await.is_leader();
// Now, the barrier is cleared and we should get data.
for _ in 0..N - 1 {
if rx.next().await.unwrap() {
assert!(!leader_found);
leader_found = true;
}
}
assert!(leader_found);
});
}
}
}
| 28.764479 | 100 | 0.517718 |
6a6b547e75eefd4b1e5d3bcd35a82f5d742d66d4 | 14,725 | //! Private module for selective re-export.
use crate::{CheckerBuilder, CheckerVisitor, Fingerprint, fingerprint, Model, Property};
use crate::checker::{Checker, EventuallyBits, Expectation, Path};
use dashmap::{DashMap, DashSet};
use nohash_hasher::NoHashHasher;
use parking_lot::{Condvar, Mutex};
use std::collections::{HashMap, VecDeque};
use std::hash::{BuildHasherDefault, Hash};
use std::sync::Arc;
// While this file is currently quite similar to bfs.rs, a refactoring to lift shared
// behavior is being postponed until DPOR is implemented.
pub(crate) struct DfsChecker<M: Model> {
model: Arc<M>,
thread_count: usize,
handles: Vec<std::thread::JoinHandle<()>>,
job_market: Arc<Mutex<JobMarket<M::State>>>,
generated: Arc<DashSet<Fingerprint, BuildHasherDefault<NoHashHasher<u64>>>>,
discoveries: Arc<DashMap<&'static str, Vec<Fingerprint>>>,
}
struct JobMarket<State> { wait_count: usize, jobs: Vec<Job<State>> }
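// Each `Job` is a batch of pending work items; an item bundles a state, the fingerprint
// trace from an init state to that state, and the bits of "eventually" properties still
// unsatisfied along that trace.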
type Job<State> = Vec<(State, Vec<Fingerprint>, EventuallyBits)>;
impl<M> DfsChecker<M>
where M: Model + Send + Sync + 'static,
M::State: Hash + Send + 'static,
{
pub(crate) fn spawn(options: CheckerBuilder<M>) -> Self {
let model = Arc::new(options.model);
let target_generated_count = options.target_generated_count;
let thread_count = options.thread_count;
let visitor = Arc::new(options.visitor);
let property_count = model.properties().len();
let generated = Arc::new(DashSet::default());
for s in model.init_states() { generated.insert(fingerprint(&s)); }
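// Each bit marks an "eventually" property not yet observed to hold; bits are cleared
// along a path as the condition is seen, and any bit still set at a terminal state
// becomes a discovery for that property.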
let ebits = {
let mut ebits = EventuallyBits::new();
for (i, p) in model.properties().iter().enumerate() {
if let Property { expectation: Expectation::Eventually, .. } = p {
ebits.insert(i);
}
}
ebits
};
let pending: Vec<_> = model.init_states().into_iter()
.map(|s| {
let fs = vec![fingerprint(&s)];
(s, fs, ebits.clone())
})
.collect();
let discoveries = Arc::new(DashMap::default());
let mut handles = Vec::new();
let has_new_job = Arc::new(Condvar::new());
let job_market = Arc::new(Mutex::new(JobMarket {
wait_count: thread_count,
jobs: vec![pending],
}));
for t in 0..thread_count {
let model = Arc::clone(&model);
let visitor = Arc::clone(&visitor);
let has_new_job = Arc::clone(&has_new_job);
let job_market = Arc::clone(&job_market);
let generated = Arc::clone(&generated);
let discoveries = Arc::clone(&discoveries);
handles.push(std::thread::spawn(move || {
log::debug!("{}: Thread started.", t);
let mut pending = Vec::new();
loop {
// Step 1: Do work.
if pending.is_empty() {
pending = {
let mut job_market = job_market.lock();
match job_market.jobs.pop() {
None => {
// Done if all are waiting.
if job_market.wait_count == thread_count {
log::debug!("{}: No more work. Shutting down... gen={}", t, generated.len());
has_new_job.notify_all();
return
}
// Otherwise more work may become available.
log::trace!("{}: No jobs. Awaiting. blocked={}", t, job_market.wait_count);
has_new_job.wait(&mut job_market);
continue
}
Some(job) => {
job_market.wait_count -= 1;
log::trace!("{}: Job found. size={}, blocked={}", t, job.len(), job_market.wait_count);
job
}
}
};
}
Self::check_block(&*model, &*generated, &mut pending, &*discoveries, &*visitor, 1500);
if discoveries.len() == property_count {
log::debug!("{}: Discovery complete. Shutting down... gen={}", t, generated.len());
let mut job_market = job_market.lock();
job_market.wait_count += 1;
drop(job_market);
has_new_job.notify_all();
return
}
if let Some(target_generated_count) = target_generated_count {
if target_generated_count.get() <= generated.len() {
log::debug!("{}: Reached target generated count. Shutting down... gen={}", t, generated.len());
return;
}
}
// Step 2: Share work.
if pending.len() > 1 && thread_count > 1 {
let mut job_market = job_market.lock();
let pieces = 1 + std::cmp::min(job_market.wait_count as usize, pending.len());
let size = pending.len() / pieces;
for _ in 1..pieces {
log::trace!("{}: Sharing work. blocked={}, size={}", t, job_market.wait_count, size);
job_market.jobs.push(pending.split_off(pending.len() - size));
has_new_job.notify_one();
}
} else if pending.is_empty() {
let mut job_market = job_market.lock();
job_market.wait_count += 1;
}
}
}));
}
DfsChecker {
model,
thread_count,
handles,
job_market,
generated,
discoveries,
}
}
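// Process up to `max_count` states from this worker's local stack: check each property,
// record discoveries, and push newly generated (not yet fingerprinted) successor states
// back onto the stack.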
fn check_block(
model: &M,
generated: &DashSet<Fingerprint, BuildHasherDefault<NoHashHasher<u64>>>,
pending: &mut Job<M::State>,
discoveries: &DashMap<&'static str, Vec<Fingerprint>>,
visitor: &Option<Box<dyn CheckerVisitor<M> + Send + Sync>>,
mut max_count: usize)
{
let properties = model.properties();
let mut actions = Vec::new();
loop {
// Done if reached max count.
if max_count == 0 { return }
max_count -= 1;
// Done if none pending.
let (state, fingerprints, mut ebits) = match pending.pop() {
None => return,
Some(pair) => pair,
};
if let Some(visitor) = visitor {
visitor.visit(model, Path::from_fingerprints(
model,
VecDeque::from(fingerprints.clone())));
}
// Done if discoveries found for all properties.
let mut is_awaiting_discoveries = false;
for (i, property) in properties.iter().enumerate() {
if discoveries.contains_key(property.name) { continue }
match property {
Property { expectation: Expectation::Always, condition: always, .. } => {
if !always(model, &state) {
// Races other threads, but that's fine.
discoveries.insert(property.name, fingerprints.clone());
} else {
is_awaiting_discoveries = true;
}
},
Property { expectation: Expectation::Sometimes, condition: sometimes, .. } => {
if sometimes(model, &state) {
// Races other threads, but that's fine.
discoveries.insert(property.name, fingerprints.clone());
} else {
is_awaiting_discoveries = true;
}
},
Property { expectation: Expectation::Eventually, condition: eventually, .. } => {
// The checker early exits after finding discoveries for every property,
// and "eventually" property discoveries are only identifid at terminal
// states, so if we are here it means we are still awaiting a corresponding
// discovery regardless of whether the eventually property is now satisfied
// (i.e. it might be falsifiable via a different path).
is_awaiting_discoveries = true;
if eventually(model, &state) {
ebits.remove(i);
}
}
}
}
if !is_awaiting_discoveries { return }
// Otherwise enqueue newly generated states (with related metadata).
let mut is_terminal = true;
model.actions(&state, &mut actions);
let next_states = actions.drain(..).flat_map(|a| model.next_state(&state, a));
for next_state in next_states {
// Skip if outside boundary.
if !model.within_boundary(&next_state) { continue }
// Skip if already generated.
//
// FIXME: we should really include ebits in the fingerprint here --
// it is possible to arrive at a DAG join with two different ebits
// values, and subsequently treat the fact that some eventually
// property held on the path leading to the first visit as meaning
// that it holds in the path leading to the second visit -- another
// possible false-negative.
let next_fingerprint = fingerprint(&next_state);
if !generated.insert(next_fingerprint) {
// FIXME: arriving at an already-known state may be a loop (in which case it
// could, in a fancier implementation, be considered a terminal state for
// purposes of eventually-property checking) but it might also be a join in
// a DAG, which makes it non-terminal. These cases can be disambiguated (at
// some cost), but for now we just _don't_ treat them as terminal, and tell
// users they need to explicitly ensure model path-acyclicality when they're
// using eventually properties (using a boundary or empty actions or
// whatever).
is_terminal = false;
continue
}
// Otherwise further checking is applicable.
is_terminal = false;
let mut next_fingerprints = Vec::with_capacity(1 + fingerprints.len());
for f in &fingerprints { next_fingerprints.push(*f); }
next_fingerprints.push(next_fingerprint);
pending.push((next_state, next_fingerprints, ebits.clone()));
}
if is_terminal {
for (i, property) in properties.iter().enumerate() {
if ebits.contains(i) {
// Races other threads, but that's fine.
discoveries.insert(property.name, fingerprints.clone());
}
}
}
}
}
}
impl<M> Checker<M> for DfsChecker<M>
where M: Model,
M::State: Hash,
{
fn model(&self) -> &M { &self.model }
fn generated_count(&self) -> usize { self.generated.len() }
fn discoveries(&self) -> HashMap<&'static str, Path<M::State, M::Action>> {
self.discoveries.iter()
.map(|mapref| {
(
<&'static str>::clone(mapref.key()),
Path::from_fingerprints(
self.model(),
VecDeque::from(mapref.value().clone())),
)
})
.collect()
}
fn join(mut self) -> Self {
for h in self.handles.drain(0..) {
h.join().unwrap();
}
self
}
fn is_done(&self) -> bool {
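        // Done when every worker is idle with no queued jobs, or when a discovery has
        // been recorded for every property.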
let job_market = self.job_market.lock();
job_market.jobs.is_empty() && job_market.wait_count == self.thread_count
|| self.discoveries.len() == self.model.properties().len()
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::*;
use crate::test_util::linear_equation_solver::*;
#[test]
fn visits_states_in_dfs_order() {
let (recorder, accessor) = StateRecorder::new_with_accessor();
LinearEquation { a: 2, b: 10, c: 14 }.checker()
.visitor(recorder)
.spawn_dfs().join();
assert_eq!(
accessor(),
vec![
(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (0, 8), (0, 9), (0, 10), (0, 11), (0, 12), (0, 13),
(0, 14), (0, 15), (0, 16), (0, 17), (0, 18), (0, 19), (0, 20),
(0, 21), (0, 22), (0, 23), (0, 24), (0, 25), (0, 26), (0, 27),
]);
}
#[cfg(not(debug_assertions))] // too slow for debug build
#[test]
fn can_complete_by_enumerating_all_states() {
let checker = LinearEquation { a: 2, b: 4, c: 7 }.checker().spawn_dfs().join();
assert_eq!(checker.is_done(), true);
checker.assert_no_discovery("solvable");
assert_eq!(checker.generated_count(), 256 * 256);
}
#[test]
fn can_complete_by_eliminating_properties() {
let checker = LinearEquation { a: 2, b: 10, c: 14 }.checker().spawn_dfs().join();
checker.assert_properties();
assert_eq!(checker.generated_count(), 55);
// DFS found this example...
assert_eq!(
checker.discovery("solvable").unwrap().into_actions(),
vec![Guess::IncreaseY; 27]); // (2*0 + 10*27) % 256 == 14
// ... but there are of course other solutions, such as the following.
checker.assert_discovery("solvable", vec![
Guess::IncreaseX,
Guess::IncreaseY,
Guess::IncreaseX,
]);
}
}
| 43.436578 | 123 | 0.491273 |
8ffbc87d5a01dd67bed880465041246dc57a90d7 | 481 | // Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
use common_datavalues::DataValueArithmeticOperator;
use common_exception::Result;
use crate::scalars::ArithmeticFunction;
use crate::scalars::Function;
pub struct ArithmeticModuloFunction;
impl ArithmeticModuloFunction {
pub fn try_create_func(_display_name: &str) -> Result<Box<dyn Function>> {
ArithmeticFunction::try_create_func(DataValueArithmeticOperator::Modulo)
}
}
| 26.722222 | 80 | 0.779626 |
29749efb8a698c4661fd478a75572d0060074fcd | 2,643 | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//use common_arrow::arrow::array::ListArray;
use common_datavalues::arrays::get_list_builder;
use common_datavalues::prelude::*;
use common_exception::Result;
#[test]
fn test_take_random() -> Result<()> {
// Test DFUint16Array
let df_uint16_array = &DFUInt16Array::new_from_iter(1u16..4u16);
// Create TakeRandBranch for the array
let taker = df_uint16_array.take_rand();
// Call APIs defined in trait TakeRandom
assert_eq!(Some(1u16), taker.get(0));
let unsafe_val = unsafe { taker.get_unchecked(0) };
assert_eq!(1u16, unsafe_val);
// Test BooleanArray
let df_bool_array = &DFBooleanArray::new_from_slice(&[true, false, true, false]);
// Create TakeRandBranch for the array
let taker = df_bool_array.take_rand();
assert_eq!(Some(true), taker.get(2));
let unsafe_val = unsafe { taker.get_unchecked(3) };
assert!(!unsafe_val);
// Test ListArray
let mut builder = get_list_builder(&DataType::UInt16, 12, 3);
builder.append_series(&Series::new(vec![1_u16, 2, 3]));
builder.append_series(&Series::new(vec![7_u16, 8, 9]));
let df_list = &builder.finish();
// Create TakeRandBranch for the array
let taker = df_list.take_rand();
let result = taker.get(1).unwrap();
let expected = Series::new(vec![7_u16, 8, 9]);
assert!(result.series_equal(&expected));
// Test get_unchecked
let result = unsafe { taker.get_unchecked(0) };
let expected = Series::new(vec![1_u16, 2, 3]);
assert!(result.series_equal(&expected));
// Test DFStringArray
let mut string_builder = StringArrayBuilder::with_capacity(3);
string_builder.append_value("1a");
string_builder.append_value("2b");
string_builder.append_value("3c");
let df_string_array = &string_builder.finish();
// Create TakeRandBranch for the array
let taker = df_string_array.take_rand();
assert_eq!(Some("1a".as_bytes()), taker.get(0));
// Test get_unchecked
let result = unsafe { taker.get_unchecked(1) };
assert_eq!(b"2b", result);
Ok(())
}
| 38.304348 | 85 | 0.695422 |
2832d17247212018465ef88f2255dff7babdfe23 | 5,566 | // Copyright 2018 The Grin Developers
// Modifications Copyright 2019 The Gotts Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Basic TUI to better output the overall system status and status
//! of various subsystems
use chrono::prelude::Utc;
use cursive::direction::Orientation;
use cursive::theme::BaseColor::{Black, Blue, Cyan, White};
use cursive::theme::Color::Dark;
use cursive::theme::PaletteColor::{
Background, Highlight, HighlightInactive, Primary, Shadow, View,
};
use cursive::theme::{BaseColor, BorderStyle, Color, Theme};
use cursive::traits::Boxable;
use cursive::traits::Identifiable;
use cursive::utils::markup::StyledString;
use cursive::views::{LinearLayout, Panel, StackView, TextView, ViewBox};
use cursive::Cursive;
use std::sync::mpsc;
use crate::built_info;
use crate::servers::Server;
use crate::tui::constants::ROOT_STACK;
use crate::tui::types::{TUIStatusListener, UIMessage};
use crate::tui::{menu, mining, peers, status, version};
pub struct UI {
cursive: Cursive,
ui_rx: mpsc::Receiver<UIMessage>,
ui_tx: mpsc::Sender<UIMessage>,
controller_tx: mpsc::Sender<ControllerMessage>,
}
fn modify_theme(theme: &mut Theme) {
theme.shadow = false;
theme.borders = BorderStyle::Simple;
theme.palette[Background] = Dark(Black);
theme.palette[Shadow] = Dark(Black);
theme.palette[View] = Dark(Black);
theme.palette[Primary] = Dark(White);
theme.palette[Highlight] = Dark(Cyan);
theme.palette[HighlightInactive] = Dark(Blue);
// also secondary, tertiary, TitlePrimary, TitleSecondary
}
impl UI {
/// Create a new UI
pub fn new(controller_tx: mpsc::Sender<ControllerMessage>) -> UI {
let (ui_tx, ui_rx) = mpsc::channel::<UIMessage>();
let mut gotts_ui = UI {
cursive: Cursive::default(),
ui_tx: ui_tx,
ui_rx: ui_rx,
controller_tx: controller_tx,
};
// Create UI objects, etc
let status_view = status::TUIStatusView::create();
let mining_view = mining::TUIMiningView::create();
let peer_view = peers::TUIPeerView::create();
let version_view = version::TUIVersionView::create();
let main_menu = menu::create();
let root_stack = StackView::new()
.layer(version_view)
.layer(mining_view)
.layer(peer_view)
.layer(status_view)
.with_id(ROOT_STACK)
.full_height();
let mut title_string = StyledString::new();
title_string.append(StyledString::styled(
format!(
"Gotts Version {} (proto: {})",
built_info::PKG_VERSION,
Server::protocol_version()
),
Color::Dark(BaseColor::Green),
));
let main_layer = LinearLayout::new(Orientation::Vertical)
.child(Panel::new(TextView::new(title_string).full_width()))
.child(
LinearLayout::new(Orientation::Horizontal)
.child(Panel::new(ViewBox::new(main_menu)))
.child(Panel::new(root_stack)),
);
//set theme
let mut theme = gotts_ui.cursive.current_theme().clone();
modify_theme(&mut theme);
gotts_ui.cursive.set_theme(theme);
gotts_ui.cursive.add_fullscreen_layer(main_layer);
// Configure a callback (shutdown, for the first test)
let controller_tx_clone = gotts_ui.controller_tx.clone();
gotts_ui.cursive.add_global_callback('q', move |_| {
controller_tx_clone
.send(ControllerMessage::Shutdown)
.unwrap();
});
gotts_ui.cursive.set_fps(3);
gotts_ui
}
/// Step the UI by calling into Cursive's step function, then
/// processing any UI messages
pub fn step(&mut self) -> bool {
if !self.cursive.is_running() {
return false;
}
// Process any pending UI messages
while let Some(message) = self.ui_rx.try_iter().next() {
match message {
UIMessage::UpdateStatus(update) => {
status::TUIStatusView::update(&mut self.cursive, &update);
mining::TUIMiningView::update(&mut self.cursive, &update);
peers::TUIPeerView::update(&mut self.cursive, &update);
version::TUIVersionView::update(&mut self.cursive, &update);
}
}
}
// Step the UI
self.cursive.step();
true
}
/// Stop the UI
pub fn stop(&mut self) {
self.cursive.quit();
}
}
pub struct Controller {
rx: mpsc::Receiver<ControllerMessage>,
ui: UI,
}
pub enum ControllerMessage {
Shutdown,
}
impl Controller {
/// Create a new controller
pub fn new() -> Result<Controller, String> {
let (tx, rx) = mpsc::channel::<ControllerMessage>();
Ok(Controller {
rx: rx,
ui: UI::new(tx),
})
}
/// Run the controller
pub fn run(&mut self, server: Server) {
let stat_update_interval = 1;
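		// Poll the server for stats at most once per `stat_update_interval` seconds.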
let mut next_stat_update = Utc::now().timestamp() + stat_update_interval;
while self.ui.step() {
while let Some(message) = self.rx.try_iter().next() {
match message {
ControllerMessage::Shutdown => {
self.ui.stop();
println!("Shutdown in progress, please wait");
server.stop();
return;
}
}
}
if Utc::now().timestamp() > next_stat_update {
next_stat_update = Utc::now().timestamp() + stat_update_interval;
if let Ok(stats) = server.get_server_stats() {
self.ui.ui_tx.send(UIMessage::UpdateStatus(stats)).unwrap();
}
}
}
server.stop();
}
}
| 28.54359 | 75 | 0.695293 |
1aa550c92f51a80dc2b9b067c5c28cd378d7672b | 1,981 | use std::fmt;
use super::*;
#[derive(Debug)]
pub struct Module {
ptr: LLVMModuleRef,
}
impl_llvm_ref!(Module, LLVMModuleRef);
impl Module {
pub fn dump(&self) {
unsafe { LLVMDumpModule(self.as_raw()) };
}
pub fn set_data_layout_str<T: Borrow<Str>>(&mut self, data_layout_str: &T) {
unsafe { LLVMSetDataLayout(self.as_mut(), data_layout_str.borrow().as_ptr()) };
}
pub fn set_data_layout(&mut self, data_layout: &TargetData) {
unsafe { LLVMSetModuleDataLayout(self.as_mut(), data_layout.as_raw()) };
}
pub fn get_target_triple(&self) -> &'static Str {
unsafe { Str::from_ptr(LLVMGetTarget(self.as_raw())) }
}
pub fn set_target_triple<T: Borrow<Str>>(&mut self, triple: &T) {
unsafe { LLVMSetTarget(self.as_mut(), triple.borrow().as_ptr()) };
}
pub fn add_function<T: Borrow<Str>>(
&mut self,
func_ty: &types::Function,
name: &T,
) -> LLVMValueRef {
unsafe { LLVMAddFunction(self.as_mut(), name.borrow().as_ptr(), func_ty.into()) }
}
pub fn print_to_file<T: Borrow<Str>>(&self, path: &T) -> Result<()> {
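        // LLVMPrintModuleToFile reports failure by writing an error-message pointer
        // through its final argument; `em` remaining 0 means no message was set.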
let mut em: usize = 0;
let em_ptr: *mut usize = &mut em;
unsafe {
LLVMPrintModuleToFile(
self.as_raw(),
path.borrow().as_ptr(),
em_ptr as *mut *mut i8,
);
if em == 0 {
// no error message was set
Ok(())
} else {
Err(String::from_mut(em_ptr as *mut i8))
}
}
}
}
impl fmt::Display for Module {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unsafe {
write!(
f,
"{}",
String::from_mut(LLVMPrintModuleToString(self.as_raw()))
)
}
}
}
impl Drop for Module {
fn drop(&mut self) {
unsafe { LLVMDisposeModule(self.as_mut()) };
}
}
| 25.727273 | 89 | 0.530035 |
f9e051c66996839d3795d998be44b082968ec8d0 | 7,968 | use {
crate::{stakes::Stakes, vote_account::VoteAccount},
serde::{Deserialize, Serialize},
mundis_sdk::{clock::Epoch, pubkey::Pubkey},
std::{collections::HashMap, sync::Arc},
};
pub type NodeIdToVoteAccounts = HashMap<Pubkey, NodeVoteAccounts>;
pub type EpochAuthorizedVoters = HashMap<Pubkey, Pubkey>;
#[derive(Clone, Serialize, Debug, Deserialize, Default, PartialEq, Eq, AbiExample)]
pub struct NodeVoteAccounts {
pub vote_accounts: Vec<Pubkey>,
pub total_stake: u64,
}
#[derive(Clone, Debug, Serialize, Deserialize, AbiExample, PartialEq)]
pub struct EpochStakes {
stakes: Arc<Stakes>,
total_stake: u64,
node_id_to_vote_accounts: Arc<NodeIdToVoteAccounts>,
epoch_authorized_voters: Arc<EpochAuthorizedVoters>,
}
impl EpochStakes {
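    /// Clones the given `Stakes` and pre-computes the total stake, the vote accounts
    /// grouped by node id, and the authorized voters for `leader_schedule_epoch`.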
pub fn new(stakes: &Stakes, leader_schedule_epoch: Epoch) -> Self {
let epoch_vote_accounts = stakes.vote_accounts();
let (total_stake, node_id_to_vote_accounts, epoch_authorized_voters) =
Self::parse_epoch_vote_accounts(epoch_vote_accounts.as_ref(), leader_schedule_epoch);
Self {
stakes: Arc::new(stakes.clone()),
total_stake,
node_id_to_vote_accounts: Arc::new(node_id_to_vote_accounts),
epoch_authorized_voters: Arc::new(epoch_authorized_voters),
}
}
pub fn stakes(&self) -> &Stakes {
&self.stakes
}
pub fn total_stake(&self) -> u64 {
self.total_stake
}
pub fn node_id_to_vote_accounts(&self) -> &Arc<NodeIdToVoteAccounts> {
&self.node_id_to_vote_accounts
}
pub fn epoch_authorized_voters(&self) -> &Arc<EpochAuthorizedVoters> {
&self.epoch_authorized_voters
}
pub fn vote_account_stake(&self, vote_account: &Pubkey) -> u64 {
self.stakes
.vote_accounts()
.get(vote_account)
.map(|(stake, _)| *stake)
.unwrap_or(0)
}
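    /// Sums the total stake and, for each vote account with non-zero stake and a known
    /// authorized voter at `leader_schedule_epoch`, groups it under its node id and
    /// records its authorized voter.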
fn parse_epoch_vote_accounts(
epoch_vote_accounts: &HashMap<Pubkey, (u64, VoteAccount)>,
leader_schedule_epoch: Epoch,
) -> (u64, NodeIdToVoteAccounts, EpochAuthorizedVoters) {
let mut node_id_to_vote_accounts: NodeIdToVoteAccounts = HashMap::new();
let total_stake = epoch_vote_accounts
.iter()
.map(|(_, (stake, _))| stake)
.sum();
let epoch_authorized_voters = epoch_vote_accounts
.iter()
.filter_map(|(key, (stake, account))| {
let vote_state = account.vote_state();
let vote_state = match vote_state.as_ref() {
Err(_) => {
datapoint_warn!(
"parse_epoch_vote_accounts",
(
"warn",
format!("Unable to get vote_state from account {}", key),
String
),
);
return None;
}
Ok(vote_state) => vote_state,
};
if *stake > 0 {
if let Some(authorized_voter) = vote_state
.authorized_voters()
.get_authorized_voter(leader_schedule_epoch)
{
let node_vote_accounts = node_id_to_vote_accounts
.entry(vote_state.node_pubkey)
.or_default();
node_vote_accounts.total_stake += stake;
node_vote_accounts.vote_accounts.push(*key);
Some((*key, authorized_voter))
} else {
None
}
} else {
None
}
})
.collect();
(
total_stake,
node_id_to_vote_accounts,
epoch_authorized_voters,
)
}
}
#[cfg(test)]
pub(crate) mod tests {
use {
super::*, mundis_sdk::account::AccountSharedData,
mundis_vote_program::vote_state::create_account_with_authorized, std::iter,
};
struct VoteAccountInfo {
vote_account: Pubkey,
account: AccountSharedData,
authorized_voter: Pubkey,
}
#[test]
fn test_parse_epoch_vote_accounts() {
let stake_per_account = 100;
let num_vote_accounts_per_node = 2;
// Create some vote accounts for each pubkey
let vote_accounts_map: HashMap<Pubkey, Vec<VoteAccountInfo>> = (0..10)
.map(|_| {
let node_id = mundis_sdk::pubkey::new_rand();
(
node_id,
iter::repeat_with(|| {
let authorized_voter = mundis_sdk::pubkey::new_rand();
VoteAccountInfo {
vote_account: mundis_sdk::pubkey::new_rand(),
account: create_account_with_authorized(
&node_id,
&authorized_voter,
&node_id,
0,
100,
),
authorized_voter,
}
})
.take(num_vote_accounts_per_node)
.collect(),
)
})
.collect();
let expected_authorized_voters: HashMap<_, _> = vote_accounts_map
.iter()
.flat_map(|(_, vote_accounts)| {
vote_accounts
.iter()
.map(|v| (v.vote_account, v.authorized_voter))
})
.collect();
let expected_node_id_to_vote_accounts: HashMap<_, _> = vote_accounts_map
.iter()
.map(|(node_pubkey, vote_accounts)| {
let mut vote_accounts = vote_accounts
.iter()
.map(|v| (v.vote_account))
.collect::<Vec<_>>();
vote_accounts.sort();
let node_vote_accounts = NodeVoteAccounts {
vote_accounts,
total_stake: stake_per_account * num_vote_accounts_per_node as u64,
};
(*node_pubkey, node_vote_accounts)
})
.collect();
// Create and process the vote accounts
let epoch_vote_accounts: HashMap<_, _> = vote_accounts_map
.iter()
.flat_map(|(_, vote_accounts)| {
vote_accounts.iter().map(|v| {
(
v.vote_account,
(stake_per_account, VoteAccount::from(v.account.clone())),
)
})
})
.collect();
let (total_stake, mut node_id_to_vote_accounts, epoch_authorized_voters) =
EpochStakes::parse_epoch_vote_accounts(&epoch_vote_accounts, 0);
// Verify the results
node_id_to_vote_accounts
.iter_mut()
.for_each(|(_, node_vote_accounts)| node_vote_accounts.vote_accounts.sort());
assert!(
node_id_to_vote_accounts.len() == expected_node_id_to_vote_accounts.len()
&& node_id_to_vote_accounts
.iter()
.all(|(k, v)| expected_node_id_to_vote_accounts.get(k).unwrap() == v)
);
assert!(
epoch_authorized_voters.len() == expected_authorized_voters.len()
&& epoch_authorized_voters
.iter()
.all(|(k, v)| expected_authorized_voters.get(k).unwrap() == v)
);
assert_eq!(
total_stake,
vote_accounts_map.len() as u64 * num_vote_accounts_per_node as u64 * 100
);
}
}
| 35.256637 | 97 | 0.512676 |
edff351c8916c912ceec106bcbdfcb14dc390a79 | 2,595 | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use common_exception::ErrorCode;
use common_exception::Result;
use common_planners::PlanNode;
use common_planners::RenameTableEntity;
use common_planners::RenameTablePlan;
use common_tracing::tracing;
use sqlparser::ast::ObjectName;
use crate::sessions::QueryContext;
use crate::sql::statements::AnalyzableStatement;
use crate::sql::statements::AnalyzedResult;
#[derive(Debug, Clone, PartialEq)]
pub struct DfRenameTable {
pub name_map: HashMap<ObjectName, ObjectName>,
}
#[async_trait::async_trait]
impl AnalyzableStatement for DfRenameTable {
#[tracing::instrument(level = "debug", skip(self, ctx), fields(ctx.id = ctx.get_id().as_str()))]
async fn analyze(&self, ctx: Arc<QueryContext>) -> Result<AnalyzedResult> {
let tenant = ctx.get_tenant();
let mut entities = Vec::new();
for (k, v) in &self.name_map {
let (db, table_name) = self.resolve_table(ctx.clone(), k)?;
let (new_db, new_table_name) = self.resolve_table(ctx.clone(), v)?;
entities.push(RenameTableEntity {
if_exists: false,
db,
table_name,
new_db,
new_table_name,
})
}
Ok(AnalyzedResult::SimpleQuery(Box::new(
PlanNode::RenameTable(RenameTablePlan { tenant, entities }),
)))
}
}
impl DfRenameTable {
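    /// Resolves `db.table` or `table` into a (database, table) pair, defaulting to
    /// the session's current database when no database is given.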
fn resolve_table(
&self,
ctx: Arc<QueryContext>,
table_name: &ObjectName,
) -> Result<(String, String)> {
let idents = &table_name.0;
match idents.len() {
0 => Err(ErrorCode::SyntaxException("Rename table name is empty")),
1 => Ok((ctx.get_current_database(), idents[0].value.clone())),
2 => Ok((idents[0].value.clone(), idents[1].value.clone())),
_ => Err(ErrorCode::SyntaxException(
"Rename table name must be [`db`].`table`",
)),
}
}
}
| 34.144737 | 100 | 0.640462 |
db9c098d7d6cbebaac1ea20072a492ddf9fb3f94 | 1,706 | ////////////////////////////////////////////////////////////////////////////////////////////////////
// MIT License
//
// Copyright (c) 2021 fontivan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////////////////////////
use crate::nes::architecture::cpu::Cpu;
use crate::nes::architecture::memory::Memory;
use crate::nes::instructions::Opcode;
pub struct Opcode0x2e {}
impl Opcode for Opcode0x2e {
fn get_name() -> String {
return "0x2e".to_string();
}
fn execute(mut _cpu: &mut Cpu, mut _memory: &mut Memory) {
panic!("Instruction '0x2e' is not implemented")
}
}
| 42.65 | 100 | 0.647714 |
f5f82732f2034c9cdc534a7859faaf418a479688 | 1,833 | use super::{parse_next_substitution as pns, Substitution as S};
macro_rules! assert_eq_pnsat {
($lhs:expr, $rhs:expr) => {
assert_eq!(
pns($lhs).and_then(|(f, _)| f.translate().ok()),
$rhs.map(<String as From<&str>>::from)
)
};
}
#[test]
fn test_escape() {
assert_eq!(pns("has no escapes"), None);
assert_eq!(pns("has no escapes, either $"), None);
assert_eq!(pns("*so* has a $$ escape"), Some((S::Escape((11, 13)), " escape")));
assert_eq!(pns("$$ leading escape"), Some((S::Escape((0, 2)), " leading escape")));
assert_eq!(pns("trailing escape $$"), Some((S::Escape((16, 18)), "")));
}
#[test]
fn test_parse() {
macro_rules! assert_pns_eq_sub {
($in_:expr, $kind:ident($arg:expr, $pos:expr)) => {
assert_eq!(pns(concat!($in_, "!")), Some((S::$kind($arg.into(), $pos), "!")))
};
}
assert_pns_eq_sub!("$0", Ordinal(0, (0, 2)));
assert_pns_eq_sub!("$1", Ordinal(1, (0, 2)));
assert_pns_eq_sub!("$9", Ordinal(9, (0, 2)));
assert_pns_eq_sub!("$N", Name("N", (0, 2)));
assert_pns_eq_sub!("$NAME", Name("NAME", (0, 5)));
}
#[test]
fn test_iter() {
use super::iter_subs;
let s = "The $0'th word $$ is: `$WORD` $!\n";
let subs: Vec<_> = iter_subs(s, 0).map(|sub| sub.translate().ok()).collect();
assert_eq!(
subs.iter().map(|ms| ms.as_ref().map(|s| &s[..])).collect::<Vec<_>>(),
vec![Some("{0}"), None, Some("{WORD}")]
);
}
#[test]
fn test_translation() {
assert_eq_pnsat!("$0", Some("{0}"));
assert_eq_pnsat!("$9", Some("{9}"));
assert_eq_pnsat!("$1", Some("{1}"));
assert_eq_pnsat!("$10", Some("{1}"));
assert_eq_pnsat!("$stuff", Some("{stuff}"));
assert_eq_pnsat!("$NAME", Some("{NAME}"));
assert_eq_pnsat!("$PREFIX/bin", Some("{PREFIX}"));
}
| 32.157895 | 89 | 0.539007 |
bb96c4fc2f19aac776fe4426784f94fa7b3e62aa | 16,078 | //! Driver for the Maxim MAX17205 fuel gauge.
//!
//! <https://www.maximintegrated.com/en/products/power/battery-management/MAX17205.html>
//!
//! > The MAX1720x/MAX1721x are ultra-low power stand-alone fuel gauge ICs that
//! > implement the Maxim ModelGauge™ m5 algorithm without requiring host
//! > interaction for configuration. This feature makes the MAX1720x/MAX1721x
//! > excellent pack-side fuel gauges. The MAX17201/MAX17211 monitor a single
//! > cell pack. The MAX17205/MAX17215 monitor and balance a 2S or 3S pack or
//! > monitor a multiple-series cell pack.
//!
//! Usage
//! -----
//!
//! ```rust
//! # use kernel::static_init;
//!
//! // Two i2c addresses are necessary.
//! // Registers 0x000-0x0FF are accessed by address 0x36.
//! // Registers 0x100-0x1FF are accessed by address 0x0B.
//! let max17205_i2c_lower = static_init!(
//! capsules::virtual_i2c::I2CDevice,
//! capsules::virtual_i2c::I2CDevice::new(i2c_bus, 0x36));
//! let max17205_i2c_upper = static_init!(
//! capsules::virtual_i2c::I2CDevice,
//! capsules::virtual_i2c::I2CDevice::new(i2c_bus, 0x0B));
//! let max17205 = static_init!(
//! capsules::max17205::MAX17205<'static>,
//! capsules::max17205::MAX17205::new(max17205_i2c_lower, max17205_i2c_upper,
//! &mut capsules::max17205::BUFFER));
//! max17205_i2c.set_client(max17205);
//!
//! // For userspace.
//! let max17205_driver = static_init!(
//! capsules::max17205::MAX17205Driver<'static>,
//! capsules::max17205::MAX17205Driver::new(max17205));
//! max17205.set_client(max17205_driver);
//! ```
use core::cell::Cell;
use kernel::common::cells::{MapCell, OptionalCell, TakeCell};
use kernel::hil::i2c;
use kernel::{CommandReturn, Driver, ErrorCode, ProcessId, Upcall};
/// Syscall driver number.
use crate::driver;
pub const DRIVER_NUM: usize = driver::NUM::Max17205 as usize;
pub static mut BUFFER: [u8; 8] = [0; 8];
// Addresses 0x000 - 0x0FF, 0x180 - 0x1FF can be written as blocks
// Addresses 0x100 - 0x17F must be written by word
// Addresses 0x000 - 0x0FF should use the i2c_lower device
// Addresses 0x100 - 0x1FF should use the i2c_upper device
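// For example, Status (0x000) is accessed through i2c_lower at offset 0x00, while
// NRomID (0x1BC) is accessed through i2c_upper at offset 0xBC.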
enum Registers {
Status = 0x000,
RepCap = 0x005, // Reported capacity, LSB = 0.5 mAh
    //RepSOC = 0x006, // Reported state of charge, LSB = %/256
FullCapRep = 0x035, // Maximum capacity, LSB = 0.5 mAh
//NPackCfg = 0x1B5, // Pack configuration
NRomID = 0x1BC, //RomID - 64bit unique
//NRSense = 0x1CF, // Sense resistor
Batt = 0x0DA, // Pack voltage, LSB = 1.25mV
Current = 0x00A, // Instantaneous current, LSB = 156.25 uA
Coulomb = 0x04D,
}
#[derive(Clone, Copy, PartialEq)]
enum State {
Idle,
/// Simple read states
SetupReadCoulomb,
ReadCoulomb,
SetupReadStatus,
ReadStatus,
SetupReadSOC,
ReadSOC,
SetupReadCap,
ReadCap,
SetupReadVolt,
ReadVolt,
SetupReadCurrent,
ReadCurrent,
SetupReadRomID,
ReadRomID,
}
pub trait MAX17205Client {
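    // Each callback reports the raw register value(s) for one asynchronous read,
    // together with the I2C completion status.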
fn status(&self, status: u16, error: Result<(), ErrorCode>);
fn state_of_charge(
&self,
percent: u16,
capacity: u16,
full_capacity: u16,
error: Result<(), ErrorCode>,
);
fn voltage_current(&self, voltage: u16, current: u16, error: Result<(), ErrorCode>);
fn coulomb(&self, coulomb: u16, error: Result<(), ErrorCode>);
fn romid(&self, rid: u64, error: Result<(), ErrorCode>);
}
pub struct MAX17205<'a> {
i2c_lower: &'a dyn i2c::I2CDevice,
i2c_upper: &'a dyn i2c::I2CDevice,
state: Cell<State>,
soc: Cell<u16>,
soc_mah: Cell<u16>,
voltage: Cell<u16>,
buffer: TakeCell<'static, [u8]>,
client: OptionalCell<&'static dyn MAX17205Client>,
}
impl<'a> MAX17205<'a> {
pub fn new(
i2c_lower: &'a dyn i2c::I2CDevice,
i2c_upper: &'a dyn i2c::I2CDevice,
buffer: &'static mut [u8],
) -> MAX17205<'a> {
MAX17205 {
i2c_lower: i2c_lower,
i2c_upper: i2c_upper,
state: Cell::new(State::Idle),
soc: Cell::new(0),
soc_mah: Cell::new(0),
voltage: Cell::new(0),
buffer: TakeCell::new(buffer),
client: OptionalCell::empty(),
}
}
pub fn set_client<C: MAX17205Client>(&self, client: &'static C) {
self.client.set(client);
}
fn setup_read_status(&self) -> Result<(), ErrorCode> {
self.buffer.take().map_or(Err(ErrorCode::NOMEM), |buffer| {
self.i2c_lower.enable();
buffer[0] = Registers::Status as u8;
self.i2c_lower.write(buffer, 2);
self.state.set(State::SetupReadStatus);
Ok(())
})
}
fn setup_read_soc(&self) -> Result<(), ErrorCode> {
self.buffer.take().map_or(Err(ErrorCode::NOMEM), |buffer| {
self.i2c_lower.enable();
// Get SOC mAh and percentage
            // Write RepCap address
buffer[0] = Registers::RepCap as u8;
self.i2c_lower.write(buffer, 1);
self.state.set(State::SetupReadSOC);
Ok(())
})
}
fn setup_read_curvolt(&self) -> Result<(), ErrorCode> {
self.buffer.take().map_or(Err(ErrorCode::NOMEM), |buffer| {
self.i2c_lower.enable();
// Get current and voltage
// Write Batt address
buffer[0] = Registers::Batt as u8;
self.i2c_lower.write(buffer, 1);
self.state.set(State::SetupReadVolt);
Ok(())
})
}
fn setup_read_coulomb(&self) -> Result<(), ErrorCode> {
self.buffer.take().map_or(Err(ErrorCode::NOMEM), |buffer| {
self.i2c_lower.enable();
// Get raw coulomb count.
// Write Coulomb address
buffer[0] = Registers::Coulomb as u8;
self.i2c_lower.write(buffer, 1);
self.state.set(State::SetupReadCoulomb);
Ok(())
})
}
fn setup_read_romid(&self) -> Result<(), ErrorCode> {
self.buffer.take().map_or(Err(ErrorCode::NOMEM), |buffer| {
self.i2c_upper.enable();
buffer[0] = Registers::NRomID as u8;
self.i2c_upper.write(buffer, 1);
self.state.set(State::SetupReadRomID);
Ok(())
})
}
}
impl i2c::I2CClient for MAX17205<'_> {
fn command_complete(&self, buffer: &'static mut [u8], _error: i2c::Error) {
match self.state.get() {
State::SetupReadStatus => {
// Read status
self.i2c_lower.read(buffer, 2);
self.state.set(State::ReadStatus);
}
State::ReadStatus => {
let status = ((buffer[1] as u16) << 8) | (buffer[0] as u16);
let error = if _error != i2c::Error::CommandComplete {
Err(ErrorCode::NOACK)
} else {
Ok(())
};
self.client.map(|client| client.status(status, error));
self.buffer.replace(buffer);
self.i2c_lower.disable();
self.state.set(State::Idle);
}
State::SetupReadSOC => {
// Write of SOC memory address complete, now issue read
self.i2c_lower.read(buffer, 4);
self.state.set(State::ReadSOC);
}
State::ReadSOC => {
// Read of SOC memory address complete
self.soc_mah
.set(((buffer[1] as u16) << 8) | (buffer[0] as u16));
self.soc.set(((buffer[3] as u16) << 8) | (buffer[2] as u16));
self.buffer.replace(buffer);
// Now issue write of memory address of full capacity
// Setup read capacity
self.buffer.take().map(|selfbuf| {
                    // Write FullCapRep (maximum capacity) address
selfbuf[0] = ((Registers::FullCapRep as u8) & 0xFF) as u8;
self.i2c_lower.write(selfbuf, 1);
self.state.set(State::SetupReadCap);
});
}
State::SetupReadCap => {
// Now issue read
self.i2c_lower.read(buffer, 2);
self.state.set(State::ReadCap);
}
State::ReadCap => {
let full_mah = ((buffer[1] as u16) << 8) | (buffer[0] as u16);
let error = if _error != i2c::Error::CommandComplete {
Err(ErrorCode::NOACK)
} else {
Ok(())
};
self.client.map(|client| {
client.state_of_charge(self.soc.get(), self.soc_mah.get(), full_mah, error);
});
self.buffer.replace(buffer);
self.i2c_lower.disable();
self.state.set(State::Idle);
}
State::SetupReadCoulomb => {
                // Write of Coulomb register address complete, now issue read
self.i2c_lower.read(buffer, 2);
self.state.set(State::ReadCoulomb);
}
State::ReadCoulomb => {
                // Read of coulomb counter register complete
let coulomb = ((buffer[1] as u16) << 8) | (buffer[0] as u16);
let error = if _error != i2c::Error::CommandComplete {
Err(ErrorCode::NOACK)
} else {
Ok(())
};
self.client.map(|client| {
client.coulomb(coulomb, error);
});
self.buffer.replace(buffer);
self.i2c_lower.disable();
self.state.set(State::Idle);
}
State::SetupReadVolt => {
// Write of voltage memory address complete, now issue read
self.i2c_lower.read(buffer, 2);
self.state.set(State::ReadVolt);
}
State::ReadVolt => {
// Read of voltage memory address complete
self.voltage
.set(((buffer[1] as u16) << 8) | (buffer[0] as u16));
self.buffer.replace(buffer);
// Now issue write of memory address of current
                // Setup read of the Current register
self.buffer.take().map(|selfbuf| {
selfbuf[0] = ((Registers::Current as u8) & 0xFF) as u8;
self.i2c_lower.write(selfbuf, 1);
self.state.set(State::SetupReadCurrent);
});
}
State::SetupReadCurrent => {
// Now issue read
self.i2c_lower.read(buffer, 2);
self.state.set(State::ReadCurrent);
}
State::ReadCurrent => {
let current = ((buffer[1] as u16) << 8) | (buffer[0] as u16);
let error = if _error != i2c::Error::CommandComplete {
Err(ErrorCode::NOACK)
} else {
Ok(())
};
self.client
.map(|client| client.voltage_current(self.voltage.get(), current, error));
self.buffer.replace(buffer);
self.i2c_lower.disable();
self.state.set(State::Idle);
}
State::SetupReadRomID => {
self.i2c_upper.read(buffer, 8);
self.state.set(State::ReadRomID);
}
State::ReadRomID => {
// u64 from 8 bytes
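                // (buffer[0] ends up as the least-significant byte)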
let rid = buffer
.iter()
.take(8)
.enumerate()
                    .fold(0u64, |rid, (i, b)| rid | ((*b as u64) << (i * 8)));
self.buffer.replace(buffer);
let error = if _error != i2c::Error::CommandComplete {
Err(ErrorCode::NOACK)
} else {
Ok(())
};
self.client.map(|client| client.romid(rid, error));
self.i2c_upper.disable();
self.state.set(State::Idle);
}
_ => {}
}
}
}
pub struct MAX17205Driver<'a> {
max17205: &'a MAX17205<'a>,
callback: MapCell<Upcall>,
}
impl<'a> MAX17205Driver<'a> {
pub fn new(max: &'a MAX17205) -> MAX17205Driver<'a> {
MAX17205Driver {
max17205: max,
callback: MapCell::new(Upcall::default()),
}
}
}
impl MAX17205Client for MAX17205Driver<'_> {
fn status(&self, status: u16, error: Result<(), ErrorCode>) {
self.callback
.map(|cb| cb.schedule(kernel::into_statuscode(error), status as usize, 0));
}
fn state_of_charge(
&self,
percent: u16,
capacity: u16,
full_capacity: u16,
error: Result<(), ErrorCode>,
) {
self.callback.map(|cb| {
cb.schedule(
kernel::into_statuscode(error),
percent as usize,
(capacity as usize) << 16 | (full_capacity as usize),
);
});
}
fn voltage_current(&self, voltage: u16, current: u16, error: Result<(), ErrorCode>) {
self.callback.map(|cb| {
cb.schedule(
kernel::into_statuscode(error),
voltage as usize,
current as usize,
)
});
}
fn coulomb(&self, coulomb: u16, error: Result<(), ErrorCode>) {
self.callback
.map(|cb| cb.schedule(kernel::into_statuscode(error), coulomb as usize, 0));
}
fn romid(&self, rid: u64, error: Result<(), ErrorCode>) {
self.callback.map(|cb| {
cb.schedule(
kernel::into_statuscode(error),
(rid & 0xffffffff) as usize,
(rid >> 32) as usize,
)
});
}
}
impl Driver for MAX17205Driver<'_> {
/// Setup callback.
///
/// ### `subscribe_num`
///
/// - `0`: Setup a callback for when all events complete or data is ready.
fn subscribe(
&self,
subscribe_num: usize,
callback: Upcall,
_app_id: ProcessId,
) -> Result<Upcall, (Upcall, ErrorCode)> {
match subscribe_num {
0 => {
if let Some(prev) = self.callback.replace(callback) {
Ok(prev)
} else {
// TODO(alevy): This should never happen because we start with a full MapCell
// and only ever replace it. This is just defensive until this module becomes
// multi-user, which will preclude the need for a MapCell in the first place.
Ok(Upcall::default())
}
}
// default
_ => Err((callback, ErrorCode::NOSUPPORT)),
}
}
/// Setup and read the MAX17205.
///
/// ### `command_num`
///
/// - `0`: Driver check.
/// - `1`: Read the current status of the MAX17205.
/// - `2`: Read the current state of charge percent.
/// - `3`: Read the current voltage and current draw.
/// - `4`: Read the raw coulomb count.
/// - `5`: Read the unique 64 bit RomID.
fn command(&self, command_num: usize, _data: usize, _: usize, _: ProcessId) -> CommandReturn {
match command_num {
0 => CommandReturn::success(),
// read status
1 => self.max17205.setup_read_status().into(),
// get soc
2 => self.max17205.setup_read_soc().into(),
// get voltage & current
3 => self.max17205.setup_read_curvolt().into(),
// get raw coulombs
4 => self.max17205.setup_read_coulomb().into(),
//
5 => self.max17205.setup_read_romid().into(),
// default
_ => CommandReturn::failure(ErrorCode::NOSUPPORT),
}
}
}
| 32.612576 | 98 | 0.520401 |
ac51c8b84c66be35a08c8de4152874030d1abdec | 8,749 | //! Pixel based fonts
//!
//! # Examples
//!
//! The examples below use the [`Font6x8`] font and the [`text_6x8`] macro, however any of the [font
//! types in this module](#types) or [`text_*` macros](../index.html#macros) can be substituted.
//!
//! ## Write some text to the screen at the default `(0, 0)` position
//!
//! ```rust
//! use embedded_graphics::prelude::*;
//! use embedded_graphics::fonts::Font6x8;
//! use embedded_graphics::text_6x8;
//! # use embedded_graphics::mock_display::MockDisplay;
//! # use embedded_graphics::pixelcolor::BinaryColor;
//! # let mut display: MockDisplay<BinaryColor> = MockDisplay::default();
//!
//! // Use struct methods directly
//! display.draw(Font6x8::render_str("Hello Rust!"));
//!
//! // Use a macro instead
//! display.draw(text_6x8!("Hello Rust!"));
//! ```
//!
//! ## Translate text by (20px, 30px)
//!
//! ```rust
//! use embedded_graphics::prelude::*;
//! use embedded_graphics::fonts::Font6x8;
//! # use embedded_graphics::mock_display::MockDisplay;
//! # use embedded_graphics::pixelcolor::BinaryColor;
//! # let mut display: MockDisplay<BinaryColor> = MockDisplay::default();
//!
//! display.draw(
//! Font6x8::render_str("Hello Rust!").translate(Point::new(20, 30))
//! );
//! ```
//!
//! ## Add some styling to the text
//!
//! Use [any method provided by the `WithStyle` trait](../style/trait.WithStyle.html#required-methods).
//! Properties like `fill_color` or `stroke_color` passed to the `text_6x8` macro are converted into method
//! calls verbatim.
//!
//! ```rust
//! use embedded_graphics::prelude::*;
//! use embedded_graphics::text_6x8;
//! use embedded_graphics::fonts::Font6x8;
//! use embedded_graphics::pixelcolor::Rgb565;
//! # use embedded_graphics::mock_display::MockDisplay;
//! # let mut display = MockDisplay::default();
//!
//! display.draw(text_6x8!(
//! "Hello Rust!",
//! fill_color = Some(Rgb565::BLUE),
//! stroke_color = Some(Rgb565::YELLOW)
//! ));
//!
//! display.draw(
//! Font6x8::render_str("Hello Rust!")
//! .translate(Point::new(20, 30))
//! .fill_color(Some(Rgb565::BLUE))
//! .stroke_color(Some(Rgb565::YELLOW)),
//! );
//! ```
//!
//! ## Use `write!()` and arrayvec to render a formatted string
//!
//! This example uses arrayvec's [`ArrayString`] to render a floating point value using the
//! [`write!()`] macro. These strings have a fixed length, but allow the use of Rust's builtin
//! string formatting.
//!
//! ```rust
//! use arrayvec::ArrayString;
//! use core::fmt::Write;
//! use embedded_graphics::fonts::Font6x8;
//! use embedded_graphics::pixelcolor::Rgb565;
//! use embedded_graphics::prelude::*;
//! use embedded_graphics::text_6x8;
//! # use embedded_graphics::mock_display::MockDisplay;
//! # let mut display = MockDisplay::default();
//!
//! let value = 12.34567;
//!
//! // Create a fixed buffer of length 12
//! let mut buf = ArrayString::<[_; 12]>::new();
//!
//! // Output `Value: 12.35`
//! write!(&mut buf, "Value: {:.2}", value).expect("Failed to write to buffer");
//!
//! display.draw(text_6x8!(
//! &buf,
//! fill_color = Some(Rgb565::BLUE),
//! stroke_color = Some(Rgb565::YELLOW)
//! ));
//! ```
//!
//! [`text_6x8`]: ../macro.text_6x8.html
//! [`Font6x8`]: ./type.Font6x8.html
//! [`ArrayString`]: https://docs.rs/arrayvec/0.4.11/arrayvec/struct.ArrayString.html
//! [`write!()`]: https://doc.rust-lang.org/nightly/std/macro.write.html
mod font12x16;
mod font6x12;
mod font6x8;
mod font8x16;
pub mod font_builder;
pub use self::font12x16::Font12x16;
pub use self::font6x12::Font6x12;
pub use self::font6x8::Font6x8;
pub use self::font8x16::Font8x16;
use crate::geometry::Dimensions;
use crate::pixelcolor::PixelColor;
use crate::style::WithStyle;
/// Common methods for all fonts
pub trait Font<'a, C>: WithStyle<C> + Dimensions
where
C: PixelColor,
{
/// Render a string in the implementing font's typeface.
///
/// Defaults to 1u8 for stroke_color and 0u8 for fill_color
///
/// ```rust
/// use embedded_graphics::prelude::*;
/// use embedded_graphics::fonts::Font6x8;
/// use embedded_graphics::pixelcolor::Rgb565;
/// # use embedded_graphics::mock_display::MockDisplay as Display;
///
/// fn main() {
/// let mut disp = Display::default();
/// // Render a string with a red stroke
/// let text = Font6x8::render_str("Hello world")
/// .style(Style::stroke_color(Rgb565::RED));
///
/// disp.draw(text);
/// }
/// ```
fn render_str(chars: &'a str) -> Self;
}
/// Internal macro used to implement `text_*` on fonts. Do not use directly!
#[doc(hidden)]
#[macro_export]
macro_rules! impl_text {
($Font:ident, $text:expr $(, $style_key:ident = $style_value:expr )* $(,)?) => {{
#[allow(unused_imports)]
use $crate::style::WithStyle;
$crate::fonts::$Font::render_str($text)
$( .$style_key($style_value) )*
}};
}
/// Render text using the [`Font6x8`](./fonts/type.Font6x8.html) font
///
/// ```rust
/// use embedded_graphics::{text_6x8, prelude::*, fonts::Font6x8, pixelcolor::Rgb565};
///
/// let text: Font6x8<Rgb565> = text_6x8!("Hello world!");
/// let styled_text: Font6x8<Rgb565> = text_6x8!(
/// "Hello world!",
/// stroke_color = Some(Rgb565::RED),
/// fill_color = Some(Rgb565::GREEN)
/// );
/// ```
///
/// Style properties like `stroke` map to the method calls on the
/// [`WithStyle`](./style/trait.WithStyle.html) trait.
#[macro_export]
macro_rules! text_6x8 {
($text:expr $(, $style_key:ident = $style_value:expr )* $(,)?) => {
$crate::impl_text!(Font6x8, $text $(, $style_key = $style_value )*)
};
}
/// Render text using the [`Font6x12`](./fonts/type.Font6x12.html) font
///
/// ```rust
/// use embedded_graphics::{text_6x12, prelude::*, fonts::Font6x12, pixelcolor::Rgb565};
///
/// let text: Font6x12<Rgb565> = text_6x12!("Hello world!");
/// let styled_text: Font6x12<Rgb565> = text_6x12!(
/// "Hello world!",
/// stroke_color = Some(Rgb565::RED),
/// fill_color = Some(Rgb565::GREEN)
/// );
/// ```
///
/// Style properties like `stroke` map to the method calls on the
/// [`WithStyle`](./style/trait.WithStyle.html) trait.
#[macro_export]
macro_rules! text_6x12 {
($text:expr $(, $style_key:ident = $style_value:expr )* $(,)?) => {
$crate::impl_text!(Font6x12, $text $(, $style_key = $style_value )*)
};
}
/// Render text using the [`Font8x16`](./fonts/type.Font8x16.html) font
///
/// ```rust
/// use embedded_graphics::{text_8x16, prelude::*, fonts::Font8x16, pixelcolor::Rgb565};
///
/// let text: Font8x16<Rgb565> = text_8x16!("Hello world!");
/// let styled_text: Font8x16<Rgb565> = text_8x16!(
/// "Hello world!",
/// stroke_color = Some(Rgb565::RED),
/// fill_color = Some(Rgb565::GREEN)
/// );
/// ```
///
/// Style properties like `stroke` map to the method calls on the
/// [`WithStyle`](./style/trait.WithStyle.html) trait.
#[macro_export]
macro_rules! text_8x16 {
($text:expr $(, $style_key:ident = $style_value:expr )* $(,)?) => {
$crate::impl_text!(Font8x16, $text $(, $style_key = $style_value )*)
};
}
/// Render text using the [`Font12x16`](./fonts/type.Font12x16.html) font
///
/// ```rust
/// use embedded_graphics::{text_12x16, prelude::*, fonts::Font12x16, pixelcolor::Rgb565};
///
/// let text: Font12x16<Rgb565> = text_12x16!("Hello world!");
/// let styled_text: Font12x16<Rgb565> = text_12x16!(
/// "Hello world!",
/// stroke_color = Some(Rgb565::RED),
/// fill_color = Some(Rgb565::GREEN)
/// );
/// ```
///
/// Style properties like `stroke` map to the method calls on the
/// [`WithStyle`](./style/trait.WithStyle.html) trait.
#[macro_export]
macro_rules! text_12x16 {
($text:expr $(, $style_key:ident = $style_value:expr )* $(,)?) => {
$crate::impl_text!(Font12x16, $text $(, $style_key = $style_value )*)
};
}
#[cfg(test)]
mod tests {
use super::*;
use crate::pixelcolor::{BinaryColor, Rgb565, RgbColor};
#[test]
fn font_macros() {
let _text: Font6x8<BinaryColor> = text_6x8!("Hello!");
let _text: Font6x12<BinaryColor> = text_6x12!("Hello!");
let _text: Font8x16<BinaryColor> = text_8x16!("Hello!");
let _text: Font12x16<BinaryColor> = text_12x16!("Hello!");
}
#[test]
fn styled_text() {
let _text: Font6x8<Rgb565> = text_6x8!("Hello!", stroke_color = Some(Rgb565::RED));
let _text: Font6x12<Rgb565> = text_6x12!("Hello!", stroke_color = Some(Rgb565::GREEN));
let _text: Font8x16<Rgb565> = text_8x16!("Hello!", stroke_color = Some(Rgb565::BLUE));
let _text: Font12x16<Rgb565> = text_12x16!("Hello!", stroke_color = Some(Rgb565::YELLOW));
}
}
| 33.015094 | 107 | 0.628643 |