use crate::sqlx_types::{sqlite::SqliteRow, SqlitePool};
use sea_query::{SelectStatement, SqliteQueryBuilder};
use crate::debug_print;
pub struct Executor {
pool: SqlitePool,
}
pub trait IntoExecutor {
fn into_executor(self) -> Executor;
}
impl IntoExecutor for SqlitePool {
fn into_executor(self) -> Executor {
Executor { pool: self }
}
}
impl Executor {
pub async fn fetch_all(&self, select: SelectStatement) -> Vec<SqliteRow> {
let (sql, values) = select.build(SqliteQueryBuilder);
debug_print!("{}, {:?}", sql, values);
panic!("This is a mock Executor");
}
pub async fn fetch_one(&self, select: SelectStatement) -> SqliteRow {
let (sql, values) = select.build(SqliteQueryBuilder);
debug_print!("{}, {:?}", sql, values);
panic!("This is a mock Executor");
}
pub async fn fetch_all_raw(&self, sql: String) -> Vec<SqliteRow> {
debug_print!("{}", sql);
panic!("This is a mock Executor");
}
}
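// Minimal usage sketch (hypothetical caller, not part of this module's API):
// `IntoExecutor` lets call sites turn a `SqlitePool` into the mock `Executor`
// uniformly; both fetch methods then panic by design.
#[allow(dead_code)]
async fn demo_usage(pool: SqlitePool) {
    use sea_query::Query;
    let executor = pool.into_executor();
    let _rows = executor.fetch_all(Query::select()).await; // panics: mock Executor
}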

// Copyright 2020 Veil Rust Developers
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
use quiche;
use rocksdb;
use serde_json;
use std::{error, fmt, io, net, num, sync::mpsc};
use veil_core_rpc;
#[derive(Debug)]
pub enum ErrorKind {
Io(io::Error),
ParseInt(num::ParseIntError),
ParseFloat(num::ParseFloatError),
AddrParse(net::AddrParseError),
RocksDb(rocksdb::Error),
SerdeJson(serde_json::Error),
VeilCoreRpc(veil_core_rpc::Error),
MpscRecvError(mpsc::RecvError),
    // `mpsc::SendError<T>` is generic over the unsent value; storing the rendered
    // message keeps this enum (and `Error`) non-generic.
    MpscSendError(String),
Quic(quiche::Error),
}
impl From<&ErrorKind> for i32 {
fn from(e: &ErrorKind) -> Self {
match e {
ErrorKind::Io(_) => 1,
ErrorKind::ParseInt(_) => 2,
ErrorKind::ParseFloat(_) => 3,
ErrorKind::AddrParse(_) => 4,
ErrorKind::RocksDb(_) => 5,
ErrorKind::SerdeJson(_) => 6,
ErrorKind::VeilCoreRpc(_) => 7,
ErrorKind::MpscRecvError(_) => 8,
ErrorKind::MpscSendError(_) => 9,
ErrorKind::Quic(_) => 10,
}
}
}
#[derive(Debug)]
pub struct Error(Box<ErrorKind>);
impl Error {
pub fn new(kind: ErrorKind) -> Error {
Error(Box::new(kind))
}
pub fn source(&self) -> &ErrorKind {
&self.0
}
pub fn exit_code(&self) -> i32 {
i32::from(self.0.as_ref())
}
pub fn cause(&self) -> String {
format!("{}", self)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self.0 {
ErrorKind::Io(ref e) => e.fmt(f),
ErrorKind::ParseInt(ref e) => e.fmt(f),
ErrorKind::ParseFloat(ref e) => e.fmt(f),
ErrorKind::AddrParse(ref e) => e.fmt(f),
ErrorKind::RocksDb(ref e) => e.fmt(f),
ErrorKind::VeilCoreRpc(ref e) => e.fmt(f),
ErrorKind::SerdeJson(ref e) => e.fmt(f),
ErrorKind::MpscRecvError(ref e) => e.fmt(f),
            ErrorKind::MpscSendError(ref e) => f.write_str(e),
ErrorKind::Quic(ref e) => e.fmt(f),
}
}
}
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self.0 {
ErrorKind::Io(ref e) => Some(e),
ErrorKind::ParseInt(ref e) => Some(e),
ErrorKind::ParseFloat(ref e) => Some(e),
ErrorKind::AddrParse(ref e) => Some(e),
ErrorKind::RocksDb(ref e) => Some(e),
ErrorKind::VeilCoreRpc(ref e) => Some(e),
ErrorKind::SerdeJson(ref e) => Some(e),
ErrorKind::MpscRecvError(ref e) => Some(e),
            // The send error is stored as its message only, so there is no further source.
            ErrorKind::MpscSendError(_) => None,
ErrorKind::Quic(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::new(ErrorKind::Io(e))
}
}
impl From<num::ParseIntError> for Error {
fn from(e: num::ParseIntError) -> Self {
Error::new(ErrorKind::ParseInt(e))
}
}
impl From<num::ParseFloatError> for Error {
fn from(e: num::ParseFloatError) -> Self {
Error::new(ErrorKind::ParseFloat(e))
}
}
impl From<std::net::AddrParseError> for Error {
fn from(e: net::AddrParseError) -> Self {
Error::new(ErrorKind::AddrParse(e))
}
}
impl From<rocksdb::Error> for Error {
fn from(e: rocksdb::Error) -> Self {
Error::new(ErrorKind::RocksDb(e))
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Self {
Error::new(ErrorKind::SerdeJson(e))
}
}
impl From<veil_core_rpc::Error> for Error {
fn from(e: veil_core_rpc::Error) -> Self {
Error::new(ErrorKind::VeilCoreRpc(e))
}
}
impl From<mpsc::RecvError> for Error {
fn from(e: mpsc::RecvError) -> Self {
Error::new(ErrorKind::MpscRecvError(e))
}
}
impl<T> From<mpsc::SendError<T>> for Error {
    fn from(e: mpsc::SendError<T>) -> Self {
        // the payload type parameter is erased by rendering the message
        Error::new(ErrorKind::MpscSendError(e.to_string()))
    }
}
impl From<quiche::Error> for Error {
fn from(e: quiche::Error) -> Self {
Error::new(ErrorKind::Quic(e))
}
}
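// Minimal sketch of the intended usage: each `From` impl above lets `?` lift a
// library error into `Error`, and `exit_code` maps the variant to a stable
// process exit status. The parse target below is an arbitrary example.
#[allow(dead_code)]
fn parse_port(raw: &str) -> Result<u16, Error> {
    let port = raw.parse::<u16>()?; // num::ParseIntError -> Error via From
    Ok(port)
}
#[allow(dead_code)]
fn report(e: &Error) -> i32 {
    eprintln!("error: {}", e); // Display delegates to the wrapped error
    e.exit_code()
}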

use log::*;
use mips::registers::cp0;
pub fn read_epoch() -> u64 {
// TODO: support RTC
0
}
/// Enable timer interrupt
pub fn init() {
// Enable supervisor timer interrupt
cp0::status::enable_hard_int5(); // IP(7), timer interrupt
cp0::count::write_u32(0);
set_next();
info!("timer: init end");
}
/// Set the next timer interrupt
pub fn set_next() {
// 100Hz @ QEMU
let timebase = 250000;
cp0::count::write_u32(0);
cp0::compare::write_u32(timebase);
}
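// Sketch of the intended flow (the handler name is hypothetical; the real
// dispatch lives in the trap-handling code): each timer interrupt must re-arm
// CP0 Compare via `set_next`, otherwise the interrupt fires only once.
#[allow(dead_code)]
pub fn on_timer_interrupt() {
    set_next();
    // ...then hand off to the scheduler / timekeeping code...
}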

use crate::*;
use bson::Document;
use serde_json::Value;
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct InsertManyArgs {
db_name: String,
collection_name: String,
docs: Vec<Value>,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct InsertOneArgs {
db_name: String,
collection_name: String,
doc: Value,
}
pub fn insert_one(command: Command) -> util::AsyncJsonOp<bson::Bson> {
let fut = async move {
let client = command.get_client();
let data = command.data.first();
let args: InsertOneArgs =
serde_json::from_slice(data.ok_or("Missing arguments for insertOne")?.as_ref())
.map_err(|e| e.to_string())?;
let db_name = args.db_name;
let collection_name = args.collection_name;
let doc = util::json_to_document(args.doc).ok_or("doc can not be null")?;
let database = client.database(&db_name);
let collection = database.collection(&collection_name);
let insert_result = collection
.insert_one(doc, None)
.map_err(|e| e.to_string())?;
Ok(insert_result.inserted_id)
};
fut.boxed()
}
pub fn insert_many(command: Command) -> util::AsyncJsonOp<Vec<bson::Bson>> {
let fut = async move {
let client = command.get_client();
let data = command.data.first();
let args: InsertManyArgs =
serde_json::from_slice(data.ok_or("Missing arguments for insertMany")?.as_ref())
.map_err(|e| e.to_string())?;
let db_name = args.db_name;
let collection_name = args.collection_name;
let docs: Vec<Document> = util::jsons_to_documents(args.docs);
let database = client.database(&db_name);
let collection = database.collection(&collection_name);
let insert_result = collection
.insert_many(docs, None)
.map_err(|e| e.to_string())?;
let ids: Vec<bson::Bson> = insert_result
.inserted_ids
.iter()
.map(|(_, id)| id.to_owned())
.collect();
Ok(ids)
};
fut.boxed()
}
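// For reference, the JSON these ops deserialize (field names follow the
// `camelCase` rename above); the values here are illustrative only:
//
//   insertOne:  {"dbName": "test", "collectionName": "users", "doc": {"name": "alice"}}
//   insertMany: {"dbName": "test", "collectionName": "users", "docs": [{"name": "a"}, {"name": "b"}]}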

pub use crate::definitions::entity::{Player, PlayerTypeDef};
use crate::{app::SqlPool, definitions::returned::ReturnedPlayer};
use actix_web::Either;
use anyhow::{Context, Result};
use sea_query::{
ColumnDef, Expr, OnConflict, Order, PostgresDriver, PostgresQueryBuilder, Query, Table,
};
use tokio_postgres::Row;
use uuid::Uuid;
use super::{ColumnsDef, Entity};
impl Default for Player {
fn default() -> Self {
Self {
uuid: Uuid::new_v4(),
hexes: 0,
last_username: "".to_string(),
last_seen: chrono::Utc::now(),
created_at: chrono::Utc::now(),
updated_at: chrono::Utc::now(),
}
}
}
impl From<Row> for Player {
fn from(row: Row) -> Self {
Self {
uuid: row.get("uuid"),
hexes: row.get("hexes"),
last_username: row.get("last_username"),
last_seen: row.get("last_seen"),
created_at: row.get("created_at"),
updated_at: row.get("updated_at"),
}
}
}
impl From<&Player> for ReturnedPlayer {
fn from(player: &Player) -> Self {
Self {
uuid: player.uuid,
hexes: player.hexes,
last_username: player.last_username.clone(),
last_seen: player.last_seen,
created_at: player.created_at,
updated_at: player.updated_at,
}
}
}
impl ColumnsDef<PlayerTypeDef> for PlayerTypeDef {
fn columns() -> std::slice::Iter<'static, Self> {
static COLUMNS: [PlayerTypeDef; 6] = [
PlayerTypeDef::Uuid,
PlayerTypeDef::Hexes,
PlayerTypeDef::LastUsername,
PlayerTypeDef::LastSeen,
PlayerTypeDef::CreatedAt,
PlayerTypeDef::UpdatedAt,
];
COLUMNS.iter()
}
fn def(&self) -> ColumnDef {
let mut column = ColumnDef::new(*self);
match *self {
PlayerTypeDef::Uuid => column.uuid().not_null().primary_key(),
PlayerTypeDef::Hexes => column.integer().not_null().default(0),
PlayerTypeDef::LastUsername => column.string_len(16).not_null(),
PlayerTypeDef::LastSeen => column
.date_time()
.not_null()
.extra("DEFAULT NOW()".to_string()),
PlayerTypeDef::CreatedAt => column
.date_time()
.not_null()
.extra("DEFAULT NOW()".to_string()),
PlayerTypeDef::UpdatedAt => column
.date_time()
.not_null()
.extra("DEFAULT NOW()".to_string()),
_ => unreachable!(),
};
column
}
}
fn create_internally(entity: &Player) -> sea_query::InsertStatement {
let mut statement = Query::insert();
statement
.into_table(PlayerTypeDef::Table)
.columns(PlayerTypeDef::columns().copied().collect::<Vec<_>>())
.values_panic(vec![
entity.uuid.into(),
entity.hexes.into(),
entity.last_username.as_str().into(),
entity.last_seen.into(),
entity.created_at.into(),
entity.updated_at.into(),
]);
statement
}
#[async_trait::async_trait]
impl Entity<Player, Either<Uuid, String>, SqlPool> for Player {
async fn up(pool: &SqlPool) -> Result<()> {
let mut sql = Table::create();
sql.table(PlayerTypeDef::Table).if_not_exists();
for column in PlayerTypeDef::columns() {
sql.col(&mut column.def());
}
let client = pool
.get()
.await
.context("Could not get client from pool.")?;
client
.execute(&sql.build(PostgresQueryBuilder), &[])
.await
.context("Could not create the Player table.")?;
Ok(())
}
async fn find(pool: &SqlPool, id: Either<Uuid, String>) -> Result<Option<Player>> {
let expr = match id {
Either::Left(uuid) => Expr::col(PlayerTypeDef::Uuid).eq(uuid),
Either::Right(username) => Expr::col(PlayerTypeDef::LastUsername).eq(username),
};
let (sql, values) = Query::select()
.columns(PlayerTypeDef::columns().copied().collect::<Vec<_>>())
.from(PlayerTypeDef::Table)
.limit(1)
.and_where(expr)
.build(PostgresQueryBuilder);
let client = pool
.get()
.await
.context("Could not get client from pool.")?;
let row = client
.query_one(&sql, &values.as_params())
.await
.context("Could not find the player.")?;
Ok(Some(Player::from(row)))
}
async fn find_all(pool: &SqlPool) -> Result<Vec<Player>> {
let (sql, values) = Query::select()
.columns(PlayerTypeDef::columns().copied().collect::<Vec<_>>())
.from(PlayerTypeDef::Table)
.order_by(PlayerTypeDef::Uuid, Order::Asc)
.build(PostgresQueryBuilder);
let client = pool
.get()
.await
.context("Could not get client from pool.")?;
let rows = client
.query(&sql, &values.as_params())
.await
.context("Could not find all the players.")?
.into_iter()
.map(Player::from)
.collect::<Vec<_>>();
Ok(rows)
}
async fn create(&self, pool: &SqlPool) -> Result<()> {
let (sql, values) = create_internally(self).build(PostgresQueryBuilder);
let client = pool
.get()
.await
.context("Could not get client from pool.")?;
        client
            // the INSERT has no RETURNING clause, so `execute` (row count) is the
            // right call here; `query_one` would error on zero returned rows
            .execute(&sql, &values.as_params())
            .await
            .context("Failed to create the player.")?;
Ok(())
}
async fn update(&self, pool: &SqlPool) -> Result<()> {
let (sql, values) = create_internally(self)
.on_conflict(
OnConflict::column(PlayerTypeDef::Uuid)
.update_columns(PlayerTypeDef::columns().copied().collect::<Vec<_>>())
.to_owned(),
)
.build(PostgresQueryBuilder);
let client = pool
.get()
.await
.context("Could not get client from pool.")?;
client
.execute(&sql, &values.as_params())
.await
.context("Failed to update the player.")?;
Ok(())
}
async fn find_all_with_offset(pool: &SqlPool, offset: u64, limit: u64) -> Result<Vec<Player>> {
let (sql, values) = Query::select()
.columns(PlayerTypeDef::columns().copied().collect::<Vec<_>>())
.from(PlayerTypeDef::Table)
.order_by(PlayerTypeDef::Uuid, Order::Asc)
.limit(limit)
.offset(offset)
.build(PostgresQueryBuilder);
let client = pool
.get()
.await
.context("Could not get client from pool.")?;
let rows = client
.query(&sql, &values.as_params())
.await
.context("Could not find all the players.")?
.into_iter()
.map(Player::from)
.collect::<Vec<_>>();
Ok(rows)
}
async fn delete(pool: &SqlPool, id: Either<Uuid, String>) -> Result<()> {
let expr = match id {
Either::Left(uuid) => Expr::col(PlayerTypeDef::Uuid).eq(uuid),
Either::Right(username) => Expr::col(PlayerTypeDef::LastUsername).eq(username),
};
let (sql, values) = Query::delete()
.from_table(PlayerTypeDef::Table)
.and_where(expr)
.build(PostgresQueryBuilder);
let client = pool
.get()
.await
.context("Could not get client from pool.")?;
client
.execute(&sql, &values.as_params())
.await
.context("Failed to delete the player.")?;
Ok(())
}
}
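// Minimal usage sketch for the `Entity` impl above; assumes a `SqlPool` built
// at application startup and an async runtime already driving it.
#[allow(dead_code)]
async fn player_crud_sketch(pool: &SqlPool) -> Result<()> {
    Player::up(pool).await?; // ensure the table exists
    let player = Player::default();
    player.create(pool).await?; // plain INSERT
    let _found = Player::find(pool, Either::Left(player.uuid)).await?;
    player.update(pool).await?; // INSERT ... ON CONFLICT (uuid) DO UPDATE
    Player::delete(pool, Either::Left(player.uuid)).await?;
    Ok(())
}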

// run-pass
// aux-build:static_fn_trait_xc_aux.rs
// pretty-expanded FIXME #23616
extern crate static_fn_trait_xc_aux as mycore;
use mycore::num;
pub fn main() {
let _1: f64 = num::Num2::from_int2(1);
}

/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
//! Load timeout configuration properties from an AWS profile
use crate::profile::Profile;
use crate::provider_config::ProviderConfig;
use aws_smithy_types::timeout::{parse_str_as_timeout, TimeoutConfig, TimeoutConfigError};
use aws_types::os_shim_internal::{Env, Fs};
use std::time::Duration;
const PROFILE_VAR_CONNECT_TIMEOUT: &str = "connect_timeout";
const PROFILE_VAR_TLS_NEGOTIATION_TIMEOUT: &str = "tls_negotiation_timeout";
const PROFILE_VAR_READ_TIMEOUT: &str = "read_timeout";
const PROFILE_VAR_API_CALL_ATTEMPT_TIMEOUT: &str = "api_call_attempt_timeout";
const PROFILE_VAR_API_CALL_TIMEOUT: &str = "api_call_timeout";
/// Load timeout configuration properties from a profile file
///
/// This provider will attempt to load AWS shared configuration, then read timeout configuration
/// properties from the active profile. Timeout values represent the number of seconds before timing
/// out and must be non-negative floats or integers. NaN and infinity are also invalid. If at least
/// one of these values is valid, construction will succeed.
///
/// # Examples
///
/// **Sets timeouts for the `default` profile**
/// ```ini
/// [default]
/// connect_timeout = 1.0
/// read_timeout = 1.0
/// tls_negotiation_timeout = 0.5
/// api_call_attempt_timeout = 2
/// api_call_timeout = 3
/// ```
///
/// **Sets the `connect_timeout` to 0.5 seconds _if and only if_ the `other` profile is selected.**
///
/// ```ini
/// [profile other]
/// connect_timeout = 0.5
/// ```
///
/// This provider is part of the [default timeout config provider chain](crate::default_provider::timeout_config).
#[derive(Debug, Default)]
pub struct ProfileFileTimeoutConfigProvider {
fs: Fs,
env: Env,
profile_override: Option<String>,
}
/// Builder for [`ProfileFileTimeoutConfigProvider`]
#[derive(Default)]
pub struct Builder {
config: Option<ProviderConfig>,
profile_override: Option<String>,
}
impl Builder {
/// Override the configuration for this provider
pub fn configure(mut self, config: &ProviderConfig) -> Self {
self.config = Some(config.clone());
self
}
/// Override the profile name used by the [`ProfileFileTimeoutConfigProvider`]
pub fn profile_name(mut self, profile_name: impl Into<String>) -> Self {
self.profile_override = Some(profile_name.into());
self
}
/// Build a [`ProfileFileTimeoutConfigProvider`] from this builder
pub fn build(self) -> ProfileFileTimeoutConfigProvider {
let conf = self.config.unwrap_or_default();
ProfileFileTimeoutConfigProvider {
env: conf.env(),
fs: conf.fs(),
profile_override: self.profile_override,
}
}
}
impl ProfileFileTimeoutConfigProvider {
/// Create a new [`ProfileFileTimeoutConfigProvider`]
///
/// To override the selected profile, set the `AWS_PROFILE` environment variable or use the [`Builder`].
pub fn new() -> Self {
Self {
fs: Fs::real(),
env: Env::real(),
profile_override: None,
}
}
/// [`Builder`] to construct a [`ProfileFileTimeoutConfigProvider`]
pub fn builder() -> Builder {
Builder::default()
}
/// Attempt to create a new [`TimeoutConfig`] from a profile file.
pub async fn timeout_config(&self) -> Result<TimeoutConfig, TimeoutConfigError> {
let profile = match super::parser::load(&self.fs, &self.env).await {
Ok(profile) => profile,
Err(err) => {
tracing::warn!(err = %err, "failed to parse profile, skipping it");
                // return an empty timeout config
return Ok(Default::default());
}
};
let selected_profile = self
.profile_override
.as_deref()
.unwrap_or_else(|| profile.selected_profile());
let selected_profile = match profile.get_profile(selected_profile) {
Some(profile) => profile,
None => {
tracing::warn!(
"failed to get selected '{}' profile, skipping it",
selected_profile
);
// return an empty config
return Ok(TimeoutConfig::new());
}
};
let connect_timeout =
construct_timeout_from_profile_var(selected_profile, PROFILE_VAR_CONNECT_TIMEOUT)?;
let tls_negotiation_timeout = construct_timeout_from_profile_var(
selected_profile,
PROFILE_VAR_TLS_NEGOTIATION_TIMEOUT,
)?;
let read_timeout =
construct_timeout_from_profile_var(selected_profile, PROFILE_VAR_READ_TIMEOUT)?;
let api_call_attempt_timeout = construct_timeout_from_profile_var(
selected_profile,
PROFILE_VAR_API_CALL_ATTEMPT_TIMEOUT,
)?;
let api_call_timeout =
construct_timeout_from_profile_var(selected_profile, PROFILE_VAR_API_CALL_TIMEOUT)?;
Ok(TimeoutConfig::new()
.with_connect_timeout(connect_timeout)
.with_tls_negotiation_timeout(tls_negotiation_timeout)
.with_read_timeout(read_timeout)
.with_api_call_attempt_timeout(api_call_attempt_timeout)
.with_api_call_timeout(api_call_timeout))
}
}
fn construct_timeout_from_profile_var(
profile: &Profile,
var: &'static str,
) -> Result<Option<Duration>, TimeoutConfigError> {
let profile_name = format!("aws profile [{}]", profile.name());
match profile.get(var) {
Some(timeout) => parse_str_as_timeout(timeout, var.into(), profile_name.into()).map(Some),
None => Ok(None),
}
}
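// Minimal sketch of constructing the provider and loading the profile-derived
// timeouts; the profile name here is an arbitrary example.
#[allow(dead_code)]
async fn load_profile_timeouts_sketch() -> Result<TimeoutConfig, TimeoutConfigError> {
    let provider = ProfileFileTimeoutConfigProvider::builder()
        .profile_name("other")
        .build();
    provider.timeout_config().await
}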

mod db;
use actix_web::{middleware, web, App, HttpRequest, HttpServer};
async fn index(req: HttpRequest) -> &'static str {
println!("REQ: {:?}", req);
"Hello world!"
}
#[actix_rt::main]
async fn main() -> std::io::Result<()> {
std::env::set_var("RUST_LOG", "actix_web=info");
env_logger::init();
HttpServer::new(|| {
App::new()
// enable logger
.wrap(middleware::Logger::default())
.service(web::resource("/index.html").to(|| async { "Hello world!" }))
.service(web::resource("/").to(index))
})
.bind("0.0.0.0:8000")?
.run()
.await
}
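// With the server running, both routes return the same body, e.g.
// (hypothetical host) `curl http://127.0.0.1:8000/` or `/index.html` -> "Hello world!".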

use swc_html_ast::*;
use crate::parser::{
is_html_integration_point, is_mathml_text_integration_point, is_same_node, Data, RcNode,
};
static IMPLICIT_END_TAG_REQUIRED: &[&str] = &[
"dd", "dt", "li", "optgroup", "option", "p", "rb", "rp", "rt", "rtc",
];
static IMPLICIT_END_TAG_REQUIRED_THOROUGHLY: &[&str] = &[
"caption", "colgroup", "dd", "dt", "li", "optgroup", "option", "p", "rb", "rp", "rt", "rtc",
"tbody", "td", "tfoot", "th", "thead", "tr",
];
static SPECIFIC_SCOPE: &[(&str, Namespace)] = &[
("applet", Namespace::HTML),
("caption", Namespace::HTML),
("html", Namespace::HTML),
("marquee", Namespace::HTML),
("object", Namespace::HTML),
("table", Namespace::HTML),
("td", Namespace::HTML),
("template", Namespace::HTML),
("th", Namespace::HTML),
("annotation-xml", Namespace::MATHML),
("mi", Namespace::MATHML),
("mn", Namespace::MATHML),
("mo", Namespace::MATHML),
("ms", Namespace::MATHML),
("mtext", Namespace::MATHML),
("desc", Namespace::SVG),
("foreignObject", Namespace::SVG),
("title", Namespace::SVG),
];
static LIST_ITEM_SCOPE: &[(&str, Namespace)] = &[
("applet", Namespace::HTML),
("caption", Namespace::HTML),
("html", Namespace::HTML),
("marquee", Namespace::HTML),
("object", Namespace::HTML),
("table", Namespace::HTML),
("td", Namespace::HTML),
("template", Namespace::HTML),
("th", Namespace::HTML),
("annotation-xml", Namespace::MATHML),
("mi", Namespace::MATHML),
("mn", Namespace::MATHML),
("mo", Namespace::MATHML),
("ms", Namespace::MATHML),
("mtext", Namespace::MATHML),
("desc", Namespace::SVG),
("foreignObject", Namespace::SVG),
("title", Namespace::SVG),
("ol", Namespace::HTML),
("ul", Namespace::HTML),
];
static BUTTON_SCOPE: &[(&str, Namespace)] = &[
("applet", Namespace::HTML),
("caption", Namespace::HTML),
("html", Namespace::HTML),
("marquee", Namespace::HTML),
("object", Namespace::HTML),
("table", Namespace::HTML),
("td", Namespace::HTML),
("template", Namespace::HTML),
("th", Namespace::HTML),
("annotation-xml", Namespace::MATHML),
("mi", Namespace::MATHML),
("mn", Namespace::MATHML),
("mo", Namespace::MATHML),
("ms", Namespace::MATHML),
("mtext", Namespace::MATHML),
("desc", Namespace::SVG),
("foreignObject", Namespace::SVG),
("title", Namespace::SVG),
("button", Namespace::HTML),
];
static TABLE_SCOPE: &[(&str, Namespace)] = &[
("html", Namespace::HTML),
("table", Namespace::HTML),
("template", Namespace::HTML),
];
static SELECT_SCOPE: &[(&str, Namespace)] =
&[("optgroup", Namespace::HTML), ("option", Namespace::HTML)];
pub struct OpenElementsStack {
pub items: Vec<RcNode>,
template_element_count: usize,
}
impl OpenElementsStack {
pub fn new() -> Self {
OpenElementsStack {
items: vec![],
template_element_count: 0,
}
}
pub fn push(&mut self, node: RcNode) {
if get_tag_name!(node) == "template" {
self.template_element_count += 1;
}
self.items.push(node);
}
pub fn pop(&mut self) -> Option<RcNode> {
let popped = self.items.pop();
if let Some(node) = &popped {
if get_tag_name!(node) == "template" {
self.template_element_count -= 1;
}
}
popped
}
pub fn remove(&mut self, node: &RcNode) {
let position = self.items.iter().rposition(|x| is_same_node(node, x));
if let Some(position) = position {
if get_tag_name!(node) == "template" {
self.template_element_count -= 1;
}
self.items.remove(position);
}
}
pub fn contains_template_element(&self) -> bool {
self.template_element_count > 0
}
// The stack of open elements is said to have an element target node in a
// specific scope consisting of a list of element types list when the following
// algorithm terminates in a match state:
fn has_element_target_node_in_specific_scope(
&self,
tag_name: &str,
list: &[(&str, Namespace)],
) -> bool {
let mut iter = self.items.iter().rev();
// 1. Initialize node to be the current node (the bottommost node of the stack).
let mut node = iter.next();
while let Some(inner_node) = node {
// 2. If node is the target node, terminate in a match state.
if get_tag_name!(inner_node) == tag_name
&& get_namespace!(inner_node) == Namespace::HTML
{
return true;
}
// 3. Otherwise, if node is one of the element types in list, terminate in a
// failure state.
for element_and_ns in list {
if get_tag_name!(inner_node) == element_and_ns.0
&& get_namespace!(inner_node) == element_and_ns.1
{
return false;
}
}
// 4. Otherwise, set node to the previous entry in the stack of open elements
// and return to step 2. (This will never fail, since the loop will always
// terminate in the previous step if the top of the stack — an html element — is
// reached.)
node = iter.next();
}
false
}
// The stack of open elements is said to have a particular element in scope when
// it has that element in the specific scope consisting of the following element
// types:
//
// applet
// caption
// html
// table
// td
// th
// marquee
// object
// template
// MathML mi
// MathML mo
// MathML mn
// MathML ms
// MathML mtext
// MathML annotation-xml
// SVG foreignObject
// SVG desc
// SVG title
pub fn has_in_scope(&self, tag_name: &str) -> bool {
self.has_element_target_node_in_specific_scope(tag_name, SPECIFIC_SCOPE)
}
pub fn has_node_in_scope(&self, target: &RcNode) -> bool {
let mut iter = self.items.iter().rev();
// 1. Initialize node to be the current node (the bottommost node of the stack).
let mut node = iter.next();
while let Some(inner_node) = node {
// 2. If node is the target node, terminate in a match state.
if is_same_node(target, inner_node) {
return true;
}
// 3. Otherwise, if node is one of the element types in list, terminate in a
// failure state.
for element_and_ns in SPECIFIC_SCOPE {
if get_tag_name!(inner_node) == element_and_ns.0
&& get_namespace!(inner_node) == element_and_ns.1
{
return false;
}
}
// 4. Otherwise, set node to the previous entry in the stack of open elements
// and return to step 2. (This will never fail, since the loop will always
// terminate in the previous step if the top of the stack — an html element — is
// reached.)
node = iter.next();
}
false
}
// The stack of open elements is said to have a particular element in list item
// scope when it has that element in the specific scope consisting of the
// following element types:
//
// All the element types listed above for the has an element in scope algorithm.
// ol in the HTML namespace
// ul in the HTML namespace
pub fn has_in_list_item_scope(&self, tag_name: &str) -> bool {
self.has_element_target_node_in_specific_scope(tag_name, LIST_ITEM_SCOPE)
}
// The stack of open elements is said to have a particular element in button
// scope when it has that element in the specific scope consisting of the
// following element types:
//
// All the element types listed above for the has an element in scope algorithm.
// button in the HTML namespace
pub fn has_in_button_scope(&self, tag_name: &str) -> bool {
self.has_element_target_node_in_specific_scope(tag_name, BUTTON_SCOPE)
}
// The stack of open elements is said to have a particular element in table
// scope when it has that element in the specific scope consisting of the
// following element types:
//
// html in the HTML namespace
// table in the HTML namespace
// template in the HTML namespace
pub fn has_in_table_scope(&self, tag_name: &str) -> bool {
self.has_element_target_node_in_specific_scope(tag_name, TABLE_SCOPE)
}
// The stack of open elements is said to have a particular element in select
// scope when it has that element in the specific scope consisting of all
// element types except the following:
//
// optgroup in the HTML namespace
// option in the HTML namespace
pub fn has_in_select_scope(&self, tag_name: &str) -> bool {
self.has_element_target_node_in_specific_scope(tag_name, SELECT_SCOPE)
}
// When the steps above require the UA to clear the stack back to a table
// context, it means that the UA must, while the current node is not a table,
// template, or html element, pop elements from the stack of open elements.
pub fn clear_back_to_table_context(&mut self) {
while let Some(node) = self.items.last() {
if !matches!(get_tag_name!(node), "table" | "template" | "html") {
self.pop();
} else {
break;
}
}
}
// When the steps above require the UA to clear the stack back to a table row
// context, it means that the UA must, while the current node is not a tr,
// template, or html element, pop elements from the stack of open elements.
pub fn clear_back_to_table_row_context(&mut self) {
while let Some(node) = self.items.last() {
if !matches!(get_tag_name!(node), "tr" | "template" | "html") {
self.pop();
} else {
break;
}
}
}
// When the steps above require the UA to clear the stack back to a table body
// context, it means that the UA must, while the current node is not a tbody,
// tfoot, thead, template, or html element, pop elements from the stack of open
// elements.
pub fn clear_back_to_table_body_context(&mut self) {
while let Some(node) = self.items.last() {
if !matches!(
get_tag_name!(node),
"thead" | "tfoot" | "tbody" | "template" | "html"
) {
self.pop();
} else {
break;
}
}
}
// When the steps below require the UA to generate implied end tags, then, while
// the current node is a dd element, a dt element, an li element, an optgroup
// element, an option element, a p element, an rb element, an rp element, an rt
// element, or an rtc element, the UA must pop the current node off the stack of
// open elements.
//
// If a step requires the UA to generate implied end tags but lists an element
// to exclude from the process, then the UA must perform the above steps as if
// that element was not in the above list.
pub fn generate_implied_end_tags(&mut self) {
while let Some(node) = self.items.last() {
if IMPLICIT_END_TAG_REQUIRED.contains(&get_tag_name!(node)) {
self.pop();
} else {
break;
}
}
}
pub fn generate_implied_end_tags_with_exclusion(&mut self, tag_name: &str) {
while let Some(node) = self.items.last() {
if get_tag_name!(node) == tag_name {
break;
}
if IMPLICIT_END_TAG_REQUIRED.contains(&get_tag_name!(node)) {
self.pop();
} else {
break;
}
}
}
// When the steps below require the UA to generate all implied end tags
// thoroughly, then, while the current node is a caption element, a colgroup
// element, a dd element, a dt element, an li element, an optgroup element, an
// option element, a p element, an rb element, an rp element, an rt element, an
// rtc element, a tbody element, a td element, a tfoot element, a th element, a
// thead element, or a tr element, the UA must pop the current node off the
// stack of open elements.
pub fn generate_implied_end_tags_thoroughly(&mut self) {
while let Some(node) = self.items.last() {
if IMPLICIT_END_TAG_REQUIRED_THOROUGHLY.contains(&get_tag_name!(node)) {
self.pop();
} else {
break;
}
}
}
pub fn pop_until_tag_name_popped(&mut self, tag_name: &[&str]) {
while let Some(node) = self.pop() {
if tag_name.contains(&get_tag_name!(node)) && get_namespace!(node) == Namespace::HTML {
break;
}
}
}
pub fn pop_until_node(&mut self, until_to_node: &RcNode) {
while let Some(node) = &self.pop() {
if is_same_node(node, until_to_node) {
break;
}
}
}
// While the current node is not a MathML text integration point, an HTML
// integration point, or an element in the HTML namespace, pop elements from
// the stack of open elements.
pub fn pop_until_in_foreign(&mut self) {
while let Some(node) = self.pop() {
match &node.data {
Data::Element(Element { namespace, .. }) if *namespace == Namespace::HTML => {
break;
}
_ if is_mathml_text_integration_point(Some(&node))
|| is_html_integration_point(Some(&node)) =>
{
break;
}
_ => {}
}
}
}
}
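// Worked example of the scope algorithm above: for a stack built from
// `<html><body><p><button>`, `has_in_scope("p")` is true, but
// `has_in_button_scope("p")` is false, because the walk up from the current
// node reaches the `button` element (which is in BUTTON_SCOPE) before it can
// reach the `p`. This is what lets the parser auto-close an open `p` almost
// anywhere while treating an open `button` as a boundary.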

#[doc = "Reader of register CH2_CTRL"]
pub type R = crate::R<u32, super::CH2_CTRL>;
#[doc = "Writer for register CH2_CTRL"]
pub type W = crate::W<u32, super::CH2_CTRL>;
#[doc = "Register CH2_CTRL `reset()`'s with value 0"]
impl crate::ResetValue for super::CH2_CTRL {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type { 0 }
}
#[doc = "DMA Structure Type\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum STRUCTTYPE_A {
#[doc = "0: DMA transfer structure type selected."]
TRANSFER = 0,
#[doc = "1: Synchronization structure type selected."]
SYNCHRONIZE = 1,
#[doc = "2: Write immediate value structure type selected."]
WRITE = 2,
}
impl From<STRUCTTYPE_A> for u8 {
#[inline(always)]
fn from(variant: STRUCTTYPE_A) -> Self { variant as _ }
}
#[doc = "Reader of field `STRUCTTYPE`"]
pub type STRUCTTYPE_R = crate::R<u8, STRUCTTYPE_A>;
impl STRUCTTYPE_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, STRUCTTYPE_A> {
use crate::Variant::*;
match self.bits {
0 => Val(STRUCTTYPE_A::TRANSFER),
1 => Val(STRUCTTYPE_A::SYNCHRONIZE),
2 => Val(STRUCTTYPE_A::WRITE),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `TRANSFER`"]
#[inline(always)]
pub fn is_transfer(&self) -> bool { *self == STRUCTTYPE_A::TRANSFER }
#[doc = "Checks if the value of the field is `SYNCHRONIZE`"]
#[inline(always)]
pub fn is_synchronize(&self) -> bool { *self == STRUCTTYPE_A::SYNCHRONIZE }
#[doc = "Checks if the value of the field is `WRITE`"]
#[inline(always)]
pub fn is_write(&self) -> bool { *self == STRUCTTYPE_A::WRITE }
}
#[doc = "Write proxy for field `STRUCTREQ`"]
pub struct STRUCTREQ_W<'a> {
w: &'a mut W,
}
impl<'a> STRUCTREQ_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W { self.bit(true) }
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "Reader of field `XFERCNT`"]
pub type XFERCNT_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `XFERCNT`"]
pub struct XFERCNT_W<'a> {
w: &'a mut W,
}
impl<'a> XFERCNT_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07ff << 4)) | (((value as u32) & 0x07ff) << 4);
self.w
}
}
#[doc = "Reader of field `BYTESWAP`"]
pub type BYTESWAP_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `BYTESWAP`"]
pub struct BYTESWAP_W<'a> {
w: &'a mut W,
}
impl<'a> BYTESWAP_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W { self.bit(true) }
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15);
self.w
}
}
#[doc = "Block Transfer Size\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum BLOCKSIZE_A {
#[doc = "0: One unit transfer per arbitration"]
UNIT1 = 0,
#[doc = "1: Two unit transfers per arbitration"]
UNIT2 = 1,
#[doc = "2: Three unit transfers per arbitration"]
UNIT3 = 2,
#[doc = "3: Four unit transfers per arbitration"]
UNIT4 = 3,
#[doc = "4: Six unit transfers per arbitration"]
UNIT6 = 4,
#[doc = "5: Eight unit transfers per arbitration"]
UNIT8 = 5,
#[doc = "7: Sixteen unit transfers per arbitration"]
UNIT16 = 7,
#[doc = "9: 32 unit transfers per arbitration"]
UNIT32 = 9,
#[doc = "10: 64 unit transfers per arbitration"]
UNIT64 = 10,
#[doc = "11: 128 unit transfers per arbitration"]
UNIT128 = 11,
#[doc = "12: 256 unit transfers per arbitration"]
UNIT256 = 12,
#[doc = "13: 512 unit transfers per arbitration"]
UNIT512 = 13,
#[doc = "14: 1024 unit transfers per arbitration"]
UNIT1024 = 14,
#[doc = "15: Transfer all units as specified by the XFRCNT field"]
ALL = 15,
}
impl From<BLOCKSIZE_A> for u8 {
#[inline(always)]
fn from(variant: BLOCKSIZE_A) -> Self { variant as _ }
}
#[doc = "Reader of field `BLOCKSIZE`"]
pub type BLOCKSIZE_R = crate::R<u8, BLOCKSIZE_A>;
impl BLOCKSIZE_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, BLOCKSIZE_A> {
use crate::Variant::*;
match self.bits {
0 => Val(BLOCKSIZE_A::UNIT1),
1 => Val(BLOCKSIZE_A::UNIT2),
2 => Val(BLOCKSIZE_A::UNIT3),
3 => Val(BLOCKSIZE_A::UNIT4),
4 => Val(BLOCKSIZE_A::UNIT6),
5 => Val(BLOCKSIZE_A::UNIT8),
7 => Val(BLOCKSIZE_A::UNIT16),
9 => Val(BLOCKSIZE_A::UNIT32),
10 => Val(BLOCKSIZE_A::UNIT64),
11 => Val(BLOCKSIZE_A::UNIT128),
12 => Val(BLOCKSIZE_A::UNIT256),
13 => Val(BLOCKSIZE_A::UNIT512),
14 => Val(BLOCKSIZE_A::UNIT1024),
15 => Val(BLOCKSIZE_A::ALL),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `UNIT1`"]
#[inline(always)]
pub fn is_unit1(&self) -> bool { *self == BLOCKSIZE_A::UNIT1 }
#[doc = "Checks if the value of the field is `UNIT2`"]
#[inline(always)]
pub fn is_unit2(&self) -> bool { *self == BLOCKSIZE_A::UNIT2 }
#[doc = "Checks if the value of the field is `UNIT3`"]
#[inline(always)]
pub fn is_unit3(&self) -> bool { *self == BLOCKSIZE_A::UNIT3 }
#[doc = "Checks if the value of the field is `UNIT4`"]
#[inline(always)]
pub fn is_unit4(&self) -> bool { *self == BLOCKSIZE_A::UNIT4 }
#[doc = "Checks if the value of the field is `UNIT6`"]
#[inline(always)]
pub fn is_unit6(&self) -> bool { *self == BLOCKSIZE_A::UNIT6 }
#[doc = "Checks if the value of the field is `UNIT8`"]
#[inline(always)]
pub fn is_unit8(&self) -> bool { *self == BLOCKSIZE_A::UNIT8 }
#[doc = "Checks if the value of the field is `UNIT16`"]
#[inline(always)]
pub fn is_unit16(&self) -> bool { *self == BLOCKSIZE_A::UNIT16 }
#[doc = "Checks if the value of the field is `UNIT32`"]
#[inline(always)]
pub fn is_unit32(&self) -> bool { *self == BLOCKSIZE_A::UNIT32 }
#[doc = "Checks if the value of the field is `UNIT64`"]
#[inline(always)]
pub fn is_unit64(&self) -> bool { *self == BLOCKSIZE_A::UNIT64 }
#[doc = "Checks if the value of the field is `UNIT128`"]
#[inline(always)]
pub fn is_unit128(&self) -> bool { *self == BLOCKSIZE_A::UNIT128 }
#[doc = "Checks if the value of the field is `UNIT256`"]
#[inline(always)]
pub fn is_unit256(&self) -> bool { *self == BLOCKSIZE_A::UNIT256 }
#[doc = "Checks if the value of the field is `UNIT512`"]
#[inline(always)]
pub fn is_unit512(&self) -> bool { *self == BLOCKSIZE_A::UNIT512 }
#[doc = "Checks if the value of the field is `UNIT1024`"]
#[inline(always)]
pub fn is_unit1024(&self) -> bool { *self == BLOCKSIZE_A::UNIT1024 }
#[doc = "Checks if the value of the field is `ALL`"]
#[inline(always)]
pub fn is_all(&self) -> bool { *self == BLOCKSIZE_A::ALL }
}
#[doc = "Write proxy for field `BLOCKSIZE`"]
pub struct BLOCKSIZE_W<'a> {
w: &'a mut W,
}
impl<'a> BLOCKSIZE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: BLOCKSIZE_A) -> &'a mut W { unsafe { self.bits(variant.into()) } }
#[doc = "One unit transfer per arbitration"]
#[inline(always)]
pub fn unit1(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT1) }
#[doc = "Two unit transfers per arbitration"]
#[inline(always)]
pub fn unit2(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT2) }
#[doc = "Three unit transfers per arbitration"]
#[inline(always)]
pub fn unit3(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT3) }
#[doc = "Four unit transfers per arbitration"]
#[inline(always)]
pub fn unit4(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT4) }
#[doc = "Six unit transfers per arbitration"]
#[inline(always)]
pub fn unit6(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT6) }
#[doc = "Eight unit transfers per arbitration"]
#[inline(always)]
pub fn unit8(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT8) }
#[doc = "Sixteen unit transfers per arbitration"]
#[inline(always)]
pub fn unit16(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT16) }
#[doc = "32 unit transfers per arbitration"]
#[inline(always)]
pub fn unit32(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT32) }
#[doc = "64 unit transfers per arbitration"]
#[inline(always)]
pub fn unit64(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT64) }
#[doc = "128 unit transfers per arbitration"]
#[inline(always)]
pub fn unit128(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT128) }
#[doc = "256 unit transfers per arbitration"]
#[inline(always)]
pub fn unit256(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT256) }
#[doc = "512 unit transfers per arbitration"]
#[inline(always)]
pub fn unit512(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT512) }
#[doc = "1024 unit transfers per arbitration"]
#[inline(always)]
pub fn unit1024(self) -> &'a mut W { self.variant(BLOCKSIZE_A::UNIT1024) }
#[doc = "Transfer all units as specified by the XFRCNT field"]
#[inline(always)]
pub fn all(self) -> &'a mut W { self.variant(BLOCKSIZE_A::ALL) }
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 16)) | (((value as u32) & 0x0f) << 16);
self.w
}
}
#[doc = "Reader of field `DONEIFSEN`"]
pub type DONEIFSEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DONEIFSEN`"]
pub struct DONEIFSEN_W<'a> {
w: &'a mut W,
}
impl<'a> DONEIFSEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W { self.bit(true) }
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20);
self.w
}
}
#[doc = "Reader of field `REQMODE`"]
pub type REQMODE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `REQMODE`"]
pub struct REQMODE_W<'a> {
w: &'a mut W,
}
impl<'a> REQMODE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W { self.bit(true) }
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 21)) | (((value as u32) & 0x01) << 21);
self.w
}
}
#[doc = "Reader of field `DECLOOPCNT`"]
pub type DECLOOPCNT_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DECLOOPCNT`"]
pub struct DECLOOPCNT_W<'a> {
w: &'a mut W,
}
impl<'a> DECLOOPCNT_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W { self.bit(true) }
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 22)) | (((value as u32) & 0x01) << 22);
self.w
}
}
#[doc = "Reader of field `IGNORESREQ`"]
pub type IGNORESREQ_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `IGNORESREQ`"]
pub struct IGNORESREQ_W<'a> {
w: &'a mut W,
}
impl<'a> IGNORESREQ_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W { self.bit(true) }
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 23)) | (((value as u32) & 0x01) << 23);
self.w
}
}
#[doc = "Source Address Increment Size\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum SRCINC_A {
#[doc = "0: Increment source address by one unit data size after each read"]
ONE = 0,
#[doc = "1: Increment source address by two unit data sizes after each read"]
TWO = 1,
#[doc = "2: Increment source address by four unit data sizes after each read"]
FOUR = 2,
#[doc = "3: Do not increment the source address. In this mode reads are made from a fixed source address, for example reading FIFO."]
NONE = 3,
}
impl From<SRCINC_A> for u8 {
#[inline(always)]
fn from(variant: SRCINC_A) -> Self { variant as _ }
}
#[doc = "Reader of field `SRCINC`"]
pub type SRCINC_R = crate::R<u8, SRCINC_A>;
impl SRCINC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> SRCINC_A {
match self.bits {
0 => SRCINC_A::ONE,
1 => SRCINC_A::TWO,
2 => SRCINC_A::FOUR,
3 => SRCINC_A::NONE,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `ONE`"]
#[inline(always)]
pub fn is_one(&self) -> bool { *self == SRCINC_A::ONE }
#[doc = "Checks if the value of the field is `TWO`"]
#[inline(always)]
pub fn is_two(&self) -> bool { *self == SRCINC_A::TWO }
#[doc = "Checks if the value of the field is `FOUR`"]
#[inline(always)]
pub fn is_four(&self) -> bool { *self == SRCINC_A::FOUR }
#[doc = "Checks if the value of the field is `NONE`"]
#[inline(always)]
pub fn is_none(&self) -> bool { *self == SRCINC_A::NONE }
}
#[doc = "Write proxy for field `SRCINC`"]
pub struct SRCINC_W<'a> {
w: &'a mut W,
}
impl<'a> SRCINC_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
    pub fn variant(self, variant: SRCINC_A) -> &'a mut W { self.bits(variant.into()) }
#[doc = "Increment source address by one unit data size after each read"]
#[inline(always)]
pub fn one(self) -> &'a mut W { self.variant(SRCINC_A::ONE) }
#[doc = "Increment source address by two unit data sizes after each read"]
#[inline(always)]
pub fn two(self) -> &'a mut W { self.variant(SRCINC_A::TWO) }
#[doc = "Increment source address by four unit data sizes after each read"]
#[inline(always)]
pub fn four(self) -> &'a mut W { self.variant(SRCINC_A::FOUR) }
#[doc = "Do not increment the source address. In this mode reads are made from a fixed source address, for example reading FIFO."]
#[inline(always)]
pub fn none(self) -> &'a mut W { self.variant(SRCINC_A::NONE) }
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 24)) | (((value as u32) & 0x03) << 24);
self.w
}
}
#[doc = "Unit Data Transfer Size\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum SIZE_A {
#[doc = "0: Each unit transfer is a byte"]
BYTE = 0,
#[doc = "1: Each unit transfer is a half-word"]
HALFWORD = 1,
#[doc = "2: Each unit transfer is a word"]
WORD = 2,
}
impl From<SIZE_A> for u8 {
#[inline(always)]
fn from(variant: SIZE_A) -> Self { variant as _ }
}
#[doc = "Reader of field `SIZE`"]
pub type SIZE_R = crate::R<u8, SIZE_A>;
impl SIZE_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, SIZE_A> {
use crate::Variant::*;
match self.bits {
0 => Val(SIZE_A::BYTE),
1 => Val(SIZE_A::HALFWORD),
2 => Val(SIZE_A::WORD),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `BYTE`"]
#[inline(always)]
pub fn is_byte(&self) -> bool { *self == SIZE_A::BYTE }
#[doc = "Checks if the value of the field is `HALFWORD`"]
#[inline(always)]
pub fn is_halfword(&self) -> bool { *self == SIZE_A::HALFWORD }
#[doc = "Checks if the value of the field is `WORD`"]
#[inline(always)]
pub fn is_word(&self) -> bool { *self == SIZE_A::WORD }
}
#[doc = "Write proxy for field `SIZE`"]
pub struct SIZE_W<'a> {
w: &'a mut W,
}
impl<'a> SIZE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: SIZE_A) -> &'a mut W { unsafe { self.bits(variant.into()) } }
#[doc = "Each unit transfer is a byte"]
#[inline(always)]
pub fn byte(self) -> &'a mut W { self.variant(SIZE_A::BYTE) }
#[doc = "Each unit transfer is a half-word"]
#[inline(always)]
pub fn halfword(self) -> &'a mut W { self.variant(SIZE_A::HALFWORD) }
#[doc = "Each unit transfer is a word"]
#[inline(always)]
pub fn word(self) -> &'a mut W { self.variant(SIZE_A::WORD) }
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 26)) | (((value as u32) & 0x03) << 26);
self.w
}
}
#[doc = "Destination Address Increment Size\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum DSTINC_A {
#[doc = "0: Increment destination address by one unit data size after each write"]
ONE = 0,
#[doc = "1: Increment destination address by two unit data sizes after each write"]
TWO = 1,
#[doc = "2: Increment destination address by four unit data sizes after each write"]
FOUR = 2,
#[doc = "3: Do not increment the destination address. Writes are made to a fixed destination address, for example writing to a FIFO."]
NONE = 3,
}
impl From<DSTINC_A> for u8 {
#[inline(always)]
fn from(variant: DSTINC_A) -> Self { variant as _ }
}
#[doc = "Reader of field `DSTINC`"]
pub type DSTINC_R = crate::R<u8, DSTINC_A>;
impl DSTINC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> DSTINC_A {
match self.bits {
0 => DSTINC_A::ONE,
1 => DSTINC_A::TWO,
2 => DSTINC_A::FOUR,
3 => DSTINC_A::NONE,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `ONE`"]
#[inline(always)]
pub fn is_one(&self) -> bool { *self == DSTINC_A::ONE }
#[doc = "Checks if the value of the field is `TWO`"]
#[inline(always)]
pub fn is_two(&self) -> bool { *self == DSTINC_A::TWO }
#[doc = "Checks if the value of the field is `FOUR`"]
#[inline(always)]
pub fn is_four(&self) -> bool { *self == DSTINC_A::FOUR }
#[doc = "Checks if the value of the field is `NONE`"]
#[inline(always)]
pub fn is_none(&self) -> bool { *self == DSTINC_A::NONE }
}
#[doc = "Write proxy for field `DSTINC`"]
pub struct DSTINC_W<'a> {
w: &'a mut W,
}
impl<'a> DSTINC_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
    pub fn variant(self, variant: DSTINC_A) -> &'a mut W { self.bits(variant.into()) }
#[doc = "Increment destination address by one unit data size after each write"]
#[inline(always)]
pub fn one(self) -> &'a mut W { self.variant(DSTINC_A::ONE) }
#[doc = "Increment destination address by two unit data sizes after each write"]
#[inline(always)]
pub fn two(self) -> &'a mut W { self.variant(DSTINC_A::TWO) }
#[doc = "Increment destination address by four unit data sizes after each write"]
#[inline(always)]
pub fn four(self) -> &'a mut W { self.variant(DSTINC_A::FOUR) }
#[doc = "Do not increment the destination address. Writes are made to a fixed destination address, for example writing to a FIFO."]
#[inline(always)]
pub fn none(self) -> &'a mut W { self.variant(DSTINC_A::NONE) }
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 28)) | (((value as u32) & 0x03) << 28);
self.w
}
}
#[doc = "Reader of field `SRCMODE`"]
pub type SRCMODE_R = crate::R<bool, bool>;
#[doc = "Reader of field `DSTMODE`"]
pub type DSTMODE_R = crate::R<bool, bool>;
impl R {
#[doc = "Bits 0:1 - DMA Structure Type"]
#[inline(always)]
pub fn structtype(&self) -> STRUCTTYPE_R { STRUCTTYPE_R::new((self.bits & 0x03) as u8) }
#[doc = "Bits 4:14 - DMA Unit Data Transfer Count"]
#[inline(always)]
pub fn xfercnt(&self) -> XFERCNT_R { XFERCNT_R::new(((self.bits >> 4) & 0x07ff) as u16) }
#[doc = "Bit 15 - Endian Byte Swap"]
#[inline(always)]
pub fn byteswap(&self) -> BYTESWAP_R { BYTESWAP_R::new(((self.bits >> 15) & 0x01) != 0) }
#[doc = "Bits 16:19 - Block Transfer Size"]
#[inline(always)]
pub fn blocksize(&self) -> BLOCKSIZE_R { BLOCKSIZE_R::new(((self.bits >> 16) & 0x0f) as u8) }
#[doc = "Bit 20 - DMA Operation Done Interrupt Flag Set Enable"]
#[inline(always)]
pub fn doneifsen(&self) -> DONEIFSEN_R { DONEIFSEN_R::new(((self.bits >> 20) & 0x01) != 0) }
#[doc = "Bit 21 - DMA Request Transfer Mode Select"]
#[inline(always)]
pub fn reqmode(&self) -> REQMODE_R { REQMODE_R::new(((self.bits >> 21) & 0x01) != 0) }
#[doc = "Bit 22 - Decrement Loop Count"]
#[inline(always)]
pub fn decloopcnt(&self) -> DECLOOPCNT_R { DECLOOPCNT_R::new(((self.bits >> 22) & 0x01) != 0) }
#[doc = "Bit 23 - Ignore Sreq"]
#[inline(always)]
pub fn ignoresreq(&self) -> IGNORESREQ_R { IGNORESREQ_R::new(((self.bits >> 23) & 0x01) != 0) }
#[doc = "Bits 24:25 - Source Address Increment Size"]
#[inline(always)]
pub fn srcinc(&self) -> SRCINC_R { SRCINC_R::new(((self.bits >> 24) & 0x03) as u8) }
#[doc = "Bits 26:27 - Unit Data Transfer Size"]
#[inline(always)]
pub fn size(&self) -> SIZE_R { SIZE_R::new(((self.bits >> 26) & 0x03) as u8) }
#[doc = "Bits 28:29 - Destination Address Increment Size"]
#[inline(always)]
pub fn dstinc(&self) -> DSTINC_R { DSTINC_R::new(((self.bits >> 28) & 0x03) as u8) }
#[doc = "Bit 30 - Source Addressing Mode"]
#[inline(always)]
pub fn srcmode(&self) -> SRCMODE_R { SRCMODE_R::new(((self.bits >> 30) & 0x01) != 0) }
#[doc = "Bit 31 - Destination Addressing Mode"]
#[inline(always)]
pub fn dstmode(&self) -> DSTMODE_R { DSTMODE_R::new(((self.bits >> 31) & 0x01) != 0) }
}
impl W {
#[doc = "Bit 3 - Structure DMA Transfer Request"]
#[inline(always)]
pub fn structreq(&mut self) -> STRUCTREQ_W { STRUCTREQ_W { w: self } }
#[doc = "Bits 4:14 - DMA Unit Data Transfer Count"]
#[inline(always)]
pub fn xfercnt(&mut self) -> XFERCNT_W { XFERCNT_W { w: self } }
#[doc = "Bit 15 - Endian Byte Swap"]
#[inline(always)]
pub fn byteswap(&mut self) -> BYTESWAP_W { BYTESWAP_W { w: self } }
#[doc = "Bits 16:19 - Block Transfer Size"]
#[inline(always)]
pub fn blocksize(&mut self) -> BLOCKSIZE_W { BLOCKSIZE_W { w: self } }
#[doc = "Bit 20 - DMA Operation Done Interrupt Flag Set Enable"]
#[inline(always)]
pub fn doneifsen(&mut self) -> DONEIFSEN_W { DONEIFSEN_W { w: self } }
#[doc = "Bit 21 - DMA Request Transfer Mode Select"]
#[inline(always)]
pub fn reqmode(&mut self) -> REQMODE_W { REQMODE_W { w: self } }
#[doc = "Bit 22 - Decrement Loop Count"]
#[inline(always)]
pub fn decloopcnt(&mut self) -> DECLOOPCNT_W { DECLOOPCNT_W { w: self } }
#[doc = "Bit 23 - Ignore Sreq"]
#[inline(always)]
pub fn ignoresreq(&mut self) -> IGNORESREQ_W { IGNORESREQ_W { w: self } }
#[doc = "Bits 24:25 - Source Address Increment Size"]
#[inline(always)]
pub fn srcinc(&mut self) -> SRCINC_W { SRCINC_W { w: self } }
#[doc = "Bits 26:27 - Unit Data Transfer Size"]
#[inline(always)]
pub fn size(&mut self) -> SIZE_W { SIZE_W { w: self } }
#[doc = "Bits 28:29 - Destination Address Increment Size"]
#[inline(always)]
pub fn dstinc(&mut self) -> DSTINC_W { DSTINC_W { w: self } }
}
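// Usage sketch for the field API above, assuming the usual svd2rust register
// wrapper with `read`/`write`/`modify` (the `dma` handle name is hypothetical):
//
//     dma.ch2_ctrl.modify(|_, w| {
//         w.size().word()        // 32-bit unit transfers
//             .srcinc().one()    // advance source one unit per read
//             .dstinc().none()   // fixed destination, e.g. writing a FIFO
//             .blocksize().all() // arbitrate only after XFERCNT units
//     });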

fn main() {
    // While loops are similar to for loops, but they keep running the body as long as the condition returns true
// Basic while loop
    let mut x: i32 = 5; // The mut keyword makes the variable mutable (editable); it isn't needed for bindings you never change
let mut done: bool = false;
    while !done {
        // The ! operator negates the condition: false becomes true and vice versa
        x += x - 3;
        println!("{}", x);
        if x % 5 == 0 {
            done = true
        } // ! Make sure something eventually flips the condition, or the while loop runs forever!
    } // Here the loop adds x - 3 to x; once the result is a multiple of 5, the loop stops
// If you want an infinite loop, you may be tempted to write this
/*
while true {};
*/
    // However, Rust has a dedicated keyword for this
let mut i: i32 = 1;
loop {
i += 1;
println!("{}", i);
if i == 5000 {
break; // See break_and_continue.rs for more information
}
}
// For obvious reasons I had to make the infinite loop finite
}
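// `loop` is also an expression, so break can hand a value back out of the loop;
// a small extra example of this:
#[allow(dead_code)]
fn loop_with_value() {
    let mut n: i32 = 0;
    let result: i32 = loop {
        n += 1;
        if n * n > 50 {
            break n; // the whole loop evaluates to `n`
        }
    };
    println!("The first n with n^2 > 50 is {}", result);
}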

#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::SYSPLL_CTRL0 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
}
| 22.892308 | 59 | 0.497984 |
76bed990fc7134ae6c3584ee0c75fb4229c06188 | 33,383 | use cosmwasm_std::{
to_binary, Api, BankMsg, Binary, CanonicalAddr, Coin, CosmosMsg, Env, Extern, HandleResponse,
HumanAddr, InitResponse, Querier, QueryResult, ReadonlyStorage, StdError, StdResult, Storage,
Uint128,
};
use crate::msg::{
space_pad, ContractStatusLevel, HandleAnswer, HandleMsg, InitMsg, QueryAnswer, QueryMsg,
ResponseStatus::{Failure, Success},
};
use crate::rand::sha_256;
use crate::receiver::Snip20ReceiveMsg;
use crate::state::{
get_receiver_hash, get_transfers, read_allowance, read_viewing_key, set_receiver_hash,
store_transfer, write_allowance, write_viewing_key, Balances, Config, Constants,
ReadonlyBalances, ReadonlyConfig,
};
use crate::viewing_key::ViewingKey;
/// We make sure that responses from `handle` are padded to a multiple of this size.
const RESPONSE_BLOCK_SIZE: usize = 256;
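// Illustrative only: with a block size of 256, a 100-byte answer is padded to
// 256 bytes and a 300-byte answer to 512 bytes, so on-chain observers cannot
// distinguish responses by their length.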
pub fn init<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
_env: Env,
msg: InitMsg,
) -> StdResult<InitResponse> {
let mut total_supply: u128 = 0;
{
let mut balances = Balances::from_storage(&mut deps.storage);
for balance in msg.initial_balances {
let balance_address = deps.api.canonical_address(&balance.address)?;
let amount = balance.amount.u128();
balances.set_account_balance(&balance_address, amount);
if let Some(new_total_supply) = total_supply.checked_add(amount) {
total_supply = new_total_supply;
} else {
return Err(StdError::generic_err(
"The sum of all initial balances will exceed the maximum possible total supply",
));
}
}
}
// Check name, symbol, decimals
if !is_valid_name(&msg.name) {
return Err(StdError::generic_err(
"Name is not in the expected format (3-30 UTF-8 bytes)",
));
}
if !is_valid_symbol(&msg.symbol) {
return Err(StdError::generic_err(
"Ticker symbol is not in expected format [A-Z]{3,6}",
));
}
if msg.decimals > 18 {
return Err(StdError::generic_err("Decimals must not exceed 18"));
}
let admin = msg.admin.clone();
let prng_seed = hex::decode(msg.prng_seed).map_err(|e| {
StdError::generic_err(format!("PRNG seed must be a hexadecimal string: {}", e,))
})?;
let prng_seed_hashed = sha_256(&prng_seed);
let mut config = Config::from_storage(&mut deps.storage);
config.set_constants(&Constants {
name: msg.name,
symbol: msg.symbol,
decimals: msg.decimals,
admin,
prng_seed: prng_seed_hashed.to_vec(),
total_supply_is_public: msg.config.public_total_supply(),
})?;
config.set_total_supply(total_supply);
config.set_contract_status(ContractStatusLevel::NormalRun);
config.set_minters(Vec::from([msg.admin]))?;
Ok(InitResponse::default())
}
fn pad_response(response: StdResult<HandleResponse>) -> StdResult<HandleResponse> {
response.map(|mut response| {
response.data = response.data.map(|mut data| {
space_pad(RESPONSE_BLOCK_SIZE, &mut data.0);
data
});
response
})
}
pub fn handle<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
msg: HandleMsg,
) -> StdResult<HandleResponse> {
let contract_status = ReadonlyConfig::from_storage(&deps.storage).contract_status();
match contract_status {
ContractStatusLevel::StopAll | ContractStatusLevel::StopAllButWithdrawals => {
let response = match msg {
HandleMsg::SetContractStatus { level, .. } => set_contract_status(deps, env, level),
HandleMsg::Redeem { amount, .. }
if contract_status == ContractStatusLevel::StopAllButWithdrawals =>
{
try_redeem(deps, env, amount)
}
_ => Err(StdError::generic_err(
"This contract is stopped and this action is not allowed",
)),
};
return pad_response(response);
}
ContractStatusLevel::NormalRun => {} // If it's a normal run just continue
}
let response = match msg {
// Native
HandleMsg::Deposit { .. } => try_deposit(deps, env),
HandleMsg::Redeem { amount, .. } => try_redeem(deps, env, amount),
HandleMsg::Balance { .. } => try_balance(deps, env),
// Base
HandleMsg::Transfer {
recipient, amount, ..
} => try_transfer(deps, env, &recipient, amount),
HandleMsg::Send {
recipient,
amount,
msg,
..
} => try_send(deps, env, &recipient, amount, msg),
HandleMsg::Burn { amount, .. } => try_burn(deps, env, amount),
HandleMsg::RegisterReceive { code_hash, .. } => try_register_receive(deps, env, code_hash),
HandleMsg::CreateViewingKey { entropy, .. } => try_create_key(deps, env, entropy),
HandleMsg::SetViewingKey { key, .. } => try_set_key(deps, env, key),
// Allowance
HandleMsg::IncreaseAllowance {
spender,
amount,
expiration,
..
} => try_increase_allowance(deps, env, spender, amount, expiration),
HandleMsg::DecreaseAllowance {
spender,
amount,
expiration,
..
} => try_decrease_allowance(deps, env, spender, amount, expiration),
HandleMsg::TransferFrom {
owner,
recipient,
amount,
..
} => try_transfer_from(deps, env, &owner, &recipient, amount),
HandleMsg::SendFrom {
owner,
recipient,
amount,
msg,
..
} => try_send_from(deps, env, &owner, &recipient, amount, msg),
HandleMsg::BurnFrom { owner, amount, .. } => try_burn_from(deps, env, &owner, amount),
// Mint
HandleMsg::Mint {
amount, address, ..
} => try_mint(deps, env, address, amount),
// Other
HandleMsg::ChangeAdmin { address, .. } => change_admin(deps, env, address),
HandleMsg::SetContractStatus { level, .. } => set_contract_status(deps, env, level),
HandleMsg::AddMinters { minters, .. } => add_minters(deps, env, minters),
HandleMsg::RemoveMinters { minters, .. } => remove_minters(deps, env, minters),
HandleMsg::SetMinters { minters, .. } => set_minters(deps, env, minters),
};
pad_response(response)
}
pub fn query<S: Storage, A: Api, Q: Querier>(deps: &Extern<S, A, Q>, msg: QueryMsg) -> QueryResult {
match msg {
QueryMsg::TokenInfo {} => query_token_info(&deps.storage),
QueryMsg::ExchangeRate {} => query_exchange_rate(),
QueryMsg::Allowance { owner, spender, .. } => try_check_allowance(deps, owner, spender),
QueryMsg::Minters { .. } => query_minters(deps),
_ => authenticated_queries(deps, msg),
}
}
pub fn authenticated_queries<S: Storage, A: Api, Q: Querier>(
deps: &Extern<S, A, Q>,
msg: QueryMsg,
) -> QueryResult {
let (address, key) = msg.get_validation_params();
let canonical_addr = deps.api.canonical_address(address)?;
let expected_key = read_viewing_key(&deps.storage, &canonical_addr);
if expected_key.is_none() {
        // Checking the key takes a significant, roughly constant amount of time. We don't want
        // to return early when the key is unset, because the timing difference would let a
        // caller determine whether a viewing key exists for this address.
key.check_viewing_key(&[0u8; 24]);
return Ok(to_binary(&QueryAnswer::ViewingKeyError {
msg: "Wrong viewing key for this address or viewing key not set".to_string(),
})?);
}
if !key.check_viewing_key(expected_key.unwrap().as_slice()) {
return Ok(to_binary(&QueryAnswer::ViewingKeyError {
msg: "Wrong viewing key for this address or viewing key not set".to_string(),
})?);
}
match msg {
// Base
QueryMsg::Balance { address, .. } => query_balance(&deps, &address),
QueryMsg::TransferHistory {
address,
page,
page_size,
..
} => query_transactions(&deps, &address, page.unwrap_or(0), page_size),
_ => panic!("This query type does not require authentication"),
}
}
/// This function just returns a constant 1:1 rate to uscrt, since that's the purpose of this
/// contract.
fn query_exchange_rate() -> QueryResult {
to_binary(&QueryAnswer::ExchangeRate {
rate: Uint128(1),
denom: "uscrt".to_string(),
})
}
fn query_token_info<S: ReadonlyStorage>(storage: &S) -> QueryResult {
let config = ReadonlyConfig::from_storage(storage);
let constants = config.constants()?;
let total_supply = if constants.total_supply_is_public {
Some(Uint128(config.total_supply()))
} else {
None
};
to_binary(&QueryAnswer::TokenInfo {
name: constants.name,
symbol: constants.symbol,
decimals: constants.decimals,
total_supply,
})
}
pub fn query_transactions<S: Storage, A: Api, Q: Querier>(
deps: &Extern<S, A, Q>,
account: &HumanAddr,
page: u32,
page_size: u32,
) -> StdResult<Binary> {
    let address = deps.api.canonical_address(account)?;
let txs = get_transfers(&deps.api, &deps.storage, &address, page, page_size)?;
let result = QueryAnswer::TransferHistory { txs };
to_binary(&result)
}
pub fn query_balance<S: Storage, A: Api, Q: Querier>(
deps: &Extern<S, A, Q>,
account: &HumanAddr,
) -> StdResult<Binary> {
let address = deps.api.canonical_address(account)?;
let response = QueryAnswer::Balance {
amount: Uint128(get_balance(&deps.storage, &address)),
};
to_binary(&response)
}
fn query_minters<S: Storage, A: Api, Q: Querier>(deps: &Extern<S, A, Q>) -> StdResult<Binary> {
let minters = ReadonlyConfig::from_storage(&deps.storage).minters();
let response = QueryAnswer::Minters { minters };
to_binary(&response)
}
fn change_admin<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
address: HumanAddr,
) -> StdResult<HandleResponse> {
let mut config = Config::from_storage(&mut deps.storage);
check_if_admin(&config, &env.message.sender)?;
let mut consts = config.constants()?;
consts.admin = address;
config.set_constants(&consts)?;
Ok(HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::ChangeAdmin { status: Success })?),
})
}
fn try_mint<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
address: HumanAddr,
amount: Uint128,
) -> StdResult<HandleResponse> {
let mut config = Config::from_storage(&mut deps.storage);
let minters = config.minters();
    if !minters.contains(&env.message.sender) {
return Err(StdError::generic_err(
"Minting is allowed to minter accounts only",
));
}
let amount = amount.u128();
let mut total_supply = config.total_supply();
if let Some(new_total_supply) = total_supply.checked_add(amount) {
total_supply = new_total_supply;
} else {
return Err(StdError::generic_err(
"This mint attempt would increase the total supply above the supported maximum",
));
}
config.set_total_supply(total_supply);
    let recipient_account = &deps.api.canonical_address(&address)?;
let mut balances = Balances::from_storage(&mut deps.storage);
    let mut account_balance = balances.balance(recipient_account);
if let Some(new_balance) = account_balance.checked_add(amount) {
account_balance = new_balance;
} else {
        // This error cannot actually happen, since the account's funds are a subset
        // of the total supply, both are stored as u128, and we checked for overflow of
        // the total supply just a few lines above.
        // Still, handling it explicitly to cover all overflow paths.
return Err(StdError::generic_err(
"This mint attempt would increase the account's balance above the supported maximum",
));
}
    balances.set_account_balance(recipient_account, account_balance);
let res = HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::Mint { status: Success })?),
};
Ok(res)
}
pub fn try_set_key<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
key: String,
) -> StdResult<HandleResponse> {
let vk = ViewingKey(key);
if !vk.is_valid() {
return Ok(HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::SetViewingKey { status: Failure })?),
});
}
let message_sender = deps.api.canonical_address(&env.message.sender)?;
write_viewing_key(&mut deps.storage, &message_sender, &vk);
Ok(HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::SetViewingKey { status: Success })?),
})
}
pub fn try_create_key<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
entropy: String,
) -> StdResult<HandleResponse> {
let constants = ReadonlyConfig::from_storage(&deps.storage).constants()?;
let prng_seed = constants.prng_seed;
let key = ViewingKey::new(&env, &prng_seed, (&entropy).as_ref());
let message_sender = deps.api.canonical_address(&env.message.sender)?;
write_viewing_key(&mut deps.storage, &message_sender, &key);
Ok(HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::CreateViewingKey { key })?),
})
}
fn set_contract_status<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
status_level: ContractStatusLevel,
) -> StdResult<HandleResponse> {
let mut config = Config::from_storage(&mut deps.storage);
check_if_admin(&config, &env.message.sender)?;
config.set_contract_status(status_level);
Ok(HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::SetContractStatus {
status: Success,
})?),
})
}
pub fn try_check_allowance<S: Storage, A: Api, Q: Querier>(
deps: &Extern<S, A, Q>,
owner: HumanAddr,
spender: HumanAddr,
) -> StdResult<Binary> {
let owner_address = deps.api.canonical_address(&owner)?;
let spender_address = deps.api.canonical_address(&spender)?;
let allowance = read_allowance(&deps.storage, &owner_address, &spender_address)?;
let response = QueryAnswer::Allowance {
owner,
spender,
allowance: Uint128(allowance.amount),
expiration: allowance.expiration,
};
to_binary(&response)
}
pub fn try_balance<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
) -> StdResult<HandleResponse> {
let sender_address = deps.api.canonical_address(&env.message.sender)?;
let account_balance = get_balance(&deps.storage, &sender_address);
Ok(HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::Balance {
amount: Uint128(account_balance),
})?),
})
}
fn get_balance<S: Storage>(storage: &S, account: &CanonicalAddr) -> u128 {
ReadonlyBalances::from_storage(storage).account_amount(account)
}
fn try_deposit<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
) -> StdResult<HandleResponse> {
let mut amount = Uint128::zero();
for coin in &env.message.sent_funds {
if coin.denom == "uscrt" {
amount = coin.amount
}
}
if amount.is_zero() {
return Err(StdError::generic_err("No funds were sent to be deposited"));
}
let amount = amount.u128();
let sender_address = deps.api.canonical_address(&env.message.sender)?;
let mut balances = Balances::from_storage(&mut deps.storage);
let account_balance = balances.balance(&sender_address);
if let Some(account_balance) = account_balance.checked_add(amount) {
balances.set_account_balance(&sender_address, account_balance);
} else {
return Err(StdError::generic_err(
"This deposit would overflow your balance",
));
}
let mut config = Config::from_storage(&mut deps.storage);
let total_supply = config.total_supply();
if let Some(total_supply) = total_supply.checked_add(amount) {
config.set_total_supply(total_supply);
} else {
return Err(StdError::generic_err(
"This deposit would overflow the currency's total supply",
));
}
let res = HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::Deposit { status: Success })?),
};
Ok(res)
}
fn try_redeem<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
amount: Uint128,
) -> StdResult<HandleResponse> {
let sender_address = deps.api.canonical_address(&env.message.sender)?;
let amount_raw = amount.u128();
let mut balances = Balances::from_storage(&mut deps.storage);
let account_balance = balances.balance(&sender_address);
if let Some(account_balance) = account_balance.checked_sub(amount_raw) {
balances.set_account_balance(&sender_address, account_balance);
} else {
return Err(StdError::generic_err(format!(
"insufficient funds to burn: balance={}, required={}",
account_balance, amount_raw
)));
}
let mut config = Config::from_storage(&mut deps.storage);
let total_supply = config.total_supply();
if let Some(total_supply) = total_supply.checked_sub(amount_raw) {
config.set_total_supply(total_supply);
} else {
return Err(StdError::generic_err(
"You are tyring to redeem more tokens than what is available in the total supply",
));
}
    let withdrawal_coins: Vec<Coin> = vec![Coin {
denom: "uscrt".to_string(),
amount,
}];
let res = HandleResponse {
messages: vec![CosmosMsg::Bank(BankMsg::Send {
from_address: env.contract.address,
to_address: env.message.sender,
            amount: withdrawal_coins,
})],
log: vec![],
data: Some(to_binary(&HandleAnswer::Redeem { status: Success })?),
};
Ok(res)
}
fn try_transfer_impl<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
recipient: &HumanAddr,
amount: Uint128,
) -> StdResult<()> {
let sender_address = deps.api.canonical_address(&env.message.sender)?;
let recipient_address = deps.api.canonical_address(recipient)?;
perform_transfer(
&mut deps.storage,
&sender_address,
&recipient_address,
amount.u128(),
)?;
let symbol = Config::from_storage(&mut deps.storage).constants()?.symbol;
store_transfer(
&mut deps.storage,
&sender_address,
&recipient_address,
amount,
symbol,
)?;
Ok(())
}
fn try_transfer<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
recipient: &HumanAddr,
amount: Uint128,
) -> StdResult<HandleResponse> {
try_transfer_impl(deps, env, recipient, amount)?;
let res = HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::Transfer { status: Success })?),
};
Ok(res)
}
fn try_add_receiver_api_callback<S: ReadonlyStorage>(
messages: &mut Vec<CosmosMsg>,
storage: &S,
recipient: &HumanAddr,
msg: Option<Binary>,
sender: HumanAddr,
amount: Uint128,
) -> StdResult<()> {
let receiver_hash = get_receiver_hash(storage, recipient);
if let Some(receiver_hash) = receiver_hash {
let receiver_hash = receiver_hash?;
let receiver_msg = Snip20ReceiveMsg::new(sender, amount, msg);
let callback_msg = receiver_msg.into_cosmos_msg(receiver_hash, recipient.clone())?;
messages.push(callback_msg);
}
Ok(())
}
fn try_send<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
recipient: &HumanAddr,
amount: Uint128,
msg: Option<Binary>,
) -> StdResult<HandleResponse> {
let sender = env.message.sender.clone();
try_transfer_impl(deps, env, recipient, amount)?;
let mut messages = vec![];
try_add_receiver_api_callback(&mut messages, &deps.storage, recipient, msg, sender, amount)?;
let res = HandleResponse {
messages,
log: vec![],
data: Some(to_binary(&HandleAnswer::Send { status: Success })?),
};
Ok(res)
}
fn try_register_receive<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
code_hash: String,
) -> StdResult<HandleResponse> {
set_receiver_hash(&mut deps.storage, &env.message.sender, code_hash);
let res = HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::RegisterReceive {
status: Success,
})?),
};
Ok(res)
}
fn insufficient_allowance(allowance: u128, required: u128) -> StdError {
StdError::generic_err(format!(
"Insufficient allowance: allowance={}, required={}",
allowance, required
))
}
fn try_transfer_from_impl<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
owner: &HumanAddr,
recipient: &HumanAddr,
amount: Uint128,
) -> StdResult<()> {
let spender_address = deps.api.canonical_address(&env.message.sender)?;
let owner_address = deps.api.canonical_address(owner)?;
let recipient_address = deps.api.canonical_address(recipient)?;
let amount_raw = amount.u128();
let mut allowance = read_allowance(&deps.storage, &owner_address, &spender_address)?;
if allowance.expiration.map(|ex| ex < env.block.time) == Some(true) {
allowance.amount = 0;
write_allowance(
&mut deps.storage,
&owner_address,
&spender_address,
allowance,
)?;
return Err(insufficient_allowance(0, amount_raw));
}
if let Some(new_allowance) = allowance.amount.checked_sub(amount_raw) {
allowance.amount = new_allowance;
} else {
return Err(insufficient_allowance(allowance.amount, amount_raw));
}
write_allowance(
&mut deps.storage,
&owner_address,
&spender_address,
allowance,
)?;
perform_transfer(
&mut deps.storage,
&owner_address,
&recipient_address,
amount_raw,
)?;
let symbol = Config::from_storage(&mut deps.storage).constants()?.symbol;
store_transfer(
&mut deps.storage,
&owner_address,
&recipient_address,
amount,
symbol,
)?;
Ok(())
}
fn try_transfer_from<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
owner: &HumanAddr,
recipient: &HumanAddr,
amount: Uint128,
) -> StdResult<HandleResponse> {
try_transfer_from_impl(deps, env, owner, recipient, amount)?;
let res = HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::TransferFrom { status: Success })?),
};
Ok(res)
}
fn try_send_from<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
owner: &HumanAddr,
recipient: &HumanAddr,
amount: Uint128,
msg: Option<Binary>,
) -> StdResult<HandleResponse> {
let sender = env.message.sender.clone();
try_transfer_from_impl(deps, env, owner, recipient, amount)?;
let mut messages = vec![];
try_add_receiver_api_callback(&mut messages, &deps.storage, recipient, msg, sender, amount)?;
let res = HandleResponse {
messages,
log: vec![],
data: Some(to_binary(&HandleAnswer::SendFrom { status: Success })?),
};
Ok(res)
}
fn try_burn_from<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
owner: &HumanAddr,
amount: Uint128,
) -> StdResult<HandleResponse> {
let spender_address = deps.api.canonical_address(&env.message.sender)?;
let owner_address = deps.api.canonical_address(owner)?;
let amount = amount.u128();
let mut allowance = read_allowance(&deps.storage, &owner_address, &spender_address)?;
if allowance.expiration.map(|ex| ex < env.block.time) == Some(true) {
allowance.amount = 0;
write_allowance(
&mut deps.storage,
&owner_address,
&spender_address,
allowance,
)?;
return Err(insufficient_allowance(0, amount));
}
if let Some(new_allowance) = allowance.amount.checked_sub(amount) {
allowance.amount = new_allowance;
} else {
return Err(insufficient_allowance(allowance.amount, amount));
}
write_allowance(
&mut deps.storage,
&owner_address,
&spender_address,
allowance,
)?;
// subtract from owner account
let mut balances = Balances::from_storage(&mut deps.storage);
let mut account_balance = balances.balance(&owner_address);
if let Some(new_balance) = account_balance.checked_sub(amount) {
account_balance = new_balance;
} else {
return Err(StdError::generic_err(format!(
"insufficient funds to burn: balance={}, required={}",
account_balance, amount
)));
}
balances.set_account_balance(&owner_address, account_balance);
// remove from supply
let mut config = Config::from_storage(&mut deps.storage);
let mut total_supply = config.total_supply();
if let Some(new_total_supply) = total_supply.checked_sub(amount) {
total_supply = new_total_supply;
} else {
return Err(StdError::generic_err(
"You're trying to burn more than is available in the total supply",
));
}
config.set_total_supply(total_supply);
let res = HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::BurnFrom { status: Success })?),
};
Ok(res)
}
fn try_increase_allowance<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
spender: HumanAddr,
amount: Uint128,
expiration: Option<u64>,
) -> StdResult<HandleResponse> {
let owner_address = deps.api.canonical_address(&env.message.sender)?;
let spender_address = deps.api.canonical_address(&spender)?;
let mut allowance = read_allowance(&deps.storage, &owner_address, &spender_address)?;
allowance.amount = allowance.amount.saturating_add(amount.u128());
if expiration.is_some() {
allowance.expiration = expiration;
}
let new_amount = allowance.amount;
write_allowance(
&mut deps.storage,
&owner_address,
&spender_address,
allowance,
)?;
let res = HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::IncreaseAllowance {
owner: env.message.sender,
spender,
allowance: Uint128(new_amount),
})?),
};
Ok(res)
}
fn try_decrease_allowance<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
spender: HumanAddr,
amount: Uint128,
expiration: Option<u64>,
) -> StdResult<HandleResponse> {
let owner_address = deps.api.canonical_address(&env.message.sender)?;
let spender_address = deps.api.canonical_address(&spender)?;
let mut allowance = read_allowance(&deps.storage, &owner_address, &spender_address)?;
allowance.amount = allowance.amount.saturating_sub(amount.u128());
if expiration.is_some() {
allowance.expiration = expiration;
}
let new_amount = allowance.amount;
write_allowance(
&mut deps.storage,
&owner_address,
&spender_address,
allowance,
)?;
let res = HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::DecreaseAllowance {
owner: env.message.sender,
spender,
allowance: Uint128(new_amount),
})?),
};
Ok(res)
}
fn add_minters<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
minters_to_add: Vec<HumanAddr>,
) -> StdResult<HandleResponse> {
let mut config = Config::from_storage(&mut deps.storage);
check_if_admin(&config, &env.message.sender)?;
config.add_minters(minters_to_add)?;
Ok(HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::AddMinters { status: Success })?),
})
}
fn remove_minters<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
minters_to_remove: Vec<HumanAddr>,
) -> StdResult<HandleResponse> {
let mut config = Config::from_storage(&mut deps.storage);
check_if_admin(&config, &env.message.sender)?;
config.remove_minters(minters_to_remove)?;
Ok(HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::RemoveMinters { status: Success })?),
})
}
fn set_minters<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
minters_to_set: Vec<HumanAddr>,
) -> StdResult<HandleResponse> {
let mut config = Config::from_storage(&mut deps.storage);
check_if_admin(&config, &env.message.sender)?;
config.set_minters(minters_to_set)?;
Ok(HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::SetMinters { status: Success })?),
})
}
/// Burn tokens
///
/// Remove `amount` tokens from the system irreversibly, from signer account
///
/// @param amount the amount of money to burn
fn try_burn<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
amount: Uint128,
) -> StdResult<HandleResponse> {
let sender_address = deps.api.canonical_address(&env.message.sender)?;
let amount = amount.u128();
let mut balances = Balances::from_storage(&mut deps.storage);
let mut account_balance = balances.balance(&sender_address);
if let Some(new_account_balance) = account_balance.checked_sub(amount) {
account_balance = new_account_balance;
} else {
return Err(StdError::generic_err(format!(
"insufficient funds to burn: balance={}, required={}",
account_balance, amount
)));
}
balances.set_account_balance(&sender_address, account_balance);
let mut config = Config::from_storage(&mut deps.storage);
let mut total_supply = config.total_supply();
if let Some(new_total_supply) = total_supply.checked_sub(amount) {
total_supply = new_total_supply;
} else {
return Err(StdError::generic_err(
"You're trying to burn more than is available in the total supply",
));
}
config.set_total_supply(total_supply);
let res = HandleResponse {
messages: vec![],
log: vec![],
data: Some(to_binary(&HandleAnswer::Burn { status: Success })?),
};
Ok(res)
}
fn perform_transfer<T: Storage>(
store: &mut T,
from: &CanonicalAddr,
to: &CanonicalAddr,
amount: u128,
) -> StdResult<()> {
let mut balances = Balances::from_storage(store);
let mut from_balance = balances.balance(from);
if let Some(new_from_balance) = from_balance.checked_sub(amount) {
from_balance = new_from_balance;
} else {
return Err(StdError::generic_err(format!(
"Insufficient funds: balance={}, required={}",
from_balance, amount
)));
}
balances.set_account_balance(from, from_balance);
let mut to_balance = balances.balance(to);
to_balance = to_balance.checked_add(amount).ok_or_else(|| {
StdError::generic_err("This tx will literally make them too rich. Try transferring less")
})?;
balances.set_account_balance(to, to_balance);
Ok(())
}
fn is_admin<S: Storage>(config: &Config<S>, account: &HumanAddr) -> StdResult<bool> {
let consts = config.constants()?;
if &consts.admin != account {
return Ok(false);
}
Ok(true)
}
fn check_if_admin<S: Storage>(config: &Config<S>, account: &HumanAddr) -> StdResult<()> {
if !is_admin(config, account)? {
return Err(StdError::generic_err(
"This is an admin command. Admin commands can only be run from admin address",
));
}
Ok(())
}
fn is_valid_name(name: &str) -> bool {
let len = name.len();
3 <= len && len <= 30
}
fn is_valid_symbol(symbol: &str) -> bool {
let len = symbol.len();
let len_is_valid = 3 <= len && len <= 6;
len_is_valid && symbol.bytes().all(|byte| b'A' <= byte && byte <= b'Z')
}
// pub fn migrate<S: Storage, A: Api, Q: Querier>(
// _deps: &mut Extern<S, A, Q>,
// _env: Env,
// _msg: MigrateMsg,
// ) -> StdResult<MigrateResponse> {
// Ok(MigrateResponse::default())
// }
| 30.881591 | 105 | 0.622323 |
e2a1db9483dadd47e96db464273d19953e34da22 | 26,561 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
//! Functionality related to the command line interface of the Move prover.
use abigen::AbigenOptions;
use anyhow::anyhow;
use clap::{App, Arg};
use docgen::DocgenOptions;
use errmapgen::ErrmapOptions;
use log::LevelFilter;
use serde::{Deserialize, Serialize};
use simplelog::{
CombinedLogger, Config, ConfigBuilder, LevelPadding, SimpleLogger, TermLogger, TerminalMode,
};
use spec_lang::env::VerificationScope;
use std::sync::atomic::{AtomicBool, Ordering};
/// Represents the virtual path to the boogie prelude which is inlined into the binary.
pub const INLINE_PRELUDE: &str = "<inline-prelude>";
/// Default flags passed to boogie. Additional flags will be added to this via the -B option.
const DEFAULT_BOOGIE_FLAGS: &[&str] = &[
"-doModSetAnalysis",
"-printVerifiedProceduresCount:0",
"-printModel:4",
// Right now, we let boogie only produce one error per procedure. The boogie wrapper isn't
// capable to sort out multiple errors and associate them with models otherwise.
"-errorLimit:1",
];
/// Atomic used to prevent re-initialization of logging.
static LOGGER_CONFIGURED: AtomicBool = AtomicBool::new(false);
/// Atomic used to detect whether we are running in test mode.
static TEST_MODE: AtomicBool = AtomicBool::new(false);
/// Represents options provided to the tool. Most of those options are configured via a toml
/// source; some via command line flags.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct Options {
/// Path to the boogie prelude. The special string `INLINE_PRELUDE` is used to refer to
    /// a prelude built into this binary.
pub prelude_path: String,
/// The path to the boogie output which represents the verification problem.
pub output_path: String,
/// Verbosity level for logging.
pub verbosity_level: LevelFilter,
/// Whether to run the documentation generator instead of the prover.
pub run_docgen: bool,
/// Whether to run the ABI generator instead of the prover.
pub run_abigen: bool,
/// Whether to run the error map generator instead of the prover.
pub run_errmapgen: bool,
/// Whether to run a static analysis that computes the set of types that may be packed by the
/// Move code under analysis instead of the prover.
pub run_packed_types_gen: bool,
/// An account address to use if none is specified in the source.
pub account_address: String,
/// The paths to the Move sources.
pub move_sources: Vec<String>,
/// The paths to any dependencies for the Move sources. Those will not be verified but
/// can be used by `move_sources`.
pub move_deps: Vec<String>,
/// Options for the prover.
pub prover: ProverOptions,
/// Options for the prover backend.
pub backend: BackendOptions,
/// Options for the documentation generator.
pub docgen: DocgenOptions,
/// Options for the ABI generator.
pub abigen: AbigenOptions,
/// Options for the error map generator.
pub errmapgen: ErrmapOptions,
}
impl Default for Options {
fn default() -> Self {
Self {
prelude_path: INLINE_PRELUDE.to_string(),
output_path: "output.bpl".to_string(),
run_docgen: false,
run_abigen: false,
run_errmapgen: false,
run_packed_types_gen: false,
account_address: "0x234567".to_string(),
verbosity_level: LevelFilter::Info,
move_sources: vec![],
move_deps: vec![],
prover: ProverOptions::default(),
backend: BackendOptions::default(),
docgen: DocgenOptions::default(),
abigen: AbigenOptions::default(),
errmapgen: ErrmapOptions::default(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct ProverOptions {
/// Whether to only generate backend code.
pub generate_only: bool,
/// Whether to generate stubs for native functions.
pub native_stubs: bool,
/// Whether to minimize execution traces in errors.
pub minimize_execution_trace: bool,
/// Whether to omit debug information in generated model.
pub omit_model_debug: bool,
/// Whether output for e.g. diagnosis shall be stable/redacted so it can be used in test
/// output.
pub stable_test_output: bool,
/// Scope of what functions to verify.
pub verify_scope: VerificationScope,
/// [deprecated] Whether to emit global axiom that resources are well-formed.
pub resource_wellformed_axiom: bool,
/// Whether to assume wellformedness when elements are read from memory, instead of on
/// function entry.
pub assume_wellformed_on_access: bool,
/// Whether to assume a global invariant when the related memory
/// is accessed, instead of on function entry. This is currently known to be slower
    /// when on than off, so it is off by default.
pub assume_invariant_on_access: bool,
/// Whether pack/unpack should recurse over the structure.
pub deep_pack_unpack: bool,
/// Whether to automatically debug trace values of specification expression leafs.
pub debug_trace: bool,
/// Report warnings. This is not on by default. We may turn it on if the warnings
/// are better filtered, e.g. do not contain unused schemas intended for other modules.
pub report_warnings: bool,
/// Whether to dump the transformed stackless bytecode to a file
pub dump_bytecode: bool,
/// Number of Boogie instances to be run concurrently.
pub num_instances: usize,
}
impl Default for ProverOptions {
fn default() -> Self {
Self {
generate_only: false,
native_stubs: false,
minimize_execution_trace: true,
omit_model_debug: false,
stable_test_output: false,
verify_scope: VerificationScope::Public,
resource_wellformed_axiom: false,
assume_wellformed_on_access: false,
deep_pack_unpack: false,
debug_trace: false,
report_warnings: false,
assume_invariant_on_access: false,
dump_bytecode: false,
num_instances: 1,
}
}
}
/// Backend options.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct BackendOptions {
/// Path to the boogie executable.
pub boogie_exe: String,
/// Path to the z3 executable.
pub z3_exe: String,
/// Whether to use cvc4.
pub use_cvc4: bool,
/// Path to the cvc4 executable.
pub cvc4_exe: String,
/// List of flags to pass on to boogie.
pub boogie_flags: Vec<String>,
/// Whether to use native array theory.
pub use_array_theory: bool,
/// Whether to produce an SMT file for each verification problem.
pub generate_smt: bool,
/// Whether native instead of stratified equality should be used.
pub native_equality: bool,
/// A string determining the type of requires used for parameter type checks. Can be
/// `"requires"` or `"free requires`".
pub type_requires: String,
/// The depth until which stratified functions are expanded.
pub stratification_depth: usize,
/// A string to be used to inline a function of medium size. Can be empty or `{:inline}`.
pub aggressive_func_inline: String,
/// A string to be used to inline a function of small size. Can be empty or `{:inline}`.
pub func_inline: String,
/// A bound to apply to the length of serialization results.
pub serialize_bound: usize,
/// How many times to call the prover backend for the verification problem. This is used for
/// benchmarking.
pub bench_repeat: usize,
/// Whether to use the sequence theory as the internal representation for $Vector type.
pub vector_using_sequences: bool,
/// A seed for the prover.
pub random_seed: usize,
/// The number of cores to use for parallel processing of verification conditions.
pub proc_cores: usize,
/// A (soft) timeout for the solver, per verification condition, in seconds.
pub vc_timeout: usize,
/// Whether Boogie output and log should be saved.
pub keep_artifacts: bool,
/// Eager threshold for quantifier instantiation.
pub eager_threshold: usize,
/// Lazy threshold for quantifier instantiation.
pub lazy_threshold: usize,
}
impl Default for BackendOptions {
fn default() -> Self {
let get_env = |s| std::env::var(s).unwrap_or_else(|_| String::new());
Self {
bench_repeat: 1,
boogie_exe: get_env("BOOGIE_EXE"),
z3_exe: get_env("Z3_EXE"),
use_cvc4: false,
cvc4_exe: get_env("CVC4_EXE"),
boogie_flags: vec![],
use_array_theory: false,
generate_smt: false,
native_equality: false,
type_requires: "free requires".to_owned(),
stratification_depth: 4,
aggressive_func_inline: "".to_owned(),
func_inline: "{:inline}".to_owned(),
serialize_bound: 4,
vector_using_sequences: false,
random_seed: 1,
proc_cores: 1,
vc_timeout: 40,
keep_artifacts: false,
eager_threshold: 100,
lazy_threshold: 100,
}
}
}
impl Options {
/// Creates options from toml configuration source.
pub fn create_from_toml(toml_source: &str) -> anyhow::Result<Options> {
Ok(toml::from_str(toml_source)?)
}
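
    // An illustrative (hypothetical) configuration string; each key shown maps
    // onto a field of `Options` or one of its nested option structs:
    //
    //     let opts = Options::create_from_toml(
    //         "output_path = 'out.bpl'\n\
    //          [backend]\n\
    //          proc_cores = 4\n\
    //          vc_timeout = 80",
    //     )?;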
/// Creates options from toml configuration file.
pub fn create_from_toml_file(toml_file: &str) -> anyhow::Result<Options> {
Self::create_from_toml(&std::fs::read_to_string(toml_file)?)
}
    /// Creates options from command line arguments. This parses the arguments and terminates
    /// the program on errors, printing usage information. The first argument is expected to be
    /// the program name.
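    ///
    /// Example (hypothetical invocation):
    /// `Options::create_from_args(&["mvp".into(), "--verify".into(), "all".into(), "m.move".into()])`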
pub fn create_from_args(args: &[String]) -> anyhow::Result<Options> {
// Clap definition of the command line interface.
let is_number = |s: String| {
s.parse::<usize>()
.map(|_| ())
.map_err(|_| "expected number".to_string())
};
let cli = App::new("mvp")
.version("0.1.0")
.about("The Move Prover")
.author("The Libra Core Contributors")
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.takes_value(true)
.value_name("TOML_FILE")
.env("MOVE_PROVER_CONFIG")
.help("path to a configuration file. \
Values in this file will be overridden by command line flags"),
)
.arg(
Arg::with_name("config-str")
.conflicts_with("config")
.short("C")
.long("config-str")
.takes_value(true)
.multiple(true)
.number_of_values(1)
.value_name("TOML_STRING")
.help("inline configuration string in toml syntax. Can be repeated. \
Use as in `-C=prover.opt=value -C=backend.opt=value`"),
)
.arg(
Arg::with_name("print-config")
.long("print-config")
.help("prints the effective toml configuration, then exits")
)
.arg(
Arg::with_name("output")
.short("o")
.long("output")
.takes_value(true)
.value_name("BOOGIE_FILE")
.help("path to the boogie output which represents the verification problem"),
)
.arg(
Arg::with_name("verbosity")
.short("v")
.long("verbose")
.takes_value(true)
.possible_values(&["error", "warn", "info", "debug"])
.help("verbosity level."),
)
.arg(
Arg::with_name("generate-only")
.long("generate-only")
.help("only generate boogie file but do not call boogie"),
)
.arg(
Arg::with_name("warn")
.long("warn")
.short("w")
.help("produces warnings")
)
.arg(
Arg::with_name("trace")
.long("trace")
.short("t")
.help("enables automatic tracing of expressions in prover errors")
)
.arg(
Arg::with_name("keep")
.long("keep")
.short("k")
.help("keep intermediate artifacts of the backend around")
)
.arg(
Arg::with_name("seed")
.long("seed")
.short("s")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets a random seed for the prover (default 0)")
)
.arg(
Arg::with_name("cores")
.long("cores")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets the number of cores to use. \
NOTE: multiple cores may currently lead to scrambled model \
output from boogie (default 1)")
)
.arg(
Arg::with_name("timeout")
.long("timeout")
.short("T")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets a timeout (in seconds) for each \
individual verification condition (default 40)")
)
.arg(
Arg::with_name("docgen")
.long("docgen")
.help("run the documentation generator instead of the prover. \
Generated docs will be written into the directory `./doc` unless configured otherwise via toml"),
)
.arg(
Arg::with_name("abigen")
.long("abigen")
.help("run the ABI generator instead of the prover. \
Generated ABIs will be written into the directory `./abi` unless configured otherwise via toml"),
)
.arg(
Arg::with_name("errmapgen")
.long("errmapgen")
.help("run the error map generator instead of the prover. \
The generated error map will be written to `errmap` unless configured otherwise"),
)
.arg(
Arg::with_name("packedtypesgen")
.long("packedtypesgen")
.help("run the packed types generator instead of the prover.")
)
.arg(
Arg::with_name("verify")
.long("verify")
.takes_value(true)
.possible_values(&["public", "all", "none"])
.value_name("SCOPE")
.help("default scope of verification \
(can be overridden by `pragma verify=true|false`)"),
)
.arg(
Arg::with_name("bench-repeat")
.long("bench-repeat")
.takes_value(true)
.value_name("COUNT")
.validator(is_number)
.help(
"for benchmarking: how many times to call the backend on the verification problem",
),
)
.arg(
Arg::with_name("dependencies")
.long("dependency")
.short("d")
.multiple(true)
.number_of_values(1)
.takes_value(true)
.value_name("PATH_TO_DEPENDENCY")
.help("path to a Move file, or a directory which will be searched for \
Move files, containing dependencies which will not be verified")
)
.arg(
Arg::with_name("sources")
.multiple(true)
.value_name("PATH_TO_SOURCE_FILE")
.min_values(1)
.help("the source files to verify"),
)
.arg(
Arg::with_name("eager-threshold")
.long("eager-threshold")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets the eager threshold for quantifier instantiation (default 100)")
)
.arg(
Arg::with_name("lazy-threshold")
.long("lazy-threshold")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets the lazy threshold for quantifier instantiation (default 100)")
)
.arg(
Arg::with_name("dump-bytecode")
.long("dump-bytecode")
.help("whether to dump the transformed bytecode to a file")
)
.arg(
Arg::with_name("num-instances")
.long("num-instances")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets the number of Boogie instances to run concurrently (default 1)")
)
.after_help("More options available via `--config file` or `--config-str str`. \
Use `--print-config` to see format and current values. \
See `move-prover/src/cli.rs::Option` for documentation.");
// Parse the arguments. This will abort the program on parsing errors and print help.
// It will also accept options like --help.
let matches = cli.get_matches_from(args);
// Initialize options.
let get_vec = |s: &str| -> Vec<String> {
match matches.values_of(s) {
Some(vs) => vs.map(|v| v.to_string()).collect(),
_ => vec![],
}
};
let mut options = if matches.is_present("config") {
if matches.is_present("config-str") {
return Err(anyhow!(
"currently, if `--config` (including via $MOVE_PROVER_CONFIG) is given \
`--config-str` cannot be used. Consider editing your \
configuration file instead."
));
}
Self::create_from_toml_file(matches.value_of("config").unwrap())?
} else if matches.is_present("config-str") {
Self::create_from_toml(matches.value_of("config-str").unwrap())?
} else {
Options::default()
};
// Analyze arguments.
if matches.is_present("output") {
options.output_path = matches.value_of("output").unwrap().to_string();
}
if matches.is_present("verbosity") {
options.verbosity_level = match matches.value_of("verbosity").unwrap() {
"error" => LevelFilter::Error,
"warn" => LevelFilter::Warn,
"info" => LevelFilter::Info,
"debug" => LevelFilter::Debug,
_ => unreachable!("should not happen"),
}
}
if matches.occurrences_of("sources") > 0 {
options.move_sources = get_vec("sources");
}
if matches.occurrences_of("dependencies") > 0 {
options.move_deps = get_vec("dependencies");
}
if matches.is_present("verify") {
options.prover.verify_scope = match matches.value_of("verify").unwrap() {
"public" => VerificationScope::Public,
"all" => VerificationScope::All,
"none" => VerificationScope::None,
_ => unreachable!("should not happen"),
}
}
if matches.is_present("bench-repeat") {
options.backend.bench_repeat =
matches.value_of("bench-repeat").unwrap().parse::<usize>()?;
}
if matches.is_present("docgen") {
options.run_docgen = true;
}
if matches.is_present("abigen") {
options.run_abigen = true;
}
if matches.is_present("errmapgen") {
options.run_errmapgen = true;
}
if matches.is_present("packedtypesgen") {
options.run_packed_types_gen = true;
}
if matches.is_present("warn") {
options.prover.report_warnings = true;
}
if matches.is_present("trace") {
options.prover.debug_trace = true;
}
if matches.is_present("dump-bytecode") {
options.prover.dump_bytecode = true;
}
if matches.is_present("num-instances") {
let num_instances = matches
.value_of("num-instances")
.unwrap()
.parse::<usize>()?;
options.prover.num_instances = std::cmp::max(num_instances, 1); // at least one instance
}
if matches.is_present("keep") {
options.backend.keep_artifacts = true;
}
if matches.is_present("seed") {
options.backend.random_seed = matches.value_of("seed").unwrap().parse::<usize>()?;
}
if matches.is_present("timeout") {
options.backend.vc_timeout = matches.value_of("timeout").unwrap().parse::<usize>()?;
}
if matches.is_present("cores") {
options.backend.proc_cores = matches.value_of("cores").unwrap().parse::<usize>()?;
}
if matches.is_present("eager-threshold") {
options.backend.eager_threshold = matches
.value_of("eager-threshold")
.unwrap()
.parse::<usize>()?;
}
if matches.is_present("lazy-threshold") {
options.backend.lazy_threshold = matches
.value_of("lazy-threshold")
.unwrap()
.parse::<usize>()?;
}
if matches.is_present("print-config") {
println!("{}", toml::to_string(&options).unwrap());
Err(anyhow!("exiting"))
} else {
Ok(options)
}
}
/// Sets up logging based on provided options. This should be called as early as possible
/// and before any use of info!, warn! etc.
pub fn setup_logging(&self) {
CombinedLogger::init(vec![TermLogger::new(
self.verbosity_level,
ConfigBuilder::new()
.set_time_level(LevelFilter::Debug)
.set_level_padding(LevelPadding::Off)
.build(),
TerminalMode::Mixed,
)])
.expect("Unexpected CombinedLogger init failure");
}
pub fn setup_logging_for_test(&self) {
// Loggers are global static, so we have to protect against reinitializing.
if LOGGER_CONFIGURED.compare_and_swap(false, true, Ordering::Relaxed) {
return;
}
TEST_MODE.store(true, Ordering::Relaxed);
SimpleLogger::init(self.verbosity_level, Config::default())
.expect("UnexpectedSimpleLogger failure");
}
/// Returns command line to call boogie.
pub fn get_boogie_command(&self, boogie_file: &str) -> Vec<String> {
let mut result = vec![self.backend.boogie_exe.clone()];
let mut add = |sl: &[&str]| result.extend(sl.iter().map(|s| (*s).to_string()));
add(DEFAULT_BOOGIE_FLAGS);
if self.backend.use_cvc4 {
add(&[
"-proverOpt:SOLVER=cvc4",
&format!("-proverOpt:PROVER_PATH={}", &self.backend.cvc4_exe),
]);
} else {
add(&[&format!("-proverOpt:PROVER_PATH={}", &self.backend.z3_exe)]);
}
if self.backend.use_array_theory {
add(&[
"-useArrayTheory",
"/proverOpt:O:smt.array.extensional=false",
]);
} else {
add(&[&format!(
"-proverOpt:O:smt.QI.EAGER_THRESHOLD={}",
self.backend.eager_threshold
)]);
add(&[&format!(
"-proverOpt:O:smt.QI.LAZY_THRESHOLD={}",
self.backend.lazy_threshold
)]);
}
add(&[&format!(
"-vcsCores:{}",
if self.prover.stable_test_output {
// Do not use multiple cores if stable test output is requested.
// Error messages may appear in non-deterministic order otherwise.
1
} else {
self.backend.proc_cores
}
)]);
// TODO: see what we can make out of these flags.
//add(&["-proverOpt:O:smt.QI.PROFILE=true"]);
//add(&["-proverOpt:O:trace=true"]);
//add(&["-proverOpt:VERBOSITY=3"]);
//add(&["-proverOpt:C:-st"]);
if self.backend.generate_smt {
add(&["-proverLog:@[email protected]"]);
}
for f in &self.backend.boogie_flags {
add(&[f.as_str()]);
}
add(&[boogie_file]);
result
}
/// Returns name of file where to log boogie output.
pub fn get_boogie_log_file(&self, boogie_file: &str) -> String {
format!("{}.log", boogie_file)
}
/// Adjust a timeout value, given in seconds, for the runtime environment.
pub fn adjust_timeout(&self, time: usize) -> usize {
        // If running on a Linux flavor, as in CI, add 100% to the timeout for added
// robustness against flakiness.
match std::env::consts::OS {
"linux" | "freebsd" | "openbsd" => time + time,
_ => time,
}
}
}
| 40.001506 | 117 | 0.55397 |
4ab2283f77d1d2854b9f65f6f79a3ded04466d70 | 2,263 | use crate::unit::UnitDisplay;
use crate::unit::UnitSystem;
use std::ops;
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct Mass {
grams: i64,
}
impl Mass {
pub fn from_grams(grams: i64) -> Mass {
Mass { grams }
}
pub fn to_grams(&self) -> i64 {
self.grams
}
pub fn to_pounds(&self) -> f64 {
        // One pound is exactly 453.59237 grams, so divide to convert.
        self.grams as f64 / 453.592_37
}
}
impl UnitDisplay for Mass {
fn display_with_units(&self, units: UnitSystem) -> String {
match units {
UnitSystem::Metric => format!("{}g", self.grams),
UnitSystem::Imperial => format!("{:.3}lb", self.to_pounds()),
}
}
}
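// Usage sketch (values chosen purely for illustration):
//
//     let total = Mass::from_grams(500) + Mass::from_grams(250);
//     assert_eq!(total.to_grams(), 750);
//     println!("{}", total.display_with_units(UnitSystem::Metric)); // "750g"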
impl ops::Add<Mass> for Mass {
type Output = Mass;
fn add(self, rhs: Mass) -> Mass {
Mass {
grams: self.grams + rhs.grams,
}
}
}
impl ops::Mul<i64> for Mass {
type Output = Mass;
fn mul(self, rhs: i64) -> Mass {
if rhs <= 0 {
panic!("zero and negative mass are not allowed")
}
Mass {
grams: self.grams * rhs,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
pub fn tracks_mass() {
assert_eq!(Mass::from_grams(5), Mass { grams: 5 });
assert_eq!(Mass::from_grams(5), Mass::from_grams(5));
assert_eq!(Mass::from_grams(42).to_grams(), 42);
assert_ne!(Mass::from_grams(1), Mass::from_grams(2));
}
#[test]
pub fn implements_addition() {
assert_eq!(
Mass::from_grams(42) + Mass::from_grams(0),
Mass::from_grams(42)
);
assert_eq!(
Mass::from_grams(1) + Mass::from_grams(41),
Mass::from_grams(42)
);
}
#[test]
pub fn implements_multiplication() {
assert_eq!(Mass::from_grams(5) * 4, Mass::from_grams(20));
assert_eq!(Mass::from_grams(42) * 2, Mass::from_grams(84));
}
#[test]
pub fn displays_grams() {
assert_eq!(
Mass::from_grams(1000).display_with_units(UnitSystem::Metric),
"1000g"
);
}
#[test]
pub fn displays_pounds() {
assert_eq!(
Mass::from_grams(1000).display_with_units(UnitSystem::Imperial),
"0.454lb"
);
}
}
| 22.186275 | 76 | 0.531595 |
1ca9c704349d1e18f376af1086f2fc9d3a91ed0d | 31,814 | /*! Permutation testing
This module contains tests to check boundary conditions on all permutations of
the six variables present in `BitField` functions:
1. `Lsb0` and `Msb0` slice orderings
2. `u8`, `u16`, `u32`, `u64` slice storage types
3. `load` and `store` trait behaviors
4. `_le` and `_be` element orderings
5. `u8`, `u16`, `u32`, `u64` value transfer types
6. Empty slice and too-wide slice conditions
!*/
#![cfg(test)]
use super::*;
use crate::prelude::*;
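// A minimal sketch of the store/load pairing these permutations exercise
// (hypothetical values, assuming the prelude's `bits`/`bits_mut` accessors):
//
//     let mut data = [0u8; 2];
//     data.bits_mut::<Lsb0>()[4 .. 12].store_le(0xA5u8);
//     assert_eq!(data.bits::<Lsb0>()[4 .. 12].load_le::<u8>(), 0xA5u8);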
#[test]
fn check_mask() {
for (n, mask) in &[(0, 0x00), (1, 0x01), (7, 0x7F), (8, 0xFF)][..] {
assert_eq!(mask_for::<u8>(*n), *mask);
}
for (n, mask) in &[(0, 0x0000), (1, 0x0001), (15, 0x7FFF), (16, 0xFFFF)][..]
{
assert_eq!(mask_for::<u16>(*n), *mask);
}
for (n, mask) in &[
(0, 0x0000_0000),
(1, 0x0000_0001),
(31, 0x7FFF_FFFF),
(32, 0xFFFF_FFFF),
][..]
{
assert_eq!(mask_for::<u32>(*n), *mask);
}
#[cfg(target_pointer_width = "64")]
for (n, mask) in &[
(0, 0x0000_0000_0000_0000),
(1, 0x0000_0000_0000_0001),
(63, 0x7FFF_FFFF_FFFF_FFFF),
(64, 0xFFFF_FFFF_FFFF_FFFF),
][..]
{
assert_eq!(mask_for::<u64>(*n), *mask);
}
}
#[test]
fn check_resize() {
assert_eq!(resize::<u8, u8>(0xA5u8), 0xA5u8);
assert_eq!(resize::<u8, u16>(0xA5u8), 0xA5u16);
assert_eq!(resize::<u8, u32>(0xA5u8), 0xA5u32);
assert_eq!(resize::<u16, u8>(0x1234u16), 0x34u8);
assert_eq!(resize::<u16, u16>(0x1234u16), 0x1234u16);
assert_eq!(resize::<u16, u32>(0x1234u16), 0x1234u32);
assert_eq!(resize::<u32, u8>(0x1234_5678u32), 0x78u8);
assert_eq!(resize::<u32, u16>(0x1234_5678u32), 0x5678u16);
assert_eq!(resize::<u32, u32>(0x1234_5678u32), 0x1234_5678u32);
#[cfg(target_pointer_width = "64")]
{
assert_eq!(resize::<u8, u64>(0xA5u8), 0xA5u64);
assert_eq!(resize::<u16, u64>(0x1234u16), 0x1234u64);
assert_eq!(resize::<u32, u64>(0x1234_5678u32), 0x1234_5678u64);
assert_eq!(resize::<u64, u8>(0x0123_4567_89AB_CDEFu64), 0xEFu8);
assert_eq!(resize::<u64, u16>(0x0123_4567_89AB_CDEFu64), 0xCDEFu16);
assert_eq!(resize::<u64, u32>(0x0123_4567_89AB_CDEFu64), 0x89AB_CDEFu32);
assert_eq!(
resize::<u64, u64>(0x0123_4567_89AB_CDEFu64),
0x0123_4567_89AB_CDEFu64
);
}
}
#[test]
#[should_panic]
fn bsl08_ll08_empty() {
BitSlice::<Lsb0, u8>::empty().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsl08_ll16_empty() {
BitSlice::<Lsb0, u8>::empty().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsl08_ll32_empty() {
BitSlice::<Lsb0, u8>::empty().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl08_ll64_empty() {
BitSlice::<Lsb0, u8>::empty().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsl08_lb08_empty() {
BitSlice::<Lsb0, u8>::empty().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsl08_lb16_empty() {
BitSlice::<Lsb0, u8>::empty().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsl08_lb32_empty() {
BitSlice::<Lsb0, u8>::empty().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl08_lb64_empty() {
BitSlice::<Lsb0, u8>::empty().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsl16_ll08_empty() {
BitSlice::<Lsb0, u16>::empty().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsl16_ll16_empty() {
BitSlice::<Lsb0, u16>::empty().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsl16_ll32_empty() {
BitSlice::<Lsb0, u16>::empty().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl16_ll64_empty() {
BitSlice::<Lsb0, u16>::empty().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsl16_lb08_empty() {
BitSlice::<Lsb0, u16>::empty().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsl16_lb16_empty() {
BitSlice::<Lsb0, u16>::empty().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsl16_lb32_empty() {
BitSlice::<Lsb0, u16>::empty().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl16_lb64_empty() {
BitSlice::<Lsb0, u16>::empty().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsl32_ll08_empty() {
BitSlice::<Lsb0, u32>::empty().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsl32_ll16_empty() {
BitSlice::<Lsb0, u32>::empty().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsl32_ll32_empty() {
BitSlice::<Lsb0, u32>::empty().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl32_ll64_empty() {
BitSlice::<Lsb0, u32>::empty().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsl32_lb08_empty() {
BitSlice::<Lsb0, u32>::empty().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsl32_lb16_empty() {
BitSlice::<Lsb0, u32>::empty().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsl32_lb32_empty() {
BitSlice::<Lsb0, u32>::empty().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl32_lb64_empty() {
BitSlice::<Lsb0, u32>::empty().load_be::<u64>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_ll08_empty() {
BitSlice::<Lsb0, u64>::empty().load_le::<u8>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_ll16_empty() {
BitSlice::<Lsb0, u64>::empty().load_le::<u16>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_ll32_empty() {
BitSlice::<Lsb0, u64>::empty().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_ll64_empty() {
BitSlice::<Lsb0, u64>::empty().load_le::<u64>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_lb08_empty() {
BitSlice::<Lsb0, u64>::empty().load_be::<u8>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_lb16_empty() {
BitSlice::<Lsb0, u64>::empty().load_be::<u16>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_lb32_empty() {
BitSlice::<Lsb0, u64>::empty().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_lb64_empty() {
BitSlice::<Lsb0, u64>::empty().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsm08_ll08_empty() {
BitSlice::<Msb0, u8>::empty().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsm08_ll16_empty() {
BitSlice::<Msb0, u8>::empty().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsm08_ll32_empty() {
BitSlice::<Msb0, u8>::empty().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm08_ll64_empty() {
BitSlice::<Msb0, u8>::empty().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsm08_lb08_empty() {
BitSlice::<Msb0, u8>::empty().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsm08_lb16_empty() {
BitSlice::<Msb0, u8>::empty().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsm08_lb32_empty() {
BitSlice::<Msb0, u8>::empty().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm08_lb64_empty() {
BitSlice::<Msb0, u8>::empty().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsm16_ll08_empty() {
BitSlice::<Msb0, u16>::empty().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsm16_ll16_empty() {
BitSlice::<Msb0, u16>::empty().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsm16_ll32_empty() {
BitSlice::<Msb0, u16>::empty().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm16_ll64_empty() {
BitSlice::<Msb0, u16>::empty().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsm16_lb08_empty() {
BitSlice::<Msb0, u16>::empty().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsm16_lb16_empty() {
BitSlice::<Msb0, u16>::empty().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsm16_lb32_empty() {
BitSlice::<Msb0, u16>::empty().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm16_lb64_empty() {
BitSlice::<Msb0, u16>::empty().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsm32_ll08_empty() {
BitSlice::<Msb0, u32>::empty().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsm32_ll16_empty() {
BitSlice::<Msb0, u32>::empty().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsm32_ll32_empty() {
BitSlice::<Msb0, u32>::empty().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm32_ll64_empty() {
BitSlice::<Msb0, u32>::empty().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsm32_lb08_empty() {
BitSlice::<Msb0, u32>::empty().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsm32_lb16_empty() {
BitSlice::<Msb0, u32>::empty().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsm32_lb32_empty() {
BitSlice::<Msb0, u32>::empty().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm32_lb64_empty() {
BitSlice::<Msb0, u32>::empty().load_be::<u64>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_ll08_empty() {
BitSlice::<Msb0, u64>::empty().load_le::<u8>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_ll16_empty() {
BitSlice::<Msb0, u64>::empty().load_le::<u16>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_ll32_empty() {
BitSlice::<Msb0, u64>::empty().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_ll64_empty() {
BitSlice::<Msb0, u64>::empty().load_le::<u64>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_lb08_empty() {
BitSlice::<Msb0, u64>::empty().load_be::<u8>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_lb16_empty() {
BitSlice::<Msb0, u64>::empty().load_be::<u16>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_lb32_empty() {
BitSlice::<Msb0, u64>::empty().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_lb64_empty() {
BitSlice::<Msb0, u64>::empty().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsl08_ll08_full() {
[0u8; 2].bits::<Lsb0>().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsl08_ll16_full() {
[0u8; 3].bits::<Lsb0>().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsl08_ll32_full() {
[0u8; 5].bits::<Lsb0>().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl08_ll64_full() {
[0u8; 9].bits::<Lsb0>().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsl08_lb08_full() {
[0u8; 2].bits::<Lsb0>().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsl08_lb16_full() {
[0u8; 3].bits::<Lsb0>().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsl08_lb32_full() {
[0u8; 5].bits::<Lsb0>().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl08_lb64_full() {
[0u8; 9].bits::<Lsb0>().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsl16_ll08_full() {
[0u16; 1].bits::<Lsb0>().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsl16_ll16_full() {
[0u16; 2].bits::<Lsb0>().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsl16_ll32_full() {
[0u16; 3].bits::<Lsb0>().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl16_ll64_full() {
[0u16; 5].bits::<Lsb0>().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsl16_lb08_full() {
[0u16; 1].bits::<Lsb0>().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsl16_lb16_full() {
[0u16; 2].bits::<Lsb0>().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsl16_lb32_full() {
[0u16; 3].bits::<Lsb0>().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl16_lb64_full() {
[0u16; 5].bits::<Lsb0>().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsl32_ll08_full() {
[0u32; 1].bits::<Lsb0>().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsl32_ll16_full() {
[0u32; 1].bits::<Lsb0>().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsl32_ll32_full() {
[0u32; 2].bits::<Lsb0>().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl32_ll64_full() {
[0u32; 3].bits::<Lsb0>().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsl32_lb08_full() {
[0u32; 1].bits::<Lsb0>().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsl32_lb16_full() {
[0u32; 1].bits::<Lsb0>().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsl32_lb32_full() {
[0u32; 2].bits::<Lsb0>().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl32_lb64_full() {
[0u32; 3].bits::<Lsb0>().load_be::<u64>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_ll08_full() {
[0u64; 1].bits::<Lsb0>().load_le::<u8>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_ll16_full() {
[0u64; 1].bits::<Lsb0>().load_le::<u16>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_ll32_full() {
[0u64; 1].bits::<Lsb0>().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_ll64_full() {
[0u64; 2].bits::<Lsb0>().load_le::<u64>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_lb08_full() {
[0u64; 1].bits::<Lsb0>().load_be::<u8>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_lb16_full() {
[0u64; 1].bits::<Lsb0>().load_be::<u16>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_lb32_full() {
[0u64; 1].bits::<Lsb0>().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_lb64_full() {
[0u64; 2].bits::<Lsb0>().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsm08_ll08_full() {
[0u8; 2].bits::<Msb0>().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsm08_ll16_full() {
[0u8; 3].bits::<Msb0>().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsm08_ll32_full() {
[0u8; 5].bits::<Msb0>().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm08_ll64_full() {
[0u8; 9].bits::<Msb0>().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsm08_lb08_full() {
[0u8; 2].bits::<Msb0>().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsm08_lb16_full() {
[0u8; 3].bits::<Msb0>().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsm08_lb32_full() {
[0u8; 5].bits::<Msb0>().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm08_lb64_full() {
[0u8; 9].bits::<Msb0>().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsm16_ll08_full() {
[0u16; 1].bits::<Msb0>().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsm16_ll16_full() {
[0u16; 2].bits::<Msb0>().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsm16_ll32_full() {
[0u16; 3].bits::<Msb0>().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm16_ll64_full() {
[0u16; 5].bits::<Msb0>().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsm16_lb08_full() {
[0u16; 1].bits::<Msb0>().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsm16_lb16_full() {
[0u16; 2].bits::<Msb0>().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsm16_lb32_full() {
[0u16; 3].bits::<Msb0>().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm16_lb64_full() {
[0u16; 5].bits::<Msb0>().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsm32_ll08_full() {
[0u32; 1].bits::<Msb0>().load_le::<u8>();
}
#[test]
#[should_panic]
fn bsm32_ll16_full() {
[0u32; 1].bits::<Msb0>().load_le::<u16>();
}
#[test]
#[should_panic]
fn bsm32_ll32_full() {
[0u32; 2].bits::<Msb0>().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm32_ll64_full() {
[0u32; 3].bits::<Msb0>().load_le::<u64>();
}
#[test]
#[should_panic]
fn bsm32_lb08_full() {
[0u32; 1].bits::<Msb0>().load_be::<u8>();
}
#[test]
#[should_panic]
fn bsm32_lb16_full() {
[0u32; 1].bits::<Msb0>().load_be::<u16>();
}
#[test]
#[should_panic]
fn bsm32_lb32_full() {
[0u32; 2].bits::<Msb0>().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm32_lb64_full() {
[0u32; 3].bits::<Msb0>().load_be::<u64>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_ll08_full() {
[0u64; 1].bits::<Msb0>().load_le::<u8>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_ll16_full() {
[0u64; 1].bits::<Msb0>().load_le::<u16>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_ll32_full() {
[0u64; 1].bits::<Msb0>().load_le::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_ll64_full() {
[0u64; 2].bits::<Msb0>().load_le::<u64>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_lb08_full() {
[0u64; 1].bits::<Msb0>().load_be::<u8>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_lb16_full() {
[0u64; 1].bits::<Msb0>().load_be::<u16>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_lb32_full() {
[0u64; 1].bits::<Msb0>().load_be::<u32>();
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_lb64_full() {
[0u64; 2].bits::<Msb0>().load_be::<u64>();
}
#[test]
#[should_panic]
fn bsl08_sl08_empty() {
BitSlice::<Lsb0, u8>::empty_mut().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsl08_sl16_empty() {
BitSlice::<Lsb0, u8>::empty_mut().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsl08_sl32_empty() {
BitSlice::<Lsb0, u8>::empty_mut().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl08_sl64_empty() {
BitSlice::<Lsb0, u8>::empty_mut().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsl08_sb08_empty() {
BitSlice::<Lsb0, u8>::empty_mut().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsl08_sb16_empty() {
BitSlice::<Lsb0, u8>::empty_mut().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsl08_sb32_empty() {
BitSlice::<Lsb0, u8>::empty_mut().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl08_sb64_empty() {
BitSlice::<Lsb0, u8>::empty_mut().store_be::<u64>(0);
}
#[test]
#[should_panic]
fn bsl16_sl08_empty() {
BitSlice::<Lsb0, u16>::empty_mut().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsl16_sl16_empty() {
BitSlice::<Lsb0, u16>::empty_mut().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsl16_sl32_empty() {
BitSlice::<Lsb0, u16>::empty_mut().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl16_sl64_empty() {
BitSlice::<Lsb0, u16>::empty_mut().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsl16_sb08_empty() {
BitSlice::<Lsb0, u16>::empty_mut().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsl16_sb16_empty() {
BitSlice::<Lsb0, u16>::empty_mut().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsl16_sb32_empty() {
BitSlice::<Lsb0, u16>::empty_mut().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl16_sb64_empty() {
BitSlice::<Lsb0, u16>::empty_mut().store_be::<u64>(0);
}
#[test]
#[should_panic]
fn bsl32_sl08_empty() {
BitSlice::<Lsb0, u32>::empty_mut().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsl32_sl16_empty() {
BitSlice::<Lsb0, u32>::empty_mut().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsl32_sl32_empty() {
BitSlice::<Lsb0, u32>::empty_mut().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl32_sl64_empty() {
BitSlice::<Lsb0, u32>::empty_mut().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsl32_sb08_empty() {
BitSlice::<Lsb0, u32>::empty_mut().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsl32_sb16_empty() {
BitSlice::<Lsb0, u32>::empty_mut().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsl32_sb32_empty() {
BitSlice::<Lsb0, u32>::empty_mut().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl32_sb64_empty() {
BitSlice::<Lsb0, u32>::empty_mut().store_be::<u64>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sl08_empty() {
BitSlice::<Lsb0, u64>::empty_mut().store_le::<u8>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sl16_empty() {
BitSlice::<Lsb0, u64>::empty_mut().store_le::<u16>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sl32_empty() {
BitSlice::<Lsb0, u64>::empty_mut().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sl64_empty() {
BitSlice::<Lsb0, u64>::empty_mut().store_le::<u64>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sb08_empty() {
BitSlice::<Lsb0, u64>::empty_mut().store_be::<u8>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sb16_empty() {
BitSlice::<Lsb0, u64>::empty_mut().store_be::<u16>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sb32_empty() {
BitSlice::<Lsb0, u64>::empty_mut().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sb64_empty() {
BitSlice::<Lsb0, u64>::empty_mut().store_be::<u64>(0);
}
#[test]
#[should_panic]
fn bsm08_sl08_empty() {
BitSlice::<Msb0, u8>::empty_mut().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsm08_sl16_empty() {
BitSlice::<Msb0, u8>::empty_mut().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsm08_sl32_empty() {
BitSlice::<Msb0, u8>::empty_mut().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm08_sl64_empty() {
BitSlice::<Msb0, u8>::empty_mut().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsm08_sb08_empty() {
BitSlice::<Msb0, u8>::empty_mut().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsm08_sb16_empty() {
BitSlice::<Msb0, u8>::empty_mut().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsm08_sb32_empty() {
BitSlice::<Msb0, u8>::empty_mut().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm08_sb64_empty() {
BitSlice::<Msb0, u8>::empty_mut().store_be::<u64>(0);
}
#[test]
#[should_panic]
fn bsm16_sl08_empty() {
BitSlice::<Msb0, u16>::empty_mut().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsm16_sl16_empty() {
BitSlice::<Msb0, u16>::empty_mut().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsm16_sl32_empty() {
BitSlice::<Msb0, u16>::empty_mut().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm16_sl64_empty() {
BitSlice::<Msb0, u16>::empty_mut().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsm16_sb08_empty() {
BitSlice::<Msb0, u16>::empty_mut().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsm16_sb16_empty() {
BitSlice::<Msb0, u16>::empty_mut().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsm16_sb32_empty() {
BitSlice::<Msb0, u16>::empty_mut().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm16_sb64_empty() {
BitSlice::<Msb0, u16>::empty_mut().store_be::<u64>(0);
}
#[test]
#[should_panic]
fn bsm32_sl08_empty() {
BitSlice::<Msb0, u32>::empty_mut().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsm32_sl16_empty() {
BitSlice::<Msb0, u32>::empty_mut().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsm32_sl32_empty() {
BitSlice::<Msb0, u32>::empty_mut().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm32_sl64_empty() {
BitSlice::<Msb0, u32>::empty_mut().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsm32_sb08_empty() {
BitSlice::<Msb0, u32>::empty_mut().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsm32_sb16_empty() {
BitSlice::<Msb0, u32>::empty_mut().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsm32_sb32_empty() {
BitSlice::<Msb0, u32>::empty_mut().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm32_sb64_empty() {
BitSlice::<Msb0, u32>::empty_mut().store_be::<u64>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sl08_empty() {
BitSlice::<Msb0, u64>::empty_mut().store_le::<u8>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sl16_empty() {
BitSlice::<Msb0, u64>::empty_mut().store_le::<u16>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sl32_empty() {
BitSlice::<Msb0, u64>::empty_mut().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sl64_empty() {
BitSlice::<Msb0, u64>::empty_mut().store_le::<u64>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sb08_empty() {
BitSlice::<Msb0, u64>::empty_mut().store_be::<u8>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sb16_empty() {
BitSlice::<Msb0, u64>::empty_mut().store_be::<u16>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sb32_empty() {
BitSlice::<Msb0, u64>::empty_mut().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sb64_empty() {
BitSlice::<Msb0, u64>::empty_mut().store_be::<u64>(0);
}
#[test]
#[should_panic]
fn bsl08_sl08_full() {
[0u8; 2].bits_mut::<Lsb0>().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsl08_sl16_full() {
[0u8; 3].bits_mut::<Lsb0>().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsl08_sl32_full() {
[0u8; 5].bits_mut::<Lsb0>().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl08_sl64_full() {
[0u8; 9].bits_mut::<Lsb0>().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsl08_sb08_full() {
[0u8; 2].bits_mut::<Lsb0>().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsl08_sb16_full() {
[0u8; 3].bits_mut::<Lsb0>().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsl08_sb32_full() {
[0u8; 5].bits_mut::<Lsb0>().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl08_sb64_full() {
[0u8; 9].bits_mut::<Lsb0>().store_be::<u64>(0);
}
#[test]
#[should_panic]
fn bsl16_sl08_full() {
[0u16; 1].bits_mut::<Lsb0>().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsl16_sl16_full() {
[0u16; 2].bits_mut::<Lsb0>().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsl16_sl32_full() {
[0u16; 3].bits_mut::<Lsb0>().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl16_sl64_full() {
[0u16; 5].bits_mut::<Lsb0>().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsl16_sb08_full() {
[0u16; 1].bits_mut::<Lsb0>().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsl16_sb16_full() {
[0u16; 2].bits_mut::<Lsb0>().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsl16_sb32_full() {
[0u16; 3].bits_mut::<Lsb0>().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl16_sb64_full() {
[0u16; 5].bits_mut::<Lsb0>().store_be::<u64>(0);
}
#[test]
#[should_panic]
fn bsl32_sl08_full() {
[0u32; 1].bits_mut::<Lsb0>().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsl32_sl16_full() {
[0u32; 1].bits_mut::<Lsb0>().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsl32_sl32_full() {
[0u32; 2].bits_mut::<Lsb0>().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl32_sl64_full() {
[0u32; 3].bits_mut::<Lsb0>().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsl32_sb08_full() {
[0u32; 1].bits_mut::<Lsb0>().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsl32_sb16_full() {
[0u32; 1].bits_mut::<Lsb0>().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsl32_sb32_full() {
[0u32; 2].bits_mut::<Lsb0>().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl32_sb64_full() {
[0u32; 3].bits_mut::<Lsb0>().store_be::<u64>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sl08_full() {
[0u64; 1].bits_mut::<Lsb0>().store_le::<u8>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sl16_full() {
[0u64; 1].bits_mut::<Lsb0>().store_le::<u16>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sl32_full() {
[0u64; 1].bits_mut::<Lsb0>().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sl64_full() {
[0u64; 2].bits_mut::<Lsb0>().store_le::<u64>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sb08_full() {
[0u64; 1].bits_mut::<Lsb0>().store_be::<u8>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sb16_full() {
[0u64; 1].bits_mut::<Lsb0>().store_be::<u16>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sb32_full() {
[0u64; 1].bits_mut::<Lsb0>().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsl64_sb64_full() {
[0u64; 2].bits_mut::<Lsb0>().store_be::<u64>(0);
}
#[test]
#[should_panic]
fn bsm08_sl08_full() {
[0u8; 2].bits_mut::<Msb0>().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsm08_sl16_full() {
[0u8; 3].bits_mut::<Msb0>().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsm08_sl32_full() {
[0u8; 5].bits_mut::<Msb0>().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm08_sl64_full() {
[0u8; 9].bits_mut::<Msb0>().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsm08_sb08_full() {
[0u8; 2].bits_mut::<Msb0>().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsm08_sb16_full() {
[0u8; 3].bits_mut::<Msb0>().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsm08_sb32_full() {
[0u8; 5].bits_mut::<Msb0>().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm08_sb64_full() {
[0u8; 9].bits_mut::<Msb0>().store_be::<u64>(0);
}
#[test]
#[should_panic]
fn bsm16_sl08_full() {
[0u16; 1].bits_mut::<Msb0>().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsm16_sl16_full() {
[0u16; 2].bits_mut::<Msb0>().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsm16_sl32_full() {
[0u16; 3].bits_mut::<Msb0>().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm16_sl64_full() {
[0u16; 5].bits_mut::<Msb0>().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsm16_sb08_full() {
[0u16; 1].bits_mut::<Msb0>().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsm16_sb16_full() {
[0u16; 2].bits_mut::<Msb0>().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsm16_sb32_full() {
[0u16; 3].bits_mut::<Msb0>().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm16_sb64_full() {
[0u16; 5].bits_mut::<Msb0>().store_be::<u64>(0);
}
#[test]
#[should_panic]
fn bsm32_sl08_full() {
[0u32; 1].bits_mut::<Msb0>().store_le::<u8>(0);
}
#[test]
#[should_panic]
fn bsm32_sl16_full() {
[0u32; 1].bits_mut::<Msb0>().store_le::<u16>(0);
}
#[test]
#[should_panic]
fn bsm32_sl32_full() {
[0u32; 2].bits_mut::<Msb0>().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm32_sl64_full() {
[0u32; 3].bits_mut::<Msb0>().store_le::<u64>(0);
}
#[test]
#[should_panic]
fn bsm32_sb08_full() {
[0u32; 1].bits_mut::<Msb0>().store_be::<u8>(0);
}
#[test]
#[should_panic]
fn bsm32_sb16_full() {
[0u32; 1].bits_mut::<Msb0>().store_be::<u16>(0);
}
#[test]
#[should_panic]
fn bsm32_sb32_full() {
[0u32; 2].bits_mut::<Msb0>().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm32_sb64_full() {
[0u32; 3].bits_mut::<Msb0>().store_be::<u64>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sl08_full() {
[0u64; 1].bits_mut::<Msb0>().store_le::<u8>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sl16_full() {
[0u64; 1].bits_mut::<Msb0>().store_le::<u16>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sl32_full() {
[0u64; 1].bits_mut::<Msb0>().store_le::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sl64_full() {
[0u64; 2].bits_mut::<Msb0>().store_le::<u64>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sb08_full() {
[0u64; 1].bits_mut::<Msb0>().store_be::<u8>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sb16_full() {
[0u64; 1].bits_mut::<Msb0>().store_be::<u16>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sb32_full() {
[0u64; 1].bits_mut::<Msb0>().store_be::<u32>(0);
}
#[cfg(target_pointer_width = "64")]
#[test]
#[should_panic]
fn bsm64_sb64_full() {
[0u64; 2].bits_mut::<Msb0>().store_be::<u64>(0);
}
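// For contrast with the panic cases above, a minimal sketch of the
// non-panicking contract (same `bitvec`-style API assumed): a load succeeds
// whenever the slice is non-empty and no wider than the destination type.
#[test]
fn load_within_width_succeeds() {
let data = [0x12u8; 1];
assert_eq!(data.bits::<Lsb0>().load_le::<u8>(), 0x12u8);
}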
// exports from <IOKit/hid/IOHIDValue.h>
use core_foundation_sys::base::{CFAllocatorRef, CFIndex, CFTypeID};
use crate::hid::{
base::{IOHIDElementRef, IOHIDValueRef},
keys::IOHIDValueScaleType
};
extern "C" {
pub fn IOHIDValueGetTypeID() -> CFTypeID;
pub fn IOHIDValueCreateWithIntegerValue(
allocator: CFAllocatorRef,
element: IOHIDElementRef,
timeStamp: u64,
value: CFIndex,
) -> IOHIDValueRef;
pub fn IOHIDValueCreateWithBytes(
allocator: CFAllocatorRef,
element: IOHIDElementRef,
timeStamp: u64,
bytes: *const u8,
length: CFIndex,
) -> IOHIDValueRef;
pub fn IOHIDValueCreateWithBytesNoCopy(
allocator: CFAllocatorRef,
element: IOHIDElementRef,
timeStamp: u64,
bytes: *const u8,
length: CFIndex,
) -> IOHIDValueRef;
pub fn IOHIDValueGetElement(value: IOHIDValueRef) -> IOHIDElementRef;
pub fn IOHIDValueGetTimeStamp(value: IOHIDValueRef) -> u64;
pub fn IOHIDValueGetLength(value: IOHIDValueRef) -> CFIndex;
pub fn IOHIDValueGetBytePtr(value: IOHIDValueRef) -> *const u8;
pub fn IOHIDValueGetIntegerValue(value: IOHIDValueRef) -> CFIndex;
pub fn IOHIDValueGetScaledValue(value: IOHIDValueRef, type_: IOHIDValueScaleType) -> f64;
}
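// Illustrative usage sketch (assumes a valid `IOHIDElementRef` obtained from
// an HID device elsewhere; every call here is unsafe FFI):
//
//     let value = unsafe {
//         IOHIDValueCreateWithIntegerValue(allocator, element, timestamp, 1)
//     };
//     let n = unsafe { IOHIDValueGetIntegerValue(value) };
//     let ts = unsafe { IOHIDValueGetTimeStamp(value) };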
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::Error;
#[cfg(not(feature = "mesalock_sgx"))]
extern crate std;
#[cfg(feature = "mesalock_sgx")]
use std;
use std::thread_local;
use js_sys::Uint8Array;
use wasm_bindgen::{prelude::*, JsCast};
// getRandomValues accepts at most 65536 bytes per call; see https://developer.mozilla.org/en-US/docs/Web/API/Crypto/getRandomValues
const BROWSER_CRYPTO_BUFFER_SIZE: usize = 256;
enum RngSource {
Node(NodeCrypto),
Browser(BrowserCrypto, Uint8Array),
}
// JsValues are always per-thread, so we initialize RngSource for each thread.
// See: https://github.com/rustwasm/wasm-bindgen/pull/955
thread_local!(
static RNG_SOURCE: Result<RngSource, Error> = getrandom_init();
);
pub(crate) fn getrandom_inner(dest: &mut [u8]) -> Result<(), Error> {
RNG_SOURCE.with(|result| {
let source = result.as_ref().map_err(|&e| e)?;
match source {
RngSource::Node(n) => {
if n.random_fill_sync(dest).is_err() {
return Err(Error::NODE_RANDOM_FILL_SYNC);
}
}
RngSource::Browser(crypto, buf) => {
// getRandomValues does not work with all types of WASM memory,
// so we initially write to browser memory to avoid exceptions.
for chunk in dest.chunks_mut(BROWSER_CRYPTO_BUFFER_SIZE) {
// The chunk can be smaller than buf's length, so we call to
// JS to create a smaller view of buf without allocation.
let sub_buf = buf.subarray(0, chunk.len() as u32);
if crypto.get_random_values(&sub_buf).is_err() {
return Err(Error::WEB_GET_RANDOM_VALUES);
}
sub_buf.copy_to(chunk);
}
}
};
Ok(())
})
}
fn getrandom_init() -> Result<RngSource, Error> {
let global: Global = js_sys::global().unchecked_into();
if is_node(&global) {
let crypto = require("crypto").map_err(|_| Error::NODE_CRYPTO)?;
return Ok(RngSource::Node(crypto));
}
// Assume we are in some Web environment (browser or web worker). We get
// `self.crypto` (called `msCrypto` on IE), so we can call
// `crypto.getRandomValues`. If `crypto` isn't defined, we assume that
// we are in an older web browser and the OS RNG isn't available.
let crypto = match (global.crypto(), global.ms_crypto()) {
(c, _) if c.is_object() => c,
(_, c) if c.is_object() => c,
_ => return Err(Error::WEB_CRYPTO),
};
let buf = Uint8Array::new_with_length(BROWSER_CRYPTO_BUFFER_SIZE as u32);
Ok(RngSource::Browser(crypto, buf))
}
// Taken from https://www.npmjs.com/package/browser-or-node
fn is_node(global: &Global) -> bool {
let process = global.process();
if process.is_object() {
let versions = process.versions();
if versions.is_object() {
return versions.node().is_string();
}
}
false
}
#[wasm_bindgen]
extern "C" {
type Global; // Return type of js_sys::global()
// Web Crypto API (https://www.w3.org/TR/WebCryptoAPI/)
#[wasm_bindgen(method, getter, js_name = "msCrypto")]
fn ms_crypto(this: &Global) -> BrowserCrypto;
#[wasm_bindgen(method, getter)]
fn crypto(this: &Global) -> BrowserCrypto;
type BrowserCrypto;
#[wasm_bindgen(method, js_name = getRandomValues, catch)]
fn get_random_values(this: &BrowserCrypto, buf: &Uint8Array) -> Result<(), JsValue>;
// Node JS crypto module (https://nodejs.org/api/crypto.html)
#[wasm_bindgen(catch, js_name = "module.require")]
fn require(s: &str) -> Result<NodeCrypto, JsValue>;
type NodeCrypto;
#[wasm_bindgen(method, js_name = randomFillSync, catch)]
fn random_fill_sync(this: &NodeCrypto, buf: &mut [u8]) -> Result<(), JsValue>;
// Node JS process Object (https://nodejs.org/api/process.html)
#[wasm_bindgen(method, getter)]
fn process(this: &Global) -> Process;
type Process;
#[wasm_bindgen(method, getter)]
fn versions(this: &Process) -> Versions;
type Versions;
#[wasm_bindgen(method, getter)]
fn node(this: &Versions) -> JsValue;
}
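// A minimal caller-side sketch (crate-internal; illustrative only): filling a
// buffer larger than BROWSER_CRYPTO_BUFFER_SIZE exercises the chunked
// `getRandomValues` path above.
//
//     let mut buf = [0u8; 1024];
//     getrandom_inner(&mut buf)?;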
//! Low level interface for interacting with `TXTRecordRef`.
use crate::Result;
use bonjour_sys::{
TXTRecordContainsKey, TXTRecordCreate, TXTRecordDeallocate, TXTRecordGetBytesPtr,
TXTRecordGetCount, TXTRecordGetItemAtIndex, TXTRecordGetLength, TXTRecordGetValuePtr,
TXTRecordRef, TXTRecordRemoveValue, TXTRecordSetValue,
};
use libc::{c_char, c_uchar, c_void};
use std::ffi::CString;
use std::{fmt, mem, ptr};
/// Wraps the `TXTRecordRef` type from the raw Bonjour bindings.
///
/// `zeroconf::TxtRecord` provides the cross-platform bindings for this functionality.
pub struct ManagedTXTRecordRef(TXTRecordRef);
impl ManagedTXTRecordRef {
/// Creates a new empty TXT record
pub fn new() -> Self {
let record = unsafe {
let mut record: TXTRecordRef = mem::zeroed();
TXTRecordCreate(&mut record, 0, ptr::null_mut());
record
};
Self(record)
}
    /// Delegate function for [`TXTRecordGetBytesPtr()`].
    ///
    /// [`TXTRecordGetBytesPtr()`]: https://developer.apple.com/documentation/dnssd/1804717-txtrecordgetbytesptr?language=objc
pub fn get_bytes_ptr(&self) -> *const c_void {
unsafe { TXTRecordGetBytesPtr(&self.0) }
}
/// Delegate function for [`TXTRecordGetLength()`].
///
/// [`TXTRecordGetLength()`]: https://developer.apple.com/documentation/dnssd/1804720-txtrecordgetlength?language=objc
pub fn get_length(&self) -> u16 {
unsafe { TXTRecordGetLength(&self.0) }
}
/// Delegate function for [`TXTRecordRemoveValue()`].
///
/// # Safety
    /// This function is unsafe because it makes no guarantees about `key`, which is
    /// dereferenced; `key` is expected to be a valid, non-null `*const c_char`.
///
/// [`TXTRecordRemoveValue()`]: https://developer.apple.com/documentation/dnssd/1804721-txtrecordremovevalue?language=objc
pub unsafe fn remove_value(&mut self, key: *const c_char) -> Result<()> {
bonjour!(
TXTRecordRemoveValue(&mut self.0, key),
"could not remove TXT record value"
)
}
/// Delegate function for [`TXTRecordSetValue`].
///
/// # Safety
    /// This function is unsafe because it makes no guarantees about its raw pointer
    /// arguments, which are dereferenced.
///
/// [`TXTRecordSetValue`]: https://developer.apple.com/documentation/dnssd/1804723-txtrecordsetvalue?language=objc
pub unsafe fn set_value(
&mut self,
key: *const c_char,
value_size: u8,
value: *const c_void,
) -> Result<()> {
bonjour!(
TXTRecordSetValue(&mut self.0, key, value_size, value),
"could not set TXT record value"
)
}
/// Delegate function for [`TXTRecordContainsKey`].
///
/// # Safety
    /// This function is unsafe because it makes no guarantees about its raw pointer
    /// arguments, which are dereferenced.
///
/// [`TXTRecordContainsKey`]: https://developer.apple.com/documentation/dnssd/1804705-txtrecordcontainskey?language=objc
pub unsafe fn contains_key(&self, key: *const c_char) -> bool {
TXTRecordContainsKey(self.get_length(), self.get_bytes_ptr(), key) == 1
}
/// Delegate function for [`TXTRecordGetCount`].
///
/// [`TXTRecordGetCount`]: https://developer.apple.com/documentation/dnssd/1804706-txtrecordgetcount?language=objc
pub fn get_count(&self) -> u16 {
_get_count(self.get_length(), self.get_bytes_ptr())
}
/// Delegate function for [`TXTRecordGetItemAtIndex`].
///
/// # Safety
    /// This function is unsafe because it makes no guarantees about its raw pointer
    /// arguments, which are dereferenced.
///
/// [`TXTRecordGetItemAtIndex`]: https://developer.apple.com/documentation/dnssd/1804708-txtrecordgetitematindex?language=objc
pub unsafe fn get_item_at_index(
&self,
item_index: u16,
key_buf_len: u16,
key: *mut c_char,
value_len: *mut u8,
value: *mut *const c_void,
) -> Result<()> {
_get_item_at_index(
self.get_length(),
self.get_bytes_ptr(),
item_index,
key_buf_len,
key,
value_len,
value,
)
}
/// Delegate function for [`TXTRecordGetValuePtr`].
///
/// # Safety
    /// This function is unsafe because it makes no guarantees about its raw pointer
    /// arguments, which are dereferenced.
///
/// [`TXTRecordGetValuePtr`]: https://developer.apple.com/documentation/dnssd/1804709-txtrecordgetvalueptr?language=objc
pub unsafe fn get_value_ptr(&self, key: *const c_char, value_len: *mut u8) -> *const c_void {
TXTRecordGetValuePtr(self.get_length(), self.get_bytes_ptr(), key, value_len)
}
pub(crate) unsafe fn clone_raw(raw: *const c_uchar, size: u16) -> Result<Self> {
let chars = c_string!(alloc(size as usize)).into_raw() as *mut c_uchar;
ptr::copy(raw, chars, size as usize);
let chars = CString::from_raw(chars as *mut c_char);
let mut record = Self::new();
for i in 0.._get_count(size, chars.as_ptr() as *const c_void) {
let key = c_string!(alloc(256));
let mut value_len: u8 = 0;
let mut value: *const c_void = ptr::null_mut();
_get_item_at_index(
size,
chars.as_ptr() as *const c_void,
i,
256,
key.as_ptr() as *mut c_char,
&mut value_len,
&mut value,
)?;
record.set_value(key.as_ptr() as *mut c_char, value_len, value)?;
}
Ok(record)
}
}
impl Default for ManagedTXTRecordRef {
fn default() -> Self {
Self::new()
}
}
impl Clone for ManagedTXTRecordRef {
fn clone(&self) -> Self {
unsafe {
Self::clone_raw(self.get_bytes_ptr() as *const c_uchar, self.get_length()).unwrap()
}
}
}
impl Drop for ManagedTXTRecordRef {
fn drop(&mut self) {
unsafe { TXTRecordDeallocate(&mut self.0) };
}
}
impl fmt::Debug for ManagedTXTRecordRef {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ManagedTXTRecordRef").finish()
}
}
unsafe impl Send for ManagedTXTRecordRef {}
unsafe impl Sync for ManagedTXTRecordRef {}
fn _get_count(length: u16, data: *const c_void) -> u16 {
unsafe { TXTRecordGetCount(length, data) }
}
fn _get_item_at_index(
length: u16,
data: *const c_void,
item_index: u16,
key_buf_len: u16,
key: *mut c_char,
value_len: *mut u8,
value: *mut *const c_void,
) -> Result<()> {
bonjour!(
TXTRecordGetItemAtIndex(length, data, item_index, key_buf_len, key, value_len, value),
"could get item at index for TXT record"
)
}
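// Illustrative sketch (not part of the public surface): enumerating every
// key/value pair of a record combines `get_count` with `get_item_at_index`,
// mirroring the copy loop in `ManagedTXTRecordRef::clone_raw` above.
#[allow(dead_code)]
fn _for_each_item(record: &ManagedTXTRecordRef) -> Result<()> {
    for i in 0..record.get_count() {
        let key = unsafe { c_string!(alloc(256)) };
        let mut value_len: u8 = 0;
        let mut value: *const c_void = ptr::null_mut();
        unsafe {
            record.get_item_at_index(
                i,
                256,
                key.as_ptr() as *mut c_char,
                &mut value_len,
                &mut value,
            )?;
        }
        // `key` now holds the NUL-terminated key; `value`/`value_len` describe
        // the raw value bytes (the pointer may be null for value-less keys).
    }
    Ok(())
}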
#[cfg(test)]
mod tests {
use super::*;
use crate::ffi::c_str;
#[test]
fn set_value_success() {
let mut record = ManagedTXTRecordRef::new();
let key = c_string!("foo");
let value = c_string!("bar");
let value_size = mem::size_of_val(&value) as u8;
unsafe {
record
.set_value(
key.as_ptr() as *const c_char,
value_size,
value.as_ptr() as *const c_void,
)
.unwrap();
}
let mut value_len: u8 = 0;
let result = unsafe {
c_str::raw_to_str(
record.get_value_ptr(key.as_ptr() as *const c_char, &mut value_len)
as *const c_char,
)
};
assert_eq!(result, "bar");
}
#[test]
fn set_value_null_success() {
let mut record = ManagedTXTRecordRef::new();
let key = c_string!("foo");
let value_size = 0;
unsafe {
record
.set_value(key.as_ptr() as *const c_char, value_size, ptr::null())
.unwrap();
}
let mut value_len: u8 = 0;
let result = unsafe { record.get_value_ptr(key.as_ptr() as *const c_char, &mut value_len) };
assert!(result.is_null());
}
#[test]
fn remove_value_success() {
let mut record = ManagedTXTRecordRef::new();
let key = c_string!("foo");
let value = c_string!("bar");
let value_size = mem::size_of_val(&value) as u8;
unsafe {
record
.set_value(
key.as_ptr() as *const c_char,
value_size,
value.as_ptr() as *const c_void,
)
.unwrap();
record.remove_value(key.as_ptr() as *const c_char).unwrap();
}
let mut value_len: u8 = 0;
let result = unsafe { record.get_value_ptr(key.as_ptr() as *const c_char, &mut value_len) };
assert!(result.is_null());
}
#[test]
#[should_panic]
fn remove_value_missing_key_panics() {
let mut record = ManagedTXTRecordRef::new();
let key = c_string!("foo");
unsafe {
record.remove_value(key.as_ptr() as *const c_char).unwrap();
}
}
#[test]
fn contains_key_success() {
let mut record = ManagedTXTRecordRef::new();
let key = c_string!("foo");
let value = c_string!("bar");
let value_size = mem::size_of_val(&value) as u8;
unsafe {
record
.set_value(
key.as_ptr() as *const c_char,
value_size,
value.as_ptr() as *const c_void,
)
.unwrap();
}
let no_val = c_string!("baz");
unsafe {
assert!(record.contains_key(key.as_ptr() as *const c_char));
assert!(!record.contains_key(no_val.as_ptr() as *const c_char));
}
}
#[test]
fn get_count_success() {
let mut record = ManagedTXTRecordRef::new();
let key = c_string!("foo");
let value = c_string!("bar");
let value_size = mem::size_of_val(&value) as u8;
unsafe {
record
.set_value(
key.as_ptr() as *const c_char,
value_size,
value.as_ptr() as *const c_void,
)
.unwrap();
}
assert_eq!(record.get_count(), 1);
}
#[test]
fn get_item_at_index() {
let mut record = ManagedTXTRecordRef::new();
let key = c_string!("foo");
let value = c_string!("bar");
let value_size = mem::size_of_val(&value) as u8;
unsafe {
record
.set_value(
key.as_ptr() as *const c_char,
value_size,
value.as_ptr() as *const c_void,
)
.unwrap();
}
let key = unsafe { c_string!(alloc(256)) };
let mut value_len: u8 = 0;
let mut value: *const c_void = ptr::null_mut();
unsafe {
record
.get_item_at_index(
0,
256,
key.as_ptr() as *mut c_char,
&mut value_len,
&mut value,
)
.unwrap();
let key = c_str::raw_to_str(key.as_ptr() as *const c_char);
let value = c_str::raw_to_str(value as *const c_char);
assert_eq!(key, "foo");
assert_eq!(value, "bar");
}
}
}
use core::alloc::Layout;
use core::convert::TryFrom;
use core::fmt::{self, Debug};
use core::iter;
use core::ptr::{self, NonNull};
use core::slice;
use core::str;
use core::sync::atomic::{self, AtomicUsize};
use intrusive_collections::LinkedListLink;
use liblumen_core::offset_of;
use crate::borrow::CloneToProcess;
use crate::erts::exception::AllocResult;
use crate::erts::process::alloc::TermAlloc;
use crate::erts::process::Process;
use crate::erts::string::Encoding;
use crate::erts::term::prelude::*;
/// This is the header written alongside all procbin binaries in the heap,
/// it owns the refcount and the raw binary data
///
/// NOTE: It is critical that if you add fields to this struct, that you adjust
/// the implementation of `base_layout` and `ProcBin::from_slice`, as they must
/// manually calculate the data layout due to the fact that `ProcBinInner` is a
/// dynamically-sized type
#[repr(C)]
pub struct ProcBinInner {
refc: AtomicUsize,
flags: BinaryFlags,
data: [u8],
}
impl_static_header!(ProcBin, Term::HEADER_PROCBIN);
impl ProcBinInner {
/// Constructs a reference to a `ProcBinInner` given a pointer to
/// the memory containing the struct and the length of its variable-length
/// data
///
/// NOTE: For more information about how this works, see the detailed
/// explanation in the function docs for `HeapBin::from_raw_parts`
#[inline]
fn from_raw_parts(ptr: *const u8, len: usize) -> Boxed<Self> {
// Invariants of slice::from_raw_parts.
assert!(!ptr.is_null());
assert!(len <= isize::max_value() as usize);
unsafe {
let slice = core::slice::from_raw_parts(ptr as *const (), len);
Boxed::new_unchecked(slice as *const [()] as *mut Self)
}
}
#[inline]
fn as_bytes(&self) -> &[u8] {
&self.data
}
/// Produces the base layout for this struct, before the
/// dynamically sized data is factored in.
///
/// Returns the base layout + the offset of the flags field
#[inline]
fn base_layout() -> (Layout, usize) {
Layout::new::<AtomicUsize>()
.extend(Layout::new::<BinaryFlags>())
.unwrap()
}
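    // For orientation: on a typical 64-bit target, and assuming `BinaryFlags`
    // occupies a single machine word, this yields a flags offset of 8; extending
    // the layout with the byte slice in `ProcBin::from_slice` then places the
    // data at offset 16.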
}
impl Bitstring for ProcBinInner {
#[inline]
fn full_byte_len(&self) -> usize {
self.data.len()
}
#[inline]
unsafe fn as_byte_ptr(&self) -> *mut u8 {
self.data.as_ptr() as *mut u8
}
}
impl Binary for ProcBinInner {
#[inline]
fn flags(&self) -> &BinaryFlags {
&self.flags
}
}
impl IndexByte for ProcBinInner {
fn byte(&self, index: usize) -> u8 {
self.data[index]
}
}
impl Debug for ProcBinInner {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let ptr = unsafe { self.as_byte_ptr() };
let len = self.data.len();
f.debug_struct("ProcBinInner")
.field("refc", &self.refc)
.field("flags", &self.flags)
.field("data", &format!("bytes={},address={:p}", len, ptr))
.finish()
}
}
/// Reference-counted heap-allocated binary
///
/// This struct doesn't actually have the data, but it is the entry point
/// through which all consumers will access it, which ensures the reference
/// count is maintained correctly
#[derive(Debug)]
#[repr(C)]
pub struct ProcBin {
header: Header<ProcBin>,
inner: NonNull<ProcBinInner>,
pub link: LinkedListLink,
}
impl ProcBin {
#[inline]
pub fn inner_offset() -> usize {
offset_of!(ProcBin, inner)
}
/// Creates a new procbin from a str slice, by copying it to the heap
pub fn from_str(s: &str) -> AllocResult<Self> {
let encoding = Encoding::from_str(s);
Self::from_slice(s.as_bytes(), encoding)
}
/// Creates a new procbin from a raw byte slice, by copying it to the heap
pub fn from_slice(s: &[u8], encoding: Encoding) -> AllocResult<Self> {
use liblumen_core::sys::alloc as sys_alloc;
let (base_layout, flags_offset) = ProcBinInner::base_layout();
let (unpadded_layout, data_offset) = base_layout.extend(Layout::for_value(s)).unwrap();
// We pad to alignment so that the Layout produced here
// matches that returned by `Layout::for_value` on the
// final `ProcBinInner`
let layout = unpadded_layout.pad_to_align();
unsafe {
let non_null = sys_alloc::alloc(layout)?;
let len = s.len();
let ptr: *mut u8 = non_null.as_ptr();
ptr::write(ptr as *mut AtomicUsize, AtomicUsize::new(1));
let flags_ptr = ptr.offset(flags_offset as isize) as *mut BinaryFlags;
let flags = BinaryFlags::new(encoding).set_size(len);
ptr::write(flags_ptr, flags);
let data_ptr = ptr.offset(data_offset as isize);
ptr::copy_nonoverlapping(s.as_ptr(), data_ptr, len);
let inner = ProcBinInner::from_raw_parts(ptr, len);
Ok(Self {
header: Default::default(),
inner: inner.into(),
link: LinkedListLink::new(),
})
}
}
#[inline]
fn inner(&self) -> &ProcBinInner {
unsafe { self.inner.as_ref() }
}
    // Non-inlined part of `drop`. Only reached from `Drop::drop` below, after
    // the refcount has already been decremented to zero, so it must not
    // decrement again; it simply frees the shared allocation.
    #[inline(never)]
    unsafe fn drop_slow(&self) {
        use liblumen_core::sys::alloc as sys_alloc;
        let inner = self.inner.as_ref();
        // NOTE: pass the `ProcBinInner` itself to `for_value` (not a reference
        // to the reference) so the layout covers the dynamically-sized data
        let layout = Layout::for_value(inner);
        sys_alloc::free(inner as *const _ as *mut u8, layout);
    }
#[inline]
pub fn full_byte_iter<'a>(&'a self) -> iter::Copied<slice::Iter<'a, u8>> {
self.inner().as_bytes().iter().copied()
}
}
impl Bitstring for ProcBin {
#[inline]
fn full_byte_len(&self) -> usize {
self.inner().full_byte_len()
}
#[inline]
unsafe fn as_byte_ptr(&self) -> *mut u8 {
self.inner().as_byte_ptr()
}
}
impl Binary for ProcBin {
#[inline]
fn flags(&self) -> &BinaryFlags {
self.inner().flags()
}
}
impl AlignedBinary for ProcBin {
fn as_bytes(&self) -> &[u8] {
self.inner().as_bytes()
}
}
impl Clone for ProcBin {
#[inline]
fn clone(&self) -> Self {
self.inner().refc.fetch_add(1, atomic::Ordering::AcqRel);
Self {
header: self.header.clone(),
inner: self.inner,
link: LinkedListLink::new(),
}
}
}
impl CloneToProcess for ProcBin {
fn clone_to_process(&self, process: &Process) -> Term {
let mut heap = process.acquire_heap();
let boxed = self.clone_to_heap(&mut heap).unwrap();
let ptr: *mut Self = boxed.dyn_cast();
self.inner().refc.fetch_add(1, atomic::Ordering::AcqRel);
// Reify a reference to the newly written clone, and push it
// on to the process virtual heap
let clone = unsafe { &*ptr };
process.virtual_alloc(clone);
boxed
}
fn clone_to_heap<A>(&self, heap: &mut A) -> AllocResult<Term>
where
A: ?Sized + TermAlloc,
{
unsafe {
// Allocate space for the header
let layout = Layout::new::<Self>();
let ptr = heap.alloc_layout(layout)?.as_ptr() as *mut Self;
// Write the binary header with an empty link
ptr::write(
ptr,
Self {
header: self.header.clone(),
inner: self.inner,
link: LinkedListLink::new(),
},
);
// Reify result term
Ok(ptr.into())
}
}
fn size_in_words(&self) -> usize {
crate::erts::to_word_size(Layout::for_value(self).size())
}
}
impl Drop for ProcBin {
fn drop(&mut self) {
if self.inner().refc.fetch_sub(1, atomic::Ordering::Release) != 1 {
return;
}
// The following code is based on the Rust Arc<T> implementation, and
// their notes apply to us here:
//
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// In particular, while the contents of an Arc are usually immutable, it's
// possible to have interior writes to something like a Mutex<T>. Since a
// Mutex is not acquired when it is deleted, we can't rely on its
// synchronization logic to make writes in thread A visible to a destructor
// running in thread B.
//
// Also note that the Acquire fence here could probably be replaced with an
// Acquire load, which could improve performance in highly-contended
// situations. See [2].
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
// [2]: (https://github.com/rust-lang/rust/pull/41714)
atomic::fence(atomic::Ordering::Acquire);
// The refcount is now zero, so we are freeing the memory
unsafe {
self.drop_slow();
}
}
}
impl IndexByte for ProcBin {
fn byte(&self, index: usize) -> u8 {
self.inner().byte(index)
}
}
/// Given a `Boxed` pointer to a `ProcBin`, reborrows and clones it into a new reference.
///
/// # Safety
///
/// This conversion dereferences the underlying pointer, so it is expected that it is
/// only ever called with a valid `ProcBin` pointer. The primary risk
/// with obtaining a `ProcBin` via this function is if you leak it somehow, rather than
/// letting its `Drop` implementation run. Doing so will leave the reference count greater
/// than 1 forever, meaning memory will never get deallocated.
///
/// NOTE: This does not copy the binary, it only obtains a new `ProcBin`, which is
/// itself a reference to a binary held by a `ProcBinInner`.
impl From<Boxed<ProcBin>> for ProcBin {
fn from(boxed: Boxed<ProcBin>) -> Self {
boxed.as_ref().clone()
}
}
impl TryFrom<TypedTerm> for Boxed<ProcBin> {
type Error = TypeError;
fn try_from(typed_term: TypedTerm) -> Result<Self, Self::Error> {
match typed_term {
TypedTerm::ProcBin(term) => Ok(term),
_ => Err(TypeError),
}
}
}
use cursive::traits::Resizable;
use cursive::view::Nameable;
use cursive::views::*;
use cursive::{Cursive, CursiveExt};
use librespot_core::authentication::Credentials as RespotCredentials;
use librespot_protocol::authentication::AuthenticationType;
use serde::{Deserialize, Serialize};
pub fn create_credentials() -> Result<RespotCredentials, String> {
let mut login_cursive = Cursive::default();
let info_buf = TextContent::new("Please login to Spotify\n");
let info_view = Dialog::around(TextView::new_with_content(info_buf))
.button("Login", move |s| {
let login_view = Dialog::new()
.title("Spotify login")
.content(
ListView::new()
.child(
"Username",
EditView::new().with_name("spotify_user").fixed_width(18),
)
.child(
"Password",
EditView::new()
.secret()
.with_name("spotify_password")
.fixed_width(18),
),
)
.button("Login", |s| {
let username = s
.call_on_name("spotify_user", |view: &mut EditView| view.get_content())
.unwrap()
.to_string();
let auth_data = s
.call_on_name("spotify_password", |view: &mut EditView| view.get_content())
.unwrap()
.to_string()
.as_bytes()
.to_vec();
s.set_user_data::<Result<RespotCredentials, String>>(Ok(RespotCredentials {
username,
auth_type: AuthenticationType::AUTHENTICATION_USER_PASS,
auth_data,
}));
s.quit();
})
.button("Quit", Cursive::quit);
s.pop_layer();
s.add_layer(login_view);
})
.button("Quit", Cursive::quit);
login_cursive.add_layer(info_view);
login_cursive.run();
login_cursive
.user_data()
.cloned()
.unwrap_or_else(|| Err("Didn't obtain any credentials".to_string()))
}
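// Minimal usage sketch (launches an interactive TUI, so illustrative only):
//
//     match create_credentials() {
//         Ok(creds) => println!("logged in as {}", creds.username),
//         Err(e) => eprintln!("login failed: {}", e),
//     }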
#[derive(Serialize, Deserialize, Debug)]
pub struct AuthResponse {
pub credentials: RespotCredentials,
pub error: Option<String>,
}
//! The `blockstore` module provides functions for parallel verification of the
//! Proof of History ledger as well as iterative read, append write, and random
//! access read to a persistent file-based ledger.
pub use crate::{blockstore_db::BlockstoreError, blockstore_meta::SlotMeta};
use {
crate::{
ancestor_iterator::AncestorIterator,
blockstore_db::{
columns as cf, AccessType, BlockstoreRecoveryMode, Column, Database, IteratorDirection,
IteratorMode, LedgerColumn, Result, WriteBatch,
},
blockstore_meta::*,
entry::{create_ticks, Entry},
erasure::ErasureConfig,
leader_schedule_cache::LeaderScheduleCache,
next_slots_iterator::NextSlotsIterator,
shred::{
Result as ShredResult, Shred, ShredType, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK,
SHRED_PAYLOAD_SIZE,
},
},
bincode::deserialize,
log::*,
rayon::{
iter::{IntoParallelRefIterator, ParallelIterator},
ThreadPool,
},
rocksdb::DBRawIterator,
solana_measure::measure::Measure,
    solana_metrics::{datapoint_debug, datapoint_error, datapoint_info},
solana_rayon_threadlimit::get_thread_count,
solana_runtime::hardened_unpack::{unpack_genesis_archive, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
solana_sdk::{
clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, MS_PER_TICK},
genesis_config::{GenesisConfig, DEFAULT_GENESIS_ARCHIVE, DEFAULT_GENESIS_FILE},
hash::Hash,
pubkey::Pubkey,
sanitize::Sanitize,
signature::{Keypair, Signature, Signer},
timing::timestamp,
transaction::Transaction,
},
solana_storage_proto::{StoredExtendedRewards, StoredTransactionStatusMeta},
solana_transaction_status::{
ConfirmedBlock, ConfirmedTransaction, ConfirmedTransactionStatusWithSignature, Rewards,
TransactionStatusMeta, TransactionWithStatusMeta,
},
std::{
borrow::Cow,
cell::RefCell,
cmp,
collections::{hash_map::Entry as HashMapEntry, BTreeMap, BTreeSet, HashMap, HashSet},
convert::TryInto,
fs,
io::{Error as IoError, ErrorKind},
path::{Path, PathBuf},
rc::Rc,
sync::{
atomic::{AtomicBool, Ordering},
mpsc::{sync_channel, Receiver, Sender, SyncSender, TrySendError},
Arc, Mutex, RwLock, RwLockWriteGuard,
},
time::Instant,
},
thiserror::Error,
trees::{Tree, TreeWalk},
};
pub mod blockstore_purge;
pub const BLOCKSTORE_DIRECTORY: &str = "rocksdb";
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("blockstore_{}", ix))
.build()
.unwrap()));
thread_local!(static PAR_THREAD_POOL_ALL_CPUS: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(num_cpus::get())
.thread_name(|ix| format!("blockstore_{}", ix))
.build()
.unwrap()));
pub const MAX_COMPLETED_SLOTS_IN_CHANNEL: usize = 100_000;
pub const MAX_TURBINE_PROPAGATION_IN_MS: u64 = 100;
pub const MAX_TURBINE_DELAY_IN_TICKS: u64 = MAX_TURBINE_PROPAGATION_IN_MS / MS_PER_TICK;
// An upper bound on maximum number of data shreds we can handle in a slot
// 32K shreds would allow ~320K peak TPS
// (32K shreds per slot * 4 TX per shred * 2.5 slots per sec)
pub const MAX_DATA_SHREDS_PER_SLOT: usize = 32_768;
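// (concretely: 32_768 * 4 * 2.5 = 327_680 transactions per second)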
pub type CompletedSlotsSender = SyncSender<Vec<Slot>>;
pub type CompletedSlotsReceiver = Receiver<Vec<Slot>>;
type CompletedRanges = Vec<(u32, u32)>;
#[derive(Default)]
pub struct SignatureInfosForAddress {
pub infos: Vec<ConfirmedTransactionStatusWithSignature>,
pub found_before: bool,
}
#[derive(Clone, Copy)]
pub enum PurgeType {
Exact,
PrimaryIndex,
CompactionFilter,
}
#[derive(Error, Debug)]
pub enum InsertDataShredError {
Exists,
InvalidShred,
BlockstoreError(#[from] BlockstoreError),
}
impl std::fmt::Display for InsertDataShredError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "insert data shred error")
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct CompletedDataSetInfo {
pub slot: Slot,
pub start_index: u32,
pub end_index: u32,
}
pub struct BlockstoreSignals {
pub blockstore: Blockstore,
pub ledger_signal_receiver: Receiver<bool>,
pub completed_slots_receiver: CompletedSlotsReceiver,
}
// ledger window
//
// NOTE: dead_code is allowed only because `bank_hash_cf` and `program_costs_cf`
// are stubbed in for rocksdb backward compatibility with 1.7
#[allow(dead_code)]
pub struct Blockstore {
ledger_path: PathBuf,
db: Arc<Database>,
meta_cf: LedgerColumn<cf::SlotMeta>,
dead_slots_cf: LedgerColumn<cf::DeadSlots>,
duplicate_slots_cf: LedgerColumn<cf::DuplicateSlots>,
erasure_meta_cf: LedgerColumn<cf::ErasureMeta>,
orphans_cf: LedgerColumn<cf::Orphans>,
index_cf: LedgerColumn<cf::Index>,
data_shred_cf: LedgerColumn<cf::ShredData>,
code_shred_cf: LedgerColumn<cf::ShredCode>,
transaction_status_cf: LedgerColumn<cf::TransactionStatus>,
address_signatures_cf: LedgerColumn<cf::AddressSignatures>,
transaction_memos_cf: LedgerColumn<cf::TransactionMemos>,
transaction_status_index_cf: LedgerColumn<cf::TransactionStatusIndex>,
active_transaction_status_index: RwLock<u64>,
rewards_cf: LedgerColumn<cf::Rewards>,
blocktime_cf: LedgerColumn<cf::Blocktime>,
perf_samples_cf: LedgerColumn<cf::PerfSamples>,
block_height_cf: LedgerColumn<cf::BlockHeight>,
program_costs_cf: LedgerColumn<cf::ProgramCosts>,
bank_hash_cf: LedgerColumn<cf::BankHash>,
last_root: Arc<RwLock<Slot>>,
insert_shreds_lock: Arc<Mutex<()>>,
pub new_shreds_signals: Vec<SyncSender<bool>>,
pub completed_slots_senders: Vec<CompletedSlotsSender>,
pub lowest_cleanup_slot: Arc<RwLock<Slot>>,
no_compaction: bool,
slots_stats: Arc<Mutex<SlotsStats>>,
}
struct SlotsStats {
last_cleanup_ts: Instant,
stats: BTreeMap<Slot, SlotStats>,
}
impl Default for SlotsStats {
fn default() -> Self {
SlotsStats {
last_cleanup_ts: Instant::now(),
stats: BTreeMap::new(),
}
}
}
#[derive(Default)]
struct SlotStats {
num_repaired: usize,
num_recovered: usize,
}
pub struct IndexMetaWorkingSetEntry {
index: Index,
// true only if at least one shred for this Index was inserted since the time this
// struct was created
did_insert_occur: bool,
}
pub struct SlotMetaWorkingSetEntry {
new_slot_meta: Rc<RefCell<SlotMeta>>,
old_slot_meta: Option<SlotMeta>,
// True only if at least one shred for this SlotMeta was inserted since the time this
// struct was created.
did_insert_occur: bool,
}
#[derive(PartialEq, Debug, Clone)]
enum ShredSource {
Turbine,
Repaired,
Recovered,
}
#[derive(Default)]
pub struct BlockstoreInsertionMetrics {
pub num_shreds: usize,
pub insert_lock_elapsed: u64,
pub insert_shreds_elapsed: u64,
pub shred_recovery_elapsed: u64,
pub chaining_elapsed: u64,
pub commit_working_sets_elapsed: u64,
pub write_batch_elapsed: u64,
pub total_elapsed: u64,
pub num_inserted: u64,
pub num_repair: u64,
pub num_recovered: usize,
num_recovered_blockstore_error: usize,
pub num_recovered_inserted: usize,
pub num_recovered_failed_sig: usize,
pub num_recovered_failed_invalid: usize,
pub num_recovered_exists: usize,
pub index_meta_time: u64,
num_data_shreds_exists: usize,
num_data_shreds_invalid: usize,
num_data_shreds_blockstore_error: usize,
num_coding_shreds_exists: usize,
num_coding_shreds_invalid: usize,
num_coding_shreds_invalid_erasure_config: usize,
num_coding_shreds_inserted: usize,
}
impl SlotMetaWorkingSetEntry {
fn new(new_slot_meta: Rc<RefCell<SlotMeta>>, old_slot_meta: Option<SlotMeta>) -> Self {
Self {
new_slot_meta,
old_slot_meta,
did_insert_occur: false,
}
}
}
impl BlockstoreInsertionMetrics {
pub fn report_metrics(&self, metric_name: &'static str) {
datapoint_info!(
metric_name,
("num_shreds", self.num_shreds as i64, i64),
("total_elapsed", self.total_elapsed as i64, i64),
("insert_lock_elapsed", self.insert_lock_elapsed as i64, i64),
(
"insert_shreds_elapsed",
self.insert_shreds_elapsed as i64,
i64
),
(
"shred_recovery_elapsed",
self.shred_recovery_elapsed as i64,
i64
),
("chaining_elapsed", self.chaining_elapsed as i64, i64),
(
"commit_working_sets_elapsed",
self.commit_working_sets_elapsed as i64,
i64
),
("write_batch_elapsed", self.write_batch_elapsed as i64, i64),
("num_inserted", self.num_inserted as i64, i64),
("num_repair", self.num_repair as i64, i64),
("num_recovered", self.num_recovered as i64, i64),
(
"num_recovered_inserted",
self.num_recovered_inserted as i64,
i64
),
(
"num_recovered_failed_sig",
self.num_recovered_failed_sig as i64,
i64
),
(
"num_recovered_failed_invalid",
self.num_recovered_failed_invalid as i64,
i64
),
(
"num_recovered_exists",
self.num_recovered_exists as i64,
i64
),
(
"num_recovered_blockstore_error",
self.num_recovered_blockstore_error,
i64
),
("num_data_shreds_exists", self.num_data_shreds_exists, i64),
("num_data_shreds_invalid", self.num_data_shreds_invalid, i64),
(
"num_data_shreds_blockstore_error",
self.num_data_shreds_blockstore_error,
i64
),
(
"num_coding_shreds_exists",
self.num_coding_shreds_exists,
i64
),
(
"num_coding_shreds_invalid",
self.num_coding_shreds_invalid,
i64
),
(
"num_coding_shreds_invalid_erasure_config",
self.num_coding_shreds_invalid_erasure_config,
i64
),
(
"num_coding_shreds_inserted",
self.num_coding_shreds_inserted,
i64
),
);
}
}
impl Blockstore {
pub fn db(self) -> Arc<Database> {
self.db
}
pub fn ledger_path(&self) -> &Path {
&self.ledger_path
}
/// Opens a Ledger in directory, provides "infinite" window of shreds
pub fn open(ledger_path: &Path) -> Result<Blockstore> {
Self::do_open(ledger_path, AccessType::PrimaryOnly, None, true)
}
pub fn open_with_access_type(
ledger_path: &Path,
access_type: AccessType,
recovery_mode: Option<BlockstoreRecoveryMode>,
enforce_ulimit_nofile: bool,
) -> Result<Blockstore> {
Self::do_open(
ledger_path,
access_type,
recovery_mode,
enforce_ulimit_nofile,
)
}
fn do_open(
ledger_path: &Path,
access_type: AccessType,
recovery_mode: Option<BlockstoreRecoveryMode>,
enforce_ulimit_nofile: bool,
) -> Result<Blockstore> {
fs::create_dir_all(&ledger_path)?;
let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY);
adjust_ulimit_nofile(enforce_ulimit_nofile)?;
// Open the database
let mut measure = Measure::start("open");
info!("Opening database at {:?}", blockstore_path);
let db = Database::open(&blockstore_path, access_type, recovery_mode)?;
// Create the metadata column family
let meta_cf = db.column();
// Create the dead slots column family
let dead_slots_cf = db.column();
let duplicate_slots_cf = db.column();
let erasure_meta_cf = db.column();
// Create the orphans column family. An "orphan" is defined as
// the head of a detached chain of slots, i.e. a slot with no
// known parent
let orphans_cf = db.column();
let index_cf = db.column();
let data_shred_cf = db.column();
let code_shred_cf = db.column();
let transaction_status_cf = db.column();
let address_signatures_cf = db.column();
let transaction_memos_cf = db.column();
let transaction_status_index_cf = db.column();
let rewards_cf = db.column();
let blocktime_cf = db.column();
let perf_samples_cf = db.column();
let block_height_cf = db.column();
let program_costs_cf = db.column();
let bank_hash_cf = db.column();
let db = Arc::new(db);
// Get max root or 0 if it doesn't exist
let max_root = db
.iter::<cf::Root>(IteratorMode::End)?
.next()
.map(|(slot, _)| slot)
.unwrap_or(0);
let last_root = Arc::new(RwLock::new(max_root));
// Get active transaction-status index or 0
let active_transaction_status_index = db
.iter::<cf::TransactionStatusIndex>(IteratorMode::Start)?
.next();
let initialize_transaction_status_index = active_transaction_status_index.is_none();
let active_transaction_status_index = active_transaction_status_index
.and_then(|(_, data)| {
let index0: TransactionStatusIndexMeta = deserialize(&data).unwrap();
if index0.frozen {
Some(1)
} else {
None
}
})
.unwrap_or(0);
measure.stop();
info!("{:?} {}", blockstore_path, measure);
let blockstore = Blockstore {
ledger_path: ledger_path.to_path_buf(),
db,
meta_cf,
dead_slots_cf,
duplicate_slots_cf,
erasure_meta_cf,
orphans_cf,
index_cf,
data_shred_cf,
code_shred_cf,
transaction_status_cf,
address_signatures_cf,
transaction_memos_cf,
transaction_status_index_cf,
active_transaction_status_index: RwLock::new(active_transaction_status_index),
rewards_cf,
blocktime_cf,
perf_samples_cf,
block_height_cf,
program_costs_cf,
bank_hash_cf,
new_shreds_signals: vec![],
completed_slots_senders: vec![],
insert_shreds_lock: Arc::new(Mutex::new(())),
last_root,
lowest_cleanup_slot: Arc::new(RwLock::new(0)),
no_compaction: false,
slots_stats: Arc::new(Mutex::new(SlotsStats::default())),
};
if initialize_transaction_status_index {
blockstore.initialize_transaction_status_index()?;
}
Ok(blockstore)
}
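    /// Opens the blockstore in primary mode and wires up bounded signal channels
    /// for new shreds and completed slots.
    ///
    /// Illustrative sketch only (assumes `ledger_path` points at a valid ledger):
    /// ```ignore
    /// let BlockstoreSignals {
    ///     blockstore,
    ///     ledger_signal_receiver,
    ///     completed_slots_receiver,
    /// } = Blockstore::open_with_signal(ledger_path, None, true)?;
    /// // Blocks until at least one new shred has been inserted.
    /// let _ = ledger_signal_receiver.recv();
    /// ```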
pub fn open_with_signal(
ledger_path: &Path,
recovery_mode: Option<BlockstoreRecoveryMode>,
enforce_ulimit_nofile: bool,
) -> Result<BlockstoreSignals> {
let mut blockstore = Self::open_with_access_type(
ledger_path,
AccessType::PrimaryOnly,
recovery_mode,
enforce_ulimit_nofile,
)?;
let (ledger_signal_sender, ledger_signal_receiver) = sync_channel(1);
let (completed_slots_sender, completed_slots_receiver) =
sync_channel(MAX_COMPLETED_SLOTS_IN_CHANNEL);
blockstore.new_shreds_signals = vec![ledger_signal_sender];
blockstore.completed_slots_senders = vec![completed_slots_sender];
Ok(BlockstoreSignals {
blockstore,
ledger_signal_receiver,
completed_slots_receiver,
})
}
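    /// Writes a synthetic tree of tick-only slots into the blockstore, chaining
    /// blockhashes along each fork; slots that already exist and are not orphans
    /// are skipped. When `is_orphan` is true the root of `forks` is not written,
    /// so its children are stored as orphans.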
pub fn add_tree(
&self,
forks: Tree<Slot>,
is_orphan: bool,
is_slot_complete: bool,
num_ticks: u64,
starting_hash: Hash,
) {
let mut walk = TreeWalk::from(forks);
let mut blockhashes = HashMap::new();
while let Some(visit) = walk.get() {
let slot = visit.node().data;
if self.meta(slot).unwrap().is_some() && self.orphan(slot).unwrap().is_none() {
// If slot exists in blockstore and is not an orphan, then skip it
walk.forward();
continue;
}
let parent = walk.get_parent().map(|n| n.data);
if parent.is_some() || !is_orphan {
let parent_hash = parent
// parent won't exist for first node in a tree where
// `is_orphan == true`
.and_then(|parent| blockhashes.get(&parent))
.unwrap_or(&starting_hash);
let mut entries = create_ticks(
num_ticks * (std::cmp::max(1, slot - parent.unwrap_or(slot))),
0,
*parent_hash,
);
blockhashes.insert(slot, entries.last().unwrap().hash);
if !is_slot_complete {
entries.pop().unwrap();
}
let shreds = entries_to_test_shreds(
entries.clone(),
slot,
parent.unwrap_or(slot),
is_slot_complete,
0,
);
self.insert_shreds(shreds, None, false).unwrap();
}
walk.forward();
}
}
pub fn set_no_compaction(&mut self, no_compaction: bool) {
self.no_compaction = no_compaction;
}
pub fn destroy(ledger_path: &Path) -> Result<()> {
// Database::destroy() fails if the path doesn't exist
fs::create_dir_all(ledger_path)?;
let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY);
Database::destroy(&blockstore_path)
}
pub fn meta(&self, slot: Slot) -> Result<Option<SlotMeta>> {
self.meta_cf.get(slot)
}
pub fn is_full(&self, slot: Slot) -> bool {
if let Ok(Some(meta)) = self.meta_cf.get(slot) {
return meta.is_full();
}
false
}
pub fn erasure_meta(&self, slot: Slot, set_index: u64) -> Result<Option<ErasureMeta>> {
self.erasure_meta_cf.get((slot, set_index))
}
pub fn orphan(&self, slot: Slot) -> Result<Option<bool>> {
self.orphans_cf.get(slot)
}
// Get max root or 0 if it doesn't exist
pub fn max_root(&self) -> Slot {
self.db
.iter::<cf::Root>(IteratorMode::End)
.expect("Couldn't get rooted iterator for max_root()")
.next()
.map(|(slot, _)| slot)
.unwrap_or(0)
}
pub fn slot_meta_iterator(
&self,
slot: Slot,
) -> Result<impl Iterator<Item = (Slot, SlotMeta)> + '_> {
let meta_iter = self
.db
.iter::<cf::SlotMeta>(IteratorMode::From(slot, IteratorDirection::Forward))?;
Ok(meta_iter.map(|(slot, slot_meta_bytes)| {
(
slot,
deserialize(&slot_meta_bytes).unwrap_or_else(|e| {
panic!("Could not deserialize SlotMeta for slot {}: {:?}", slot, e)
}),
)
}))
}
#[allow(dead_code)]
pub fn live_slots_iterator(&self, root: Slot) -> impl Iterator<Item = (Slot, SlotMeta)> + '_ {
let root_forks = NextSlotsIterator::new(root, self);
let orphans_iter = self.orphans_iterator(root + 1).unwrap();
root_forks.chain(orphans_iter.flat_map(move |orphan| NextSlotsIterator::new(orphan, self)))
}
pub fn slot_data_iterator(
&self,
slot: Slot,
index: u64,
) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> {
let slot_iterator = self.db.iter::<cf::ShredData>(IteratorMode::From(
(slot, index),
IteratorDirection::Forward,
))?;
Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
}
pub fn slot_coding_iterator(
&self,
slot: Slot,
index: u64,
) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> {
let slot_iterator = self.db.iter::<cf::ShredCode>(IteratorMode::From(
(slot, index),
IteratorDirection::Forward,
))?;
Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
}
pub fn rooted_slot_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = u64> + '_> {
let slot_iterator = self
.db
.iter::<cf::Root>(IteratorMode::From(slot, IteratorDirection::Forward))?;
Ok(slot_iterator.map(move |(rooted_slot, _)| rooted_slot))
}
fn get_recovery_data_shreds<'a>(
index: &'a Index,
slot: Slot,
erasure_meta: &'a ErasureMeta,
prev_inserted_datas: &'a mut HashMap<(Slot, /*shred index:*/ u64), Shred>,
data_cf: &'a LedgerColumn<cf::ShredData>,
) -> impl Iterator<Item = Shred> + 'a {
erasure_meta.data_shreds_indices().filter_map(move |i| {
if let Some(shred) = prev_inserted_datas.remove(&(slot, i)) {
return Some(shred);
}
if !index.data().is_present(i) {
return None;
}
match data_cf.get_bytes((slot, i)).unwrap() {
None => {
warn!("Data shred deleted while reading for recovery");
None
}
Some(data) => Shred::new_from_serialized_shred(data).ok(),
}
})
}
fn get_recovery_coding_shreds<'a>(
index: &'a mut Index,
slot: Slot,
erasure_meta: &'a ErasureMeta,
prev_inserted_codes: &'a mut HashMap<(Slot, /*shred index:*/ u64), Shred>,
code_cf: &'a LedgerColumn<cf::ShredCode>,
) -> impl Iterator<Item = Shred> + 'a {
erasure_meta.coding_shreds_indices().filter_map(move |i| {
if let Some(shred) = prev_inserted_codes.remove(&(slot, i)) {
// Remove from the index so it doesn't get committed. We know
// this is safe to do because everything in
// `prev_inserted_codes` does not yet exist in blockstore
// (guaranteed by `check_cache_coding_shred`)
index.coding_mut().set_present(i, false);
return Some(shred);
}
if !index.coding().is_present(i) {
return None;
}
match code_cf.get_bytes((slot, i)).unwrap() {
None => {
warn!("Code shred deleted while reading for recovery");
None
}
Some(code) => Shred::new_from_serialized_shred(code).ok(),
}
})
}
fn recover_shreds(
index: &mut Index,
erasure_meta: &ErasureMeta,
prev_inserted_datas: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>,
prev_inserted_codes: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>,
recovered_data_shreds: &mut Vec<Shred>,
data_cf: &LedgerColumn<cf::ShredData>,
code_cf: &LedgerColumn<cf::ShredCode>,
) {
// Find shreds for this erasure set and try recovery
let slot = index.slot;
let mut available_shreds: Vec<_> =
Self::get_recovery_data_shreds(index, slot, erasure_meta, prev_inserted_datas, data_cf)
.collect();
available_shreds.extend(Self::get_recovery_coding_shreds(
index,
slot,
erasure_meta,
prev_inserted_codes,
code_cf,
));
if let Ok(mut result) = Shredder::try_recovery(available_shreds) {
Self::submit_metrics(slot, erasure_meta, true, "complete".into(), result.len());
recovered_data_shreds.append(&mut result);
} else {
Self::submit_metrics(slot, erasure_meta, true, "incomplete".into(), 0);
}
}
fn submit_metrics(
slot: Slot,
erasure_meta: &ErasureMeta,
attempted: bool,
status: String,
recovered: usize,
) {
let mut data_shreds_indices = erasure_meta.data_shreds_indices();
let start_index = data_shreds_indices.next().unwrap_or_default();
let end_index = data_shreds_indices.last().unwrap_or(start_index);
datapoint_debug!(
"blockstore-erasure",
("slot", slot as i64, i64),
("start_index", start_index, i64),
("end_index", end_index + 1, i64),
("recovery_attempted", attempted, bool),
("recovery_status", status, String),
("recovered", recovered as i64, i64),
);
}
fn try_shred_recovery(
db: &Database,
erasure_metas: &HashMap<(Slot, /*fec set index:*/ u64), ErasureMeta>,
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
prev_inserted_datas: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>,
prev_inserted_codes: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>,
) -> Vec<Shred> {
let data_cf = db.column::<cf::ShredData>();
let code_cf = db.column::<cf::ShredCode>();
let mut recovered_data_shreds = vec![];
        // Recovery rules:
        // 1. Only try recovery around indexes for which new data or coding shreds are received
        // 2. For new data shreds, check if an erasure set exists. If not, don't try recovery
        // 3. Before trying recovery, check that enough shreds have been received:
        //    (#data shreds + #coding shreds) > erasure.num_data
for (&(slot, _fec_set_index), erasure_meta) in erasure_metas.iter() {
let index_meta_entry = index_working_set.get_mut(&slot).expect("Index");
let index = &mut index_meta_entry.index;
match erasure_meta.status(index) {
ErasureMetaStatus::CanRecover => {
Self::recover_shreds(
index,
erasure_meta,
prev_inserted_datas,
prev_inserted_codes,
&mut recovered_data_shreds,
&data_cf,
&code_cf,
);
}
ErasureMetaStatus::DataFull => {
for i in erasure_meta.coding_shreds_indices() {
// Remove saved coding shreds. We don't need these for future recovery.
if prev_inserted_codes.remove(&(slot, i)).is_some() {
// Remove from the index so it doesn't get committed. We know
// this is safe to do because everything in
// `prev_inserted_codes` does not yet exist in blockstore
// (guaranteed by `check_cache_coding_shred`)
index.coding_mut().set_present(i, false);
}
}
Self::submit_metrics(slot, erasure_meta, false, "complete".into(), 0);
}
ErasureMetaStatus::StillNeed(needed) => {
Self::submit_metrics(
slot,
erasure_meta,
false,
format!("still need: {}", needed),
0,
);
}
};
}
recovered_data_shreds
}
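    /// Inserts data and coding shreds in a single write batch, attempting erasure
    /// recovery for any erasure sets that become recoverable along the way.
    /// `handle_duplicate` is invoked for shreds that already exist, and recovered
    /// data shreds are forwarded to `retransmit_sender` when one is provided.
    /// Returns the newly completed data-set ranges and the positions within
    /// `shreds` of the data shreds that were inserted.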
pub fn insert_shreds_handle_duplicate<F>(
&self,
shreds: Vec<Shred>,
is_repaired: Vec<bool>,
leader_schedule: Option<&LeaderScheduleCache>,
is_trusted: bool,
retransmit_sender: Option<&Sender<Vec<Shred>>>,
handle_duplicate: &F,
metrics: &mut BlockstoreInsertionMetrics,
) -> Result<(Vec<CompletedDataSetInfo>, Vec<usize>)>
where
F: Fn(Shred),
{
assert_eq!(shreds.len(), is_repaired.len());
let mut total_start = Measure::start("Total elapsed");
let mut start = Measure::start("Blockstore lock");
let _lock = self.insert_shreds_lock.lock().unwrap();
start.stop();
metrics.insert_lock_elapsed += start.as_us();
let db = &*self.db;
let mut write_batch = db.batch()?;
let mut just_inserted_coding_shreds = HashMap::new();
let mut just_inserted_data_shreds = HashMap::new();
let mut erasure_metas = HashMap::new();
let mut slot_meta_working_set = HashMap::new();
let mut index_working_set = HashMap::new();
metrics.num_shreds += shreds.len();
let mut start = Measure::start("Shred insertion");
let mut index_meta_time = 0;
let mut newly_completed_data_sets: Vec<CompletedDataSetInfo> = vec![];
let mut inserted_indices = Vec::new();
for (i, (shred, is_repaired)) in shreds.into_iter().zip(is_repaired).enumerate() {
match shred.shred_type() {
ShredType::Data => {
let shred_source = if is_repaired {
ShredSource::Repaired
} else {
ShredSource::Turbine
};
match self.check_insert_data_shred(
shred,
&mut erasure_metas,
&mut index_working_set,
&mut slot_meta_working_set,
&mut write_batch,
&mut just_inserted_data_shreds,
&mut index_meta_time,
is_trusted,
handle_duplicate,
leader_schedule,
shred_source,
) {
Err(InsertDataShredError::Exists) => metrics.num_data_shreds_exists += 1,
Err(InsertDataShredError::InvalidShred) => {
metrics.num_data_shreds_invalid += 1
}
Err(InsertDataShredError::BlockstoreError(_)) => {
metrics.num_data_shreds_blockstore_error += 1;
}
Ok(completed_data_sets) => {
newly_completed_data_sets.extend(completed_data_sets);
inserted_indices.push(i);
metrics.num_inserted += 1;
}
};
}
ShredType::Code => {
self.check_cache_coding_shred(
shred,
&mut erasure_metas,
&mut index_working_set,
&mut just_inserted_coding_shreds,
&mut index_meta_time,
handle_duplicate,
is_trusted,
is_repaired,
metrics,
);
}
};
}
start.stop();
metrics.insert_shreds_elapsed += start.as_us();
let mut start = Measure::start("Shred recovery");
if let Some(leader_schedule_cache) = leader_schedule {
let recovered_data_shreds = Self::try_shred_recovery(
db,
&erasure_metas,
&mut index_working_set,
&mut just_inserted_data_shreds,
&mut just_inserted_coding_shreds,
);
metrics.num_recovered += recovered_data_shreds.len();
let recovered_data_shreds: Vec<_> = recovered_data_shreds
.into_iter()
.filter_map(|shred| {
let leader =
leader_schedule_cache.slot_leader_at(shred.slot(), /*bank=*/ None)?;
if !shred.verify(&leader) {
metrics.num_recovered_failed_sig += 1;
return None;
}
match self.check_insert_data_shred(
shred.clone(),
&mut erasure_metas,
&mut index_working_set,
&mut slot_meta_working_set,
&mut write_batch,
&mut just_inserted_data_shreds,
&mut index_meta_time,
is_trusted,
&handle_duplicate,
leader_schedule,
ShredSource::Recovered,
) {
Err(InsertDataShredError::Exists) => {
metrics.num_recovered_exists += 1;
None
}
Err(InsertDataShredError::InvalidShred) => {
metrics.num_recovered_failed_invalid += 1;
None
}
Err(InsertDataShredError::BlockstoreError(_)) => {
metrics.num_recovered_blockstore_error += 1;
None
}
Ok(completed_data_sets) => {
newly_completed_data_sets.extend(completed_data_sets);
metrics.num_recovered_inserted += 1;
Some(shred)
}
}
})
// Always collect recovered-shreds so that above insert code is
// executed even if retransmit-sender is None.
.collect();
if !recovered_data_shreds.is_empty() {
if let Some(retransmit_sender) = retransmit_sender {
let _ = retransmit_sender.send(recovered_data_shreds);
}
}
}
start.stop();
metrics.shred_recovery_elapsed += start.as_us();
metrics.num_inserted += just_inserted_coding_shreds.len() as u64;
for (_, shred) in just_inserted_coding_shreds.into_iter() {
self.check_insert_coding_shred(
shred,
&mut index_working_set,
&mut write_batch,
&mut index_meta_time,
);
}
let mut start = Measure::start("Shred recovery");
// Handle chaining for the members of the slot_meta_working_set that were inserted into,
// drop the others
handle_chaining(&self.db, &mut write_batch, &mut slot_meta_working_set)?;
start.stop();
metrics.chaining_elapsed += start.as_us();
let mut start = Measure::start("Commit Working Sets");
let (should_signal, newly_completed_slots) = commit_slot_meta_working_set(
&slot_meta_working_set,
&self.completed_slots_senders,
&mut write_batch,
)?;
for ((slot, set_index), erasure_meta) in erasure_metas {
write_batch.put::<cf::ErasureMeta>((slot, set_index), &erasure_meta)?;
}
for (&slot, index_working_set_entry) in index_working_set.iter() {
if index_working_set_entry.did_insert_occur {
write_batch.put::<cf::Index>(slot, &index_working_set_entry.index)?;
}
}
start.stop();
metrics.commit_working_sets_elapsed += start.as_us();
let mut start = Measure::start("Write Batch");
self.db.write(write_batch)?;
start.stop();
metrics.write_batch_elapsed += start.as_us();
send_signals(
&self.new_shreds_signals,
&self.completed_slots_senders,
should_signal,
newly_completed_slots,
);
total_start.stop();
metrics.total_elapsed += total_start.as_us();
metrics.index_meta_time += index_meta_time;
Ok((newly_completed_data_sets, inserted_indices))
}
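    /// Purges all data for an unconfirmed `slot`, then reinserts a cleared
    /// `SlotMeta` so that chaining information such as `next_slots` is retained.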
pub fn clear_unconfirmed_slot(&self, slot: Slot) {
let _lock = self.insert_shreds_lock.lock().unwrap();
if let Some(mut slot_meta) = self
.meta(slot)
.expect("Couldn't fetch from SlotMeta column family")
{
// Clear all slot related information
self.run_purge(slot, slot, PurgeType::PrimaryIndex)
.expect("Purge database operations failed");
// Reinsert parts of `slot_meta` that are important to retain, like the `next_slots`
// field.
slot_meta.clear_unconfirmed_slot();
self.meta_cf
.put(slot, &slot_meta)
.expect("Couldn't insert into SlotMeta column family");
} else {
error!(
"clear_unconfirmed_slot() called on slot {} with no SlotMeta",
slot
);
}
}
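    /// Convenience wrapper around `insert_shreds_handle_duplicate` that treats
    /// all shreds as non-repaired, skips retransmission, ignores duplicates, and
    /// discards insertion metrics.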
pub fn insert_shreds(
&self,
shreds: Vec<Shred>,
leader_schedule: Option<&LeaderScheduleCache>,
is_trusted: bool,
) -> Result<(Vec<CompletedDataSetInfo>, Vec<usize>)> {
let shreds_len = shreds.len();
self.insert_shreds_handle_duplicate(
shreds,
vec![false; shreds_len],
leader_schedule,
is_trusted,
None, // retransmit-sender
&|_| {}, // handle-duplicates
&mut BlockstoreInsertionMetrics::default(),
)
}
fn check_insert_coding_shred(
&self,
shred: Shred,
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
write_batch: &mut WriteBatch,
index_meta_time: &mut u64,
) -> bool {
let slot = shred.slot();
let index_meta_working_set_entry =
get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time);
let index_meta = &mut index_meta_working_set_entry.index;
self.insert_coding_shred(index_meta, &shred, write_batch)
.map(|_| {
index_meta_working_set_entry.did_insert_occur = true;
})
.is_ok()
}
fn erasure_mismatch(shred1: &Shred, shred2: &Shred) -> bool {
shred1.coding_header.num_coding_shreds != shred2.coding_header.num_coding_shreds
|| shred1.coding_header.num_data_shreds != shred2.coding_header.num_data_shreds
}
#[allow(clippy::too_many_arguments)]
fn check_cache_coding_shred<F>(
&self,
shred: Shred,
erasure_metas: &mut HashMap<(Slot, /*fec set index:*/ u64), ErasureMeta>,
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
just_received_coding_shreds: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>,
index_meta_time: &mut u64,
handle_duplicate: &F,
is_trusted: bool,
is_repaired: bool,
metrics: &mut BlockstoreInsertionMetrics,
) -> bool
where
F: Fn(Shred),
{
let slot = shred.slot();
let shred_index = u64::from(shred.index());
let index_meta_working_set_entry =
get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time);
let index_meta = &mut index_meta_working_set_entry.index;
if !is_trusted {
if index_meta.coding().is_present(shred_index) {
metrics.num_coding_shreds_exists += 1;
handle_duplicate(shred);
return false;
}
if !Blockstore::should_insert_coding_shred(&shred, &self.last_root) {
metrics.num_coding_shreds_invalid += 1;
return false;
}
}
        // This gives the index of the first coding shred in this FEC block, so
        // all coding shreds in a given FEC block will have the same set index.
        let set_index = u64::from(shred.common_header.fec_set_index);
let erasure_config = ErasureConfig::new(
shred.coding_header.num_data_shreds as usize,
shred.coding_header.num_coding_shreds as usize,
);
let erasure_meta = erasure_metas.entry((slot, set_index)).or_insert_with(|| {
self.erasure_meta(slot, set_index)
.expect("Expect database get to succeed")
.unwrap_or_else(|| ErasureMeta::new(set_index, erasure_config))
});
// TODO: handle_duplicate is not invoked and so duplicate shreds are
// not gossiped to the rest of cluster.
if erasure_config != erasure_meta.config() {
metrics.num_coding_shreds_invalid_erasure_config += 1;
let conflicting_shred = self.find_conflicting_coding_shred(
&shred,
slot,
erasure_meta,
just_received_coding_shreds,
);
if let Some(conflicting_shred) = conflicting_shred {
if self
.store_duplicate_if_not_existing(slot, conflicting_shred, shred.payload.clone())
.is_err()
{
warn!("bad duplicate store..");
}
} else {
datapoint_info!("bad-conflict-shred", ("slot", slot, i64));
}
// ToDo: This is a potential slashing condition
warn!("Received multiple erasure configs for the same erasure set!!!");
warn!(
"Slot: {}, shred index: {}, set_index: {}, is_duplicate: {}, stored config: {:#?}, new config: {:#?}",
slot, shred.index(), set_index, self.has_duplicate_shreds_in_slot(slot), erasure_meta.config(), erasure_config
);
return false;
}
if is_repaired {
let mut slots_stats = self.slots_stats.lock().unwrap();
            let e = slots_stats.stats.entry(slot).or_default();
e.num_repaired += 1;
}
        // Should be safe to modify index_meta here. Two cases:
        // 1) Recovery happens: then all inserted erasure metas are removed
        // from just_received_coding_shreds, and nothing will be committed by
        // `check_insert_coding_shred`, so the coding index meta will not be
        // committed
        // 2) Recovery does not happen: then this shred stays in
        // just_received_coding_shreds and is committed to the blockstore by
        // `check_insert_coding_shred`, so the index entry set here is accurate
index_meta.coding_mut().set_present(shred_index, true);
if let HashMapEntry::Vacant(entry) = just_received_coding_shreds.entry((slot, shred_index))
{
metrics.num_coding_shreds_inserted += 1;
entry.insert(shred);
}
true
}
fn find_conflicting_coding_shred(
&self,
shred: &Shred,
slot: Slot,
erasure_meta: &ErasureMeta,
just_received_coding_shreds: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>,
) -> Option<Vec<u8>> {
// Search for the shred which set the initial erasure config, either inserted,
// or in the current batch in just_received_coding_shreds.
let mut conflicting_shred = None;
for coding_index in erasure_meta.coding_shreds_indices() {
let maybe_shred = self.get_coding_shred(slot, coding_index);
if let Ok(Some(shred_data)) = maybe_shred {
let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap();
if Self::erasure_mismatch(&potential_shred, shred) {
conflicting_shred = Some(potential_shred.payload);
}
break;
} else if let Some(potential_shred) =
just_received_coding_shreds.get(&(slot, coding_index))
{
if Self::erasure_mismatch(potential_shred, shred) {
conflicting_shred = Some(potential_shred.payload.clone());
}
break;
}
}
conflicting_shred
}
#[allow(clippy::too_many_arguments)]
fn check_insert_data_shred<F>(
&self,
shred: Shred,
erasure_metas: &mut HashMap<(Slot, /*fec set index:*/ u64), ErasureMeta>,
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
slot_meta_working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
write_batch: &mut WriteBatch,
just_inserted_data_shreds: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>,
index_meta_time: &mut u64,
is_trusted: bool,
handle_duplicate: &F,
leader_schedule: Option<&LeaderScheduleCache>,
shred_source: ShredSource,
) -> std::result::Result<Vec<CompletedDataSetInfo>, InsertDataShredError>
where
F: Fn(Shred),
{
let slot = shred.slot();
let shred_index = u64::from(shred.index());
let index_meta_working_set_entry =
get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time);
let index_meta = &mut index_meta_working_set_entry.index;
let slot_meta_entry =
get_slot_meta_entry(&self.db, slot_meta_working_set, slot, shred.parent());
let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut();
if !is_trusted {
if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) {
handle_duplicate(shred);
return Err(InsertDataShredError::Exists);
}
if shred.last_in_slot() && shred_index < slot_meta.received && !slot_meta.is_full() {
// We got a last shred < slot_meta.received, which signals there's an alternative,
// shorter version of the slot. Because also `!slot_meta.is_full()`, then this
// means, for the current version of the slot, we might never get all the
// shreds < the current last index, never replay this slot, and make no
// progress (for instance if a leader sends an additional detached "last index"
// shred with a very high index, but none of the intermediate shreds). Ideally, we would
// just purge all shreds > the new last index slot, but because replay may have already
// replayed entries past the newly detected "last" shred, then mark the slot as dead
// and wait for replay to dump and repair the correct version.
warn!("Received *last* shred index {} less than previous shred index {}, and slot {} is not full, marking slot dead", shred_index, slot_meta.received, slot);
write_batch.put::<cf::DeadSlots>(slot, &true).unwrap();
}
if !self.should_insert_data_shred(
&shred,
slot_meta,
just_inserted_data_shreds,
&self.last_root,
leader_schedule,
shred_source.clone(),
) {
return Err(InsertDataShredError::InvalidShred);
}
}
let set_index = u64::from(shred.common_header.fec_set_index);
let newly_completed_data_sets = self.insert_data_shred(
slot_meta,
index_meta.data_mut(),
&shred,
write_batch,
shred_source,
)?;
just_inserted_data_shreds.insert((slot, shred_index), shred);
index_meta_working_set_entry.did_insert_occur = true;
slot_meta_entry.did_insert_occur = true;
if let HashMapEntry::Vacant(entry) = erasure_metas.entry((slot, set_index)) {
if let Some(meta) = self.erasure_meta(slot, set_index).unwrap() {
entry.insert(meta);
}
}
Ok(newly_completed_data_sets)
}
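    // Sanity checks: the shred must be a coding shred from a slot newer than the
    // latest root, with an index that falls inside its non-empty, bounded FEC set.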
fn should_insert_coding_shred(shred: &Shred, last_root: &RwLock<u64>) -> bool {
let shred_index = shred.index();
let fec_set_index = shred.common_header.fec_set_index;
let num_coding_shreds = shred.coding_header.num_coding_shreds as u32;
shred.is_code()
&& shred_index >= fec_set_index
&& shred_index - fec_set_index < num_coding_shreds
&& num_coding_shreds != 0
&& num_coding_shreds <= 8 * MAX_DATA_SHREDS_PER_FEC_BLOCK
&& num_coding_shreds - 1 <= u32::MAX - fec_set_index
&& shred.slot() > *last_root.read().unwrap()
}
fn insert_coding_shred(
&self,
index_meta: &mut Index,
shred: &Shred,
write_batch: &mut WriteBatch,
) -> Result<()> {
let slot = shred.slot();
let shred_index = u64::from(shred.index());
// Assert guaranteed by integrity checks on the shred that happen before
// `insert_coding_shred` is called
assert!(shred.is_code() && shred_index >= shred.common_header.fec_set_index as u64);
// Commit step: commit all changes to the mutable structures at once, or none at all.
// We don't want only a subset of these changes going through.
write_batch.put_bytes::<cf::ShredCode>((slot, shred_index), &shred.payload)?;
index_meta.coding_mut().set_present(shred_index, true);
Ok(())
}
fn is_data_shred_present(shred: &Shred, slot_meta: &SlotMeta, data_index: &ShredIndex) -> bool {
let shred_index = u64::from(shred.index());
// Check that the shred doesn't already exist in blockstore
shred_index < slot_meta.consumed || data_index.is_present(shred_index)
}
fn get_data_shred_from_just_inserted_or_db<'a>(
&'a self,
just_inserted_data_shreds: &'a HashMap<(Slot, /*shred index:*/ u64), Shred>,
slot: Slot,
index: u64,
) -> Cow<'a, Vec<u8>> {
if let Some(shred) = just_inserted_data_shreds.get(&(slot, index)) {
Cow::Borrowed(&shred.payload)
} else {
// If it doesn't exist in the just inserted set, it must exist in
// the backing store
Cow::Owned(self.get_data_shred(slot, index).unwrap().unwrap())
}
}
fn should_insert_data_shred(
&self,
shred: &Shred,
slot_meta: &SlotMeta,
just_inserted_data_shreds: &HashMap<(Slot, /*shred index:*/ u64), Shred>,
last_root: &RwLock<u64>,
leader_schedule: Option<&LeaderScheduleCache>,
shred_source: ShredSource,
) -> bool {
let shred_index = u64::from(shred.index());
let slot = shred.slot();
let last_in_slot = if shred.last_in_slot() {
debug!("got last in slot");
true
} else {
false
};
if shred.data_header.size == 0 {
let leader_pubkey = leader_schedule
.and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));
datapoint_error!(
"blockstore_error",
(
"error",
format!(
"Leader {:?}, slot {}: received index {} is empty",
leader_pubkey, slot, shred_index,
),
String
)
);
return false;
}
if shred.payload.len() > SHRED_PAYLOAD_SIZE {
let leader_pubkey = leader_schedule
.and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));
datapoint_error!(
"blockstore_error",
(
"error",
format!(
"Leader {:?}, slot {}: received index {} shred.payload.len() > SHRED_PAYLOAD_SIZE",
leader_pubkey, slot, shred_index,
),
String
)
);
return false;
}
// Check that we do not receive shred_index >= than the last_index
// for the slot
let last_index = slot_meta.last_index;
if shred_index >= last_index {
let leader_pubkey = leader_schedule
.and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));
let ending_shred: Cow<Vec<u8>> = self.get_data_shred_from_just_inserted_or_db(
just_inserted_data_shreds,
slot,
last_index,
);
if self
.store_duplicate_if_not_existing(
slot,
ending_shred.into_owned(),
shred.payload.clone(),
)
.is_err()
{
warn!("store duplicate error");
}
datapoint_error!(
"blockstore_error",
(
"error",
format!(
"Leader {:?}, slot {}: received index {} >= slot.last_index {}, shred_source: {:?}",
leader_pubkey, slot, shred_index, last_index, shred_source
),
String
)
);
return false;
}
// Check that we do not receive a shred with "last_index" true, but shred_index
// less than our current received
if last_in_slot && shred_index < slot_meta.received {
let leader_pubkey = leader_schedule
.and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));
let ending_shred: Cow<Vec<u8>> = self.get_data_shred_from_just_inserted_or_db(
just_inserted_data_shreds,
slot,
slot_meta.received - 1,
);
if self
.store_duplicate_if_not_existing(
slot,
ending_shred.into_owned(),
shred.payload.clone(),
)
.is_err()
{
warn!("store duplicate error");
}
datapoint_error!(
"blockstore_error",
(
"error",
format!(
"Leader {:?}, slot {}: received shred_index {} < slot.received {}, shred_source: {:?}",
leader_pubkey, slot, shred_index, slot_meta.received, shred_source
),
String
)
);
return false;
}
let last_root = *last_root.read().unwrap();
verify_shred_slots(slot, slot_meta.parent_slot, last_root)
}
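    // Writes the data shred payload into the batch, advances the data index and
    // slot meta (consumed/received/completed data ranges), and reports per-slot
    // repair/recovery stats once the slot becomes full.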
fn insert_data_shred(
&self,
slot_meta: &mut SlotMeta,
data_index: &mut ShredIndex,
shred: &Shred,
write_batch: &mut WriteBatch,
shred_source: ShredSource,
) -> Result<Vec<CompletedDataSetInfo>> {
let slot = shred.slot();
let index = u64::from(shred.index());
let last_in_slot = if shred.last_in_slot() {
debug!("got last in slot");
true
} else {
false
};
let last_in_data = if shred.data_complete() {
debug!("got last in data");
true
} else {
false
};
// Parent for slot meta should have been set by this point
assert!(!is_orphan(slot_meta));
let new_consumed = if slot_meta.consumed == index {
let mut current_index = index + 1;
while data_index.is_present(current_index) {
current_index += 1;
}
current_index
} else {
slot_meta.consumed
};
// Commit step: commit all changes to the mutable structures at once, or none at all.
// We don't want only a subset of these changes going through.
write_batch.put_bytes::<cf::ShredData>(
(slot, index),
// Payload will be padded out to SHRED_PAYLOAD_SIZE
// But only need to store the bytes within data_header.size
&shred.payload[..shred.data_header.size as usize],
)?;
data_index.set_present(index, true);
let newly_completed_data_sets = update_slot_meta(
last_in_slot,
last_in_data,
slot_meta,
index as u32,
new_consumed,
shred.reference_tick(),
data_index,
)
.into_iter()
.map(|(start_index, end_index)| CompletedDataSetInfo {
slot,
start_index,
end_index,
})
.collect();
if shred_source == ShredSource::Repaired || shred_source == ShredSource::Recovered {
let mut slots_stats = self.slots_stats.lock().unwrap();
            let e = slots_stats.stats.entry(slot_meta.slot).or_default();
if shred_source == ShredSource::Repaired {
e.num_repaired += 1;
}
if shred_source == ShredSource::Recovered {
e.num_recovered += 1;
}
}
if slot_meta.is_full() {
let (num_repaired, num_recovered) = {
let mut slots_stats = self.slots_stats.lock().unwrap();
if let Some(e) = slots_stats.stats.remove(&slot_meta.slot) {
if slots_stats.last_cleanup_ts.elapsed().as_secs() > 30 {
let root = self.last_root();
slots_stats.stats = slots_stats.stats.split_off(&root);
slots_stats.last_cleanup_ts = Instant::now();
}
(e.num_repaired, e.num_recovered)
} else {
(0, 0)
}
};
datapoint_info!(
"shred_insert_is_full",
(
"total_time_ms",
solana_sdk::timing::timestamp() - slot_meta.first_shred_timestamp,
i64
),
("slot", slot_meta.slot, i64),
("last_index", slot_meta.last_index, i64),
("num_repaired", num_repaired, i64),
("num_recovered", num_recovered, i64),
);
}
trace!("inserted shred into slot {:?} and index {:?}", slot, index);
Ok(newly_completed_data_sets)
}
pub fn get_data_shred(&self, slot: Slot, index: u64) -> Result<Option<Vec<u8>>> {
self.data_shred_cf.get_bytes((slot, index)).map(|data| {
data.map(|mut d| {
// Only data_header.size bytes stored in the blockstore so
// pad the payload out to SHRED_PAYLOAD_SIZE so that the
// erasure recovery works properly.
d.resize(cmp::max(d.len(), SHRED_PAYLOAD_SIZE), 0);
d
})
})
}
pub fn get_data_shreds_for_slot(
&self,
slot: Slot,
start_index: u64,
) -> ShredResult<Vec<Shred>> {
self.slot_data_iterator(slot, start_index)
.expect("blockstore couldn't fetch iterator")
.map(|data| Shred::new_from_serialized_shred(data.1.to_vec()))
.collect()
}
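    /// Copies consecutive data shreds in `[from_index, to_index)` into `buffer`
    /// until the buffer cannot hold another shred; only full slots are served.
    /// Returns the last shred index copied and the number of bytes written.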
pub fn get_data_shreds(
&self,
slot: Slot,
from_index: u64,
to_index: u64,
buffer: &mut [u8],
) -> Result<(u64, usize)> {
// lowest_cleanup_slot is the last slot that was not cleaned up by
// LedgerCleanupService
let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot {
return Err(BlockstoreError::SlotCleanedUp);
}
let meta_cf = self.db.column::<cf::SlotMeta>();
let mut buffer_offset = 0;
let mut last_index = 0;
if let Some(meta) = meta_cf.get(slot)? {
if !meta.is_full() {
warn!("The slot is not yet full. Will not return any shreds");
return Ok((last_index, buffer_offset));
}
let to_index = cmp::min(to_index, meta.consumed);
for index in from_index..to_index {
if let Some(shred_data) = self.get_data_shred(slot, index)? {
let shred_len = shred_data.len();
if buffer.len().saturating_sub(buffer_offset) >= shred_len {
buffer[buffer_offset..buffer_offset + shred_len]
.copy_from_slice(&shred_data[..shred_len]);
buffer_offset += shred_len;
last_index = index;
// All shreds are of the same length.
// Let's check if we have scope to accommodate another shred
// If not, let's break right away, as it'll save on 1 DB read
if buffer.len().saturating_sub(buffer_offset) < shred_len {
break;
}
} else {
break;
}
}
}
}
Ok((last_index, buffer_offset))
}
pub fn get_coding_shred(&self, slot: Slot, index: u64) -> Result<Option<Vec<u8>>> {
self.code_shred_cf.get_bytes((slot, index))
}
pub fn get_coding_shreds_for_slot(
&self,
slot: Slot,
start_index: u64,
) -> ShredResult<Vec<Shred>> {
self.slot_coding_iterator(slot, start_index)
.expect("blockstore couldn't fetch iterator")
.map(|code| Shred::new_from_serialized_shred(code.1.to_vec()))
.collect()
}
// Only used by tests
#[allow(clippy::too_many_arguments)]
pub(crate) fn write_entries(
&self,
start_slot: Slot,
num_ticks_in_start_slot: u64,
start_index: u32,
ticks_per_slot: u64,
parent: Option<u64>,
is_full_slot: bool,
keypair: &Arc<Keypair>,
entries: Vec<Entry>,
version: u16,
) -> Result<usize /*num of data shreds*/> {
        let mut parent_slot = parent.unwrap_or_else(|| start_slot.saturating_sub(1));
let num_slots = (start_slot - parent_slot).max(1); // Note: slot 0 has parent slot 0
assert!(num_ticks_in_start_slot < num_slots * ticks_per_slot);
let mut remaining_ticks_in_slot = num_slots * ticks_per_slot - num_ticks_in_start_slot;
let mut current_slot = start_slot;
let mut shredder =
Shredder::new(current_slot, parent_slot, keypair.clone(), 0, version).unwrap();
let mut all_shreds = vec![];
let mut slot_entries = vec![];
// Find all the entries for start_slot
for entry in entries.into_iter() {
if remaining_ticks_in_slot == 0 {
current_slot += 1;
parent_slot = current_slot - 1;
remaining_ticks_in_slot = ticks_per_slot;
let mut current_entries = vec![];
std::mem::swap(&mut slot_entries, &mut current_entries);
let start_index = {
if all_shreds.is_empty() {
start_index
} else {
0
}
};
let (mut data_shreds, mut coding_shreds, _) =
                    shredder.entries_to_shreds(&current_entries, true, start_index);
all_shreds.append(&mut data_shreds);
all_shreds.append(&mut coding_shreds);
shredder = Shredder::new(
current_slot,
parent_slot,
keypair.clone(),
(ticks_per_slot - remaining_ticks_in_slot) as u8,
version,
)
.unwrap();
}
if entry.is_tick() {
remaining_ticks_in_slot -= 1;
}
slot_entries.push(entry);
}
if !slot_entries.is_empty() {
let (mut data_shreds, mut coding_shreds, _) =
shredder.entries_to_shreds(&slot_entries, is_full_slot, 0);
all_shreds.append(&mut data_shreds);
all_shreds.append(&mut coding_shreds);
}
let num_data = all_shreds.iter().filter(|shred| shred.is_data()).count();
self.insert_shreds(all_shreds, None, false)?;
Ok(num_data)
}
pub fn get_index(&self, slot: Slot) -> Result<Option<Index>> {
self.index_cf.get(slot)
}
/// Manually update the meta for a slot.
/// Can interfere with automatic meta update and potentially break chaining.
/// Dangerous. Use with care.
pub fn put_meta_bytes(&self, slot: Slot, bytes: &[u8]) -> Result<()> {
self.meta_cf.put_bytes(slot, bytes)
}
// Given a start and end entry index, find all the missing
// indexes in the ledger in the range [start_index, end_index)
// for the slot with the specified slot
fn find_missing_indexes<C>(
db_iterator: &mut DBRawIterator,
slot: Slot,
first_timestamp: u64,
start_index: u64,
end_index: u64,
max_missing: usize,
) -> Vec<u64>
where
C: Column<Index = (u64, u64)>,
{
if start_index >= end_index || max_missing == 0 {
return vec![];
}
let mut missing_indexes = vec![];
let ticks_since_first_insert =
DEFAULT_TICKS_PER_SECOND * (timestamp() - first_timestamp) / 1000;
// Seek to the first shred with index >= start_index
db_iterator.seek(&C::key((slot, start_index)));
// The index of the first missing shred in the slot
let mut prev_index = start_index;
'outer: loop {
if !db_iterator.valid() {
for i in prev_index..end_index {
missing_indexes.push(i);
if missing_indexes.len() == max_missing {
break;
}
}
break;
}
let (current_slot, index) = C::index(db_iterator.key().expect("Expect a valid key"));
let current_index = {
if current_slot > slot {
end_index
} else {
index
}
};
let upper_index = cmp::min(current_index, end_index);
// the tick that will be used to figure out the timeout for this hole
let reference_tick = u64::from(Shred::reference_tick_from_data(
db_iterator.value().expect("couldn't read value"),
));
if ticks_since_first_insert < reference_tick + MAX_TURBINE_DELAY_IN_TICKS {
// The higher index holes have not timed out yet
break 'outer;
}
for i in prev_index..upper_index {
missing_indexes.push(i);
if missing_indexes.len() == max_missing {
break 'outer;
}
}
if current_slot > slot {
break;
}
if current_index >= end_index {
break;
}
prev_index = current_index + 1;
db_iterator.next();
}
missing_indexes
}
pub fn find_missing_data_indexes(
&self,
slot: Slot,
first_timestamp: u64,
start_index: u64,
end_index: u64,
max_missing: usize,
) -> Vec<u64> {
if let Ok(mut db_iterator) = self
.db
.raw_iterator_cf(self.db.cf_handle::<cf::ShredData>())
{
Self::find_missing_indexes::<cf::ShredData>(
&mut db_iterator,
slot,
first_timestamp,
start_index,
end_index,
max_missing,
)
} else {
vec![]
}
}
pub fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_block_time".to_string(), String)
);
let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
// lowest_cleanup_slot is the last slot that was not cleaned up by
// LedgerCleanupService
if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot {
return Err(BlockstoreError::SlotCleanedUp);
}
self.blocktime_cf.get(slot)
}
pub fn cache_block_time(&self, slot: Slot, timestamp: UnixTimestamp) -> Result<()> {
        self.blocktime_cf.put(slot, &timestamp)
}
pub fn get_block_height(&self, slot: Slot) -> Result<Option<u64>> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_block_height".to_string(), String)
);
let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
// lowest_cleanup_slot is the last slot that was not cleaned up by
// LedgerCleanupService
if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot {
return Err(BlockstoreError::SlotCleanedUp);
}
self.block_height_cf.get(slot)
}
pub fn cache_block_height(&self, slot: Slot, block_height: u64) -> Result<()> {
self.block_height_cf.put(slot, &block_height)
}
pub fn get_first_available_block(&self) -> Result<Slot> {
let mut root_iterator = self.rooted_slot_iterator(self.lowest_slot())?;
Ok(root_iterator.next().unwrap_or_default())
}
pub fn get_rooted_block(
&self,
slot: Slot,
require_previous_blockhash: bool,
) -> Result<ConfirmedBlock> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_rooted_block".to_string(), String)
);
let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
// lowest_cleanup_slot is the last slot that was not cleaned up by
// LedgerCleanupService
if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot {
return Err(BlockstoreError::SlotCleanedUp);
}
if self.is_root(slot) {
return self.get_complete_block(slot, require_previous_blockhash);
}
Err(BlockstoreError::SlotNotRooted)
}
pub fn get_complete_block(
&self,
slot: Slot,
require_previous_blockhash: bool,
) -> Result<ConfirmedBlock> {
let slot_meta_cf = self.db.column::<cf::SlotMeta>();
let slot_meta = match slot_meta_cf.get(slot)? {
Some(slot_meta) => slot_meta,
None => {
info!("SlotMeta not found for slot {}", slot);
return Err(BlockstoreError::SlotUnavailable);
}
};
if slot_meta.is_full() {
let slot_entries = self.get_slot_entries(slot, 0)?;
if !slot_entries.is_empty() {
let slot_transaction_iterator = slot_entries
.iter()
.cloned()
.flat_map(|entry| entry.transactions)
.map(|transaction| {
if let Err(err) = transaction.sanitize() {
warn!(
"Blockstore::get_block sanitize failed: {:?}, \
slot: {:?}, \
{:?}",
err, slot, transaction,
);
}
transaction
});
let parent_slot_entries = self
.get_slot_entries(slot_meta.parent_slot, 0)
.unwrap_or_default();
if parent_slot_entries.is_empty() && require_previous_blockhash {
return Err(BlockstoreError::ParentEntriesUnavailable);
}
let previous_blockhash = if !parent_slot_entries.is_empty() {
get_last_hash(parent_slot_entries.iter()).unwrap()
} else {
Hash::default()
};
let blockhash = get_last_hash(slot_entries.iter())
.unwrap_or_else(|| panic!("Rooted slot {:?} must have blockhash", slot));
let rewards = self
.rewards_cf
.get_protobuf_or_bincode::<StoredExtendedRewards>(slot)?
.unwrap_or_default()
.into();
// The Blocktime and BlockHeight column families are updated asynchronously; they
// may not be written by the time the complete slot entries are available. In this
// case, these fields will be `None`.
let block_time = self.blocktime_cf.get(slot)?;
let block_height = self.block_height_cf.get(slot)?;
let block = ConfirmedBlock {
previous_blockhash: previous_blockhash.to_string(),
blockhash: blockhash.to_string(),
parent_slot: slot_meta.parent_slot,
transactions: self
.map_transactions_to_statuses(slot, slot_transaction_iterator),
rewards,
block_time,
block_height,
};
return Ok(block);
}
}
Err(BlockstoreError::SlotUnavailable)
}
pub fn map_transactions_to_statuses<'a>(
&self,
slot: Slot,
iterator: impl Iterator<Item = Transaction> + 'a,
) -> Vec<TransactionWithStatusMeta> {
iterator
.map(|transaction| {
let signature = transaction.signatures[0];
TransactionWithStatusMeta {
transaction,
meta: self
.read_transaction_status((signature, slot))
.ok()
.flatten(),
}
})
.collect()
}
/// Initializes the TransactionStatusIndex column family with two records, `0` and `1`,
/// which are used as the primary index for entries in the TransactionStatus and
/// AddressSignatures columns. At any given time, one primary index is active (ie. new records
/// are stored under this index), the other is frozen.
fn initialize_transaction_status_index(&self) -> Result<()> {
self.transaction_status_index_cf
.put(0, &TransactionStatusIndexMeta::default())?;
self.transaction_status_index_cf
.put(1, &TransactionStatusIndexMeta::default())?;
// This dummy status improves compaction performance
let default_status = TransactionStatusMeta::default().into();
self.transaction_status_cf
.put_protobuf(cf::TransactionStatus::as_index(2), &default_status)?;
self.address_signatures_cf.put(
cf::AddressSignatures::as_index(2),
&AddressSignatureMeta::default(),
)
}
/// Toggles the active primary index between `0` and `1`, and clears the stored max-slot of the
/// frozen index in preparation for pruning.
fn toggle_transaction_status_index(
&self,
batch: &mut WriteBatch,
w_active_transaction_status_index: &mut u64,
to_slot: Slot,
) -> Result<Option<u64>> {
let index0 = self.transaction_status_index_cf.get(0)?;
if index0.is_none() {
return Ok(None);
}
let mut index0 = index0.unwrap();
let mut index1 = self.transaction_status_index_cf.get(1)?.unwrap();
if !index0.frozen && !index1.frozen {
index0.frozen = true;
*w_active_transaction_status_index = 1;
batch.put::<cf::TransactionStatusIndex>(0, &index0)?;
Ok(None)
} else {
let purge_target_primary_index = if index0.frozen && to_slot > index0.max_slot {
info!(
"Pruning expired primary index 0 up to slot {} (max requested: {})",
index0.max_slot, to_slot
);
Some(0)
} else if index1.frozen && to_slot > index1.max_slot {
info!(
"Pruning expired primary index 1 up to slot {} (max requested: {})",
index1.max_slot, to_slot
);
Some(1)
} else {
None
};
if let Some(purge_target_primary_index) = purge_target_primary_index {
*w_active_transaction_status_index = purge_target_primary_index;
if index0.frozen {
index0.max_slot = 0
};
index0.frozen = !index0.frozen;
batch.put::<cf::TransactionStatusIndex>(0, &index0)?;
if index1.frozen {
index1.max_slot = 0
};
index1.frozen = !index1.frozen;
batch.put::<cf::TransactionStatusIndex>(1, &index1)?;
}
Ok(purge_target_primary_index)
}
}
fn get_primary_index_to_write(
&self,
slot: Slot,
// take WriteGuard to require critical section semantics at call site
w_active_transaction_status_index: &RwLockWriteGuard<Slot>,
) -> Result<u64> {
let i = **w_active_transaction_status_index;
let mut index_meta = self.transaction_status_index_cf.get(i)?.unwrap();
if slot > index_meta.max_slot {
assert!(!index_meta.frozen);
index_meta.max_slot = slot;
self.transaction_status_index_cf.put(i, &index_meta)?;
}
Ok(i)
}
pub fn read_transaction_status(
&self,
index: (Signature, Slot),
) -> Result<Option<TransactionStatusMeta>> {
let (signature, slot) = index;
let result = self
.transaction_status_cf
.get_protobuf_or_bincode::<StoredTransactionStatusMeta>((0, signature, slot))?;
if result.is_none() {
Ok(self
.transaction_status_cf
.get_protobuf_or_bincode::<StoredTransactionStatusMeta>((1, signature, slot))?
.and_then(|meta| meta.try_into().ok()))
} else {
Ok(result.and_then(|meta| meta.try_into().ok()))
}
}
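    /// Writes a transaction status under the active primary index and records an
    /// `AddressSignatures` entry for every writable and readonly account key so
    /// the transaction can later be found by address.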
pub fn write_transaction_status(
&self,
slot: Slot,
signature: Signature,
writable_keys: Vec<&Pubkey>,
readonly_keys: Vec<&Pubkey>,
status: TransactionStatusMeta,
) -> Result<()> {
let status = status.into();
// This write lock prevents interleaving issues with the transaction_status_index_cf by gating
// writes to that column
let w_active_transaction_status_index =
self.active_transaction_status_index.write().unwrap();
let primary_index =
self.get_primary_index_to_write(slot, &w_active_transaction_status_index)?;
self.transaction_status_cf
.put_protobuf((primary_index, signature, slot), &status)?;
for address in writable_keys {
self.address_signatures_cf.put(
(primary_index, *address, slot, signature),
&AddressSignatureMeta { writeable: true },
)?;
}
for address in readonly_keys {
self.address_signatures_cf.put(
(primary_index, *address, slot, signature),
&AddressSignatureMeta { writeable: false },
)?;
}
Ok(())
}
pub fn read_transaction_memos(&self, signature: Signature) -> Result<Option<String>> {
self.transaction_memos_cf.get(signature)
}
pub fn write_transaction_memos(&self, signature: &Signature, memos: String) -> Result<()> {
self.transaction_memos_cf.put(*signature, &memos)
}
fn ensure_lowest_cleanup_slot(&self) -> (std::sync::RwLockReadGuard<Slot>, Slot) {
// Ensures consistent result by using lowest_cleanup_slot as the lower bound
// for reading columns that do not employ strong read consistency with slot-based
// delete_range
let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
let lowest_available_slot = (*lowest_cleanup_slot)
.checked_add(1)
.expect("overflow from trusted value");
// Make caller hold this lock properly; otherwise LedgerCleanupService can purge/compact
// needed slots here at any given moment.
// Blockstore callers, like rpc, can process concurrent read queries
(lowest_cleanup_slot, lowest_available_slot)
}
// Returns a transaction status, as well as a loop counter for unit testing
fn get_transaction_status_with_counter(
&self,
signature: Signature,
confirmed_unrooted_slots: &[Slot],
) -> Result<(Option<(Slot, TransactionStatusMeta)>, u64)> {
let mut counter = 0;
let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot();
for transaction_status_cf_primary_index in 0..=1 {
let index_iterator = self.transaction_status_cf.iter(IteratorMode::From(
(
transaction_status_cf_primary_index,
signature,
lowest_available_slot,
),
IteratorDirection::Forward,
))?;
for ((i, sig, slot), _data) in index_iterator {
counter += 1;
if i != transaction_status_cf_primary_index || sig != signature {
break;
}
if !self.is_root(slot) && !confirmed_unrooted_slots.contains(&slot) {
continue;
}
let status = self
.transaction_status_cf
.get_protobuf_or_bincode::<StoredTransactionStatusMeta>((i, sig, slot))?
.and_then(|status| status.try_into().ok())
.map(|status| (slot, status));
return Ok((status, counter));
}
}
drop(lock);
Ok((None, counter))
}
/// Returns a transaction status
pub fn get_rooted_transaction_status(
&self,
signature: Signature,
) -> Result<Option<(Slot, TransactionStatusMeta)>> {
datapoint_info!(
"blockstore-rpc-api",
(
"method",
"get_rooted_transaction_status".to_string(),
String
)
);
self.get_transaction_status(signature, &[])
}
/// Returns a transaction status
pub fn get_transaction_status(
&self,
signature: Signature,
confirmed_unrooted_slots: &[Slot],
) -> Result<Option<(Slot, TransactionStatusMeta)>> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_transaction_status".to_string(), String)
);
self.get_transaction_status_with_counter(signature, confirmed_unrooted_slots)
.map(|(status, _)| status)
}
/// Returns a complete transaction if it was processed in a root
pub fn get_rooted_transaction(
&self,
signature: Signature,
) -> Result<Option<ConfirmedTransaction>> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_rooted_transaction".to_string(), String)
);
self.get_transaction_with_status(signature, &[])
}
/// Returns a complete transaction
pub fn get_complete_transaction(
&self,
signature: Signature,
highest_confirmed_slot: Slot,
) -> Result<Option<ConfirmedTransaction>> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_complete_transaction".to_string(), String)
);
let last_root = self.last_root();
let confirmed_unrooted_slots: Vec<_> =
AncestorIterator::new_inclusive(highest_confirmed_slot, self)
.take_while(|&slot| slot > last_root)
.collect();
self.get_transaction_with_status(signature, &confirmed_unrooted_slots)
}
fn get_transaction_with_status(
&self,
signature: Signature,
confirmed_unrooted_slots: &[Slot],
) -> Result<Option<ConfirmedTransaction>> {
if let Some((slot, status)) =
self.get_transaction_status(signature, confirmed_unrooted_slots)?
{
let transaction = self
.find_transaction_in_slot(slot, signature)?
.ok_or(BlockstoreError::TransactionStatusSlotMismatch)?; // Should not happen
let block_time = self.get_block_time(slot)?;
Ok(Some(ConfirmedTransaction {
slot,
transaction: TransactionWithStatusMeta {
transaction,
meta: Some(status),
},
block_time,
}))
} else {
Ok(None)
}
}
fn find_transaction_in_slot(
&self,
slot: Slot,
signature: Signature,
) -> Result<Option<Transaction>> {
let slot_entries = self.get_slot_entries(slot, 0)?;
Ok(slot_entries
.iter()
.cloned()
.flat_map(|entry| entry.transactions)
.map(|transaction| {
if let Err(err) = transaction.sanitize() {
warn!(
"Blockstore::find_transaction_in_slot sanitize failed: {:?}, \
slot: {:?}, \
{:?}",
err, slot, transaction,
);
}
transaction
})
.find(|transaction| transaction.signatures[0] == signature))
}
// Returns all rooted signatures for an address, ordered by slot that the transaction was
// processed in. Within each slot the transactions will be ordered by signature, and NOT by
// the order in which the transactions exist in the block
//
// DEPRECATED
fn find_address_signatures(
&self,
pubkey: Pubkey,
start_slot: Slot,
end_slot: Slot,
) -> Result<Vec<(Slot, Signature)>> {
let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot();
let mut signatures: Vec<(Slot, Signature)> = vec![];
for transaction_status_cf_primary_index in 0..=1 {
let index_iterator = self.address_signatures_cf.iter(IteratorMode::From(
(
transaction_status_cf_primary_index,
pubkey,
start_slot.max(lowest_available_slot),
Signature::default(),
),
IteratorDirection::Forward,
))?;
for ((i, address, slot, signature), _) in index_iterator {
if i != transaction_status_cf_primary_index || slot > end_slot || address != pubkey
{
break;
}
if self.is_root(slot) {
signatures.push((slot, signature));
}
}
}
drop(lock);
signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1)));
Ok(signatures)
}
// Returns all signatures for an address in a particular slot, regardless of whether that slot
// has been rooted. The transactions will be ordered by signature, and NOT by the order in
// which the transactions exist in the block
fn find_address_signatures_for_slot(
&self,
pubkey: Pubkey,
slot: Slot,
) -> Result<Vec<(Slot, Signature)>> {
let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot();
let mut signatures: Vec<(Slot, Signature)> = vec![];
for transaction_status_cf_primary_index in 0..=1 {
let index_iterator = self.address_signatures_cf.iter(IteratorMode::From(
(
transaction_status_cf_primary_index,
pubkey,
slot.max(lowest_available_slot),
Signature::default(),
),
IteratorDirection::Forward,
))?;
for ((i, address, transaction_slot, signature), _) in index_iterator {
if i != transaction_status_cf_primary_index
|| transaction_slot > slot
|| address != pubkey
{
break;
}
signatures.push((slot, signature));
}
}
drop(lock);
signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1)));
Ok(signatures)
}
// DEPRECATED
pub fn get_confirmed_signatures_for_address(
&self,
pubkey: Pubkey,
start_slot: Slot,
end_slot: Slot,
) -> Result<Vec<Signature>> {
datapoint_info!(
"blockstore-rpc-api",
(
"method",
"get_confirmed_signatures_for_address".to_string(),
String
)
);
self.find_address_signatures(pubkey, start_slot, end_slot)
.map(|signatures| signatures.iter().map(|(_, signature)| *signature).collect())
}
pub fn get_confirmed_signatures_for_address2(
&self,
address: Pubkey,
highest_slot: Slot, // highest_confirmed_root or highest_confirmed_slot
before: Option<Signature>,
until: Option<Signature>,
limit: usize,
) -> Result<SignatureInfosForAddress> {
datapoint_info!(
"blockstore-rpc-api",
(
"method",
"get_confirmed_signatures_for_address2".to_string(),
String
)
);
let last_root = self.last_root();
let confirmed_unrooted_slots: Vec<_> = AncestorIterator::new_inclusive(highest_slot, self)
.take_while(|&slot| slot > last_root)
.collect();
        // Figure out the `slot` to start listing signatures at, based on the ledger
        // location of the `before` signature if present. Also generate a HashSet of
        // signatures that should be excluded from the results.
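        // For example (hypothetical): if `before` sits in slot 120 whose first signatures,
        // sorted descending, are [C, B, A] and `before` == B, the search starts at slot 120
        // and {C, B} are excluded, leaving A and older signatures eligible.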
let mut get_before_slot_timer = Measure::start("get_before_slot_timer");
let (slot, mut before_excluded_signatures) = match before {
None => (highest_slot, None),
Some(before) => {
let transaction_status =
self.get_transaction_status(before, &confirmed_unrooted_slots)?;
match transaction_status {
None => return Ok(SignatureInfosForAddress::default()),
Some((slot, _)) => {
let block = self.get_complete_block(slot, false).map_err(|err| {
BlockstoreError::Io(IoError::new(
ErrorKind::Other,
format!("Unable to get block: {}", err),
))
})?;
// Load all signatures for the block
let mut slot_signatures: Vec<_> = block
.transactions
.into_iter()
.filter_map(|transaction_with_meta| {
transaction_with_meta
.transaction
.signatures
.into_iter()
.next()
})
.collect();
                        // Sort signatures as a way to ensure a stable ordering within a slot, as
                        // the AddressSignatures column is ordered by signatures within a slot,
                        // not by block ordering
slot_signatures.sort();
slot_signatures.reverse();
if let Some(pos) = slot_signatures.iter().position(|&x| x == before) {
slot_signatures.truncate(pos + 1);
}
(
slot,
Some(slot_signatures.into_iter().collect::<HashSet<_>>()),
)
}
}
}
};
get_before_slot_timer.stop();
        // Generate a HashSet of signatures that should be excluded from the results based
        // on the `until` signature
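        // For example (hypothetical): if `until` == B in a slot's descending signature
        // list [C, B, A], split_off keeps {B, A} as the excluded set, so within that slot
        // only C (which paginates before B) survives the final filter.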
let mut get_until_slot_timer = Measure::start("get_until_slot_timer");
let (lowest_slot, until_excluded_signatures) = match until {
None => (0, HashSet::new()),
Some(until) => {
let transaction_status =
self.get_transaction_status(until, &confirmed_unrooted_slots)?;
match transaction_status {
None => (0, HashSet::new()),
Some((slot, _)) => {
let block = self.get_complete_block(slot, false).map_err(|err| {
BlockstoreError::Io(IoError::new(
ErrorKind::Other,
format!("Unable to get block: {}", err),
))
})?;
// Load all signatures for the block
let mut slot_signatures: Vec<_> = block
.transactions
.into_iter()
.filter_map(|transaction_with_meta| {
transaction_with_meta
.transaction
.signatures
.into_iter()
.next()
})
.collect();
                        // Sort signatures as a way to ensure a stable ordering within a slot, as
                        // the AddressSignatures column is ordered by signatures within a slot,
                        // not by block ordering
slot_signatures.sort();
slot_signatures.reverse();
if let Some(pos) = slot_signatures.iter().position(|&x| x == until) {
slot_signatures = slot_signatures.split_off(pos);
}
(slot, slot_signatures.into_iter().collect::<HashSet<_>>())
}
}
}
};
get_until_slot_timer.stop();
// Fetch the list of signatures that affect the given address
let first_available_block = self.get_first_available_block()?;
let mut address_signatures = vec![];
// Get signatures in `slot`
let mut get_initial_slot_timer = Measure::start("get_initial_slot_timer");
let mut signatures = self.find_address_signatures_for_slot(address, slot)?;
signatures.reverse();
if let Some(excluded_signatures) = before_excluded_signatures.take() {
address_signatures.extend(
signatures
.into_iter()
.filter(|(_, signature)| !excluded_signatures.contains(signature)),
)
} else {
address_signatures.append(&mut signatures);
}
get_initial_slot_timer.stop();
// Check the active_transaction_status_index to see if it contains slot. If so, start with
// that index, as it will contain higher slots
let starting_primary_index = *self.active_transaction_status_index.read().unwrap();
let next_primary_index = if starting_primary_index == 0 { 1 } else { 0 };
let next_max_slot = self
.transaction_status_index_cf
.get(next_primary_index)?
.unwrap()
.max_slot;
let mut starting_primary_index_iter_timer = Measure::start("starting_primary_index_iter");
if slot > next_max_slot {
let mut starting_iterator = self.address_signatures_cf.iter(IteratorMode::From(
(starting_primary_index, address, slot, Signature::default()),
IteratorDirection::Reverse,
))?;
// Iterate through starting_iterator until limit is reached
while address_signatures.len() < limit {
if let Some(((i, key_address, slot, signature), _)) = starting_iterator.next() {
if slot == next_max_slot || slot < lowest_slot {
break;
}
if i == starting_primary_index
&& key_address == address
&& slot >= first_available_block
{
if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) {
address_signatures.push((slot, signature));
}
continue;
}
}
break;
}
// Handle slots that cross primary indexes
if next_max_slot >= lowest_slot {
let mut signatures =
self.find_address_signatures_for_slot(address, next_max_slot)?;
signatures.reverse();
address_signatures.append(&mut signatures);
}
}
starting_primary_index_iter_timer.stop();
// Iterate through next_iterator until limit is reached
let mut next_primary_index_iter_timer = Measure::start("next_primary_index_iter_timer");
let mut next_iterator = self.address_signatures_cf.iter(IteratorMode::From(
(next_primary_index, address, slot, Signature::default()),
IteratorDirection::Reverse,
))?;
while address_signatures.len() < limit {
if let Some(((i, key_address, slot, signature), _)) = next_iterator.next() {
// Skip next_max_slot, which is already included
if slot == next_max_slot {
continue;
}
if slot < lowest_slot {
break;
}
if i == next_primary_index
&& key_address == address
&& slot >= first_available_block
{
if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) {
address_signatures.push((slot, signature));
}
continue;
}
}
break;
}
next_primary_index_iter_timer.stop();
let mut address_signatures: Vec<(Slot, Signature)> = address_signatures
.into_iter()
.filter(|(_, signature)| !until_excluded_signatures.contains(signature))
.collect();
address_signatures.truncate(limit);
// Fill in the status information for each found transaction
let mut get_status_info_timer = Measure::start("get_status_info_timer");
let mut infos = vec![];
for (slot, signature) in address_signatures.into_iter() {
let transaction_status =
self.get_transaction_status(signature, &confirmed_unrooted_slots)?;
let err = transaction_status.and_then(|(_slot, status)| status.status.err());
let memo = self.read_transaction_memos(signature)?;
let block_time = self.get_block_time(slot)?;
infos.push(ConfirmedTransactionStatusWithSignature {
signature,
slot,
err,
memo,
block_time,
});
}
get_status_info_timer.stop();
datapoint_info!(
"blockstore-get-conf-sigs-for-addr-2",
(
"get_before_slot_us",
get_before_slot_timer.as_us() as i64,
i64
),
(
"get_initial_slot_us",
get_initial_slot_timer.as_us() as i64,
i64
),
(
"starting_primary_index_iter_us",
starting_primary_index_iter_timer.as_us() as i64,
i64
),
(
"next_primary_index_iter_us",
next_primary_index_iter_timer.as_us() as i64,
i64
),
(
"get_status_info_us",
get_status_info_timer.as_us() as i64,
i64
),
(
"get_until_slot_us",
get_until_slot_timer.as_us() as i64,
i64
)
);
Ok(SignatureInfosForAddress {
infos,
found_before: true, // if `before` signature was not found, this method returned early
})
}
pub fn read_rewards(&self, index: Slot) -> Result<Option<Rewards>> {
self.rewards_cf
.get_protobuf_or_bincode::<Rewards>(index)
.map(|result| result.map(|option| option.into()))
}
pub fn write_rewards(&self, index: Slot, rewards: Rewards) -> Result<()> {
let rewards = rewards.into();
self.rewards_cf.put_protobuf(index, &rewards)
}
pub fn get_recent_perf_samples(&self, num: usize) -> Result<Vec<(Slot, PerfSample)>> {
Ok(self
.db
.iter::<cf::PerfSamples>(IteratorMode::End)?
.take(num)
.map(|(slot, data)| {
let perf_sample = deserialize(&data).unwrap();
(slot, perf_sample)
})
.collect())
}
pub fn write_perf_sample(&self, index: Slot, perf_sample: &PerfSample) -> Result<()> {
self.perf_samples_cf.put(index, perf_sample)
}
pub fn read_program_costs(&self) -> Result<Vec<(Pubkey, u64)>> {
Ok(self
.db
.iter::<cf::ProgramCosts>(IteratorMode::End)?
.map(|(pubkey, data)| {
let program_cost: ProgramCost = deserialize(&data).unwrap();
(pubkey, program_cost.cost)
})
.collect())
}
pub fn write_program_cost(&self, key: &Pubkey, value: &u64) -> Result<()> {
self.program_costs_cf
.put(*key, &ProgramCost { cost: *value })
}
pub fn delete_program_cost(&self, key: &Pubkey) -> Result<()> {
self.program_costs_cf.delete(*key)
}
/// Returns the entry vector for the slot starting with `shred_start_index`
pub fn get_slot_entries(&self, slot: Slot, shred_start_index: u64) -> Result<Vec<Entry>> {
self.get_slot_entries_with_shred_info(slot, shred_start_index, false)
.map(|x| x.0)
}
/// Returns the entry vector for the slot starting with `shred_start_index`, the number of
/// shreds that comprise the entry vector, and whether the slot is full (consumed all shreds).
pub fn get_slot_entries_with_shred_info(
&self,
slot: Slot,
start_index: u64,
allow_dead_slots: bool,
) -> Result<(Vec<Entry>, u64, bool)> {
let (completed_ranges, slot_meta) = self.get_completed_ranges(slot, start_index)?;
        // Check if the slot is dead *after* fetching completed ranges to avoid a race
        // where a slot is marked dead by another thread before the completed-range query
        // finishes. This is sufficient because full slots are never marked dead from
        // another thread; that can only happen during entry processing in the replay stage.
if self.is_dead(slot) && !allow_dead_slots {
return Err(BlockstoreError::DeadSlot);
} else if completed_ranges.is_empty() {
return Ok((vec![], 0, false));
}
let slot_meta = slot_meta.unwrap();
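        // The shred count spans from `start_index` through the end of the last completed
        // range; ranges are inclusive, hence the +1.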
let num_shreds = completed_ranges
.last()
.map(|(_, end_index)| u64::from(*end_index) - start_index + 1)
.unwrap_or(0);
let entries: Result<Vec<Vec<Entry>>> = PAR_THREAD_POOL.with(|thread_pool| {
thread_pool.borrow().install(|| {
completed_ranges
.par_iter()
.map(|(start_index, end_index)| {
self.get_entries_in_data_block(
slot,
*start_index,
*end_index,
Some(&slot_meta),
)
})
.collect()
})
});
let entries: Vec<Entry> = entries?.into_iter().flatten().collect();
Ok((entries, num_shreds, slot_meta.is_full()))
}
fn get_completed_ranges(
&self,
slot: Slot,
start_index: u64,
) -> Result<(CompletedRanges, Option<SlotMeta>)> {
// lowest_cleanup_slot is the last slot that was not cleaned up by
// LedgerCleanupService
let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
if *lowest_cleanup_slot > slot {
return Err(BlockstoreError::SlotCleanedUp);
}
let slot_meta_cf = self.db.column::<cf::SlotMeta>();
let slot_meta = slot_meta_cf.get(slot)?;
if slot_meta.is_none() {
return Ok((vec![], slot_meta));
}
let slot_meta = slot_meta.unwrap();
// Find all the ranges for the completed data blocks
let completed_ranges = Self::get_completed_data_ranges(
start_index as u32,
&slot_meta.completed_data_indexes,
slot_meta.consumed as u32,
);
Ok((completed_ranges, Some(slot_meta)))
}
// Get the range of indexes [start_index, end_index] of every completed data block
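    // A worked sketch (hypothetical values): completed_data_indexes = {2, 5} with
    // start_index = 0 and consumed = 6 yields [(0, 2), (3, 5)], one inclusive range
    // per completed data set.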
fn get_completed_data_ranges(
start_index: u32,
completed_data_indexes: &BTreeSet<u32>,
consumed: u32,
) -> CompletedRanges {
        // `consumed` is the next missing shred index, but shred `i` existing in
        // completed_data_indexes implies it's not missing
assert!(!completed_data_indexes.contains(&consumed));
completed_data_indexes
.range(start_index..consumed)
.scan(start_index, |begin, index| {
let out = (*begin, *index);
*begin = index + 1;
Some(out)
})
.collect()
}
pub fn get_entries_in_data_block(
&self,
slot: Slot,
start_index: u32,
end_index: u32,
slot_meta: Option<&SlotMeta>,
) -> Result<Vec<Entry>> {
let data_shred_cf = self.db.column::<cf::ShredData>();
// Short circuit on first error
let data_shreds: Result<Vec<Shred>> = (start_index..=end_index)
.map(|i| {
data_shred_cf
.get_bytes((slot, u64::from(i)))
.and_then(|serialized_shred| {
if serialized_shred.is_none() {
if let Some(slot_meta) = slot_meta {
                                panic!(
                                    "Shred with slot: {}, index: {}, consumed: {}, \
                                     completed_indexes: {:?} must exist if shred index \
                                     was included in a range: {} {}",
                                    slot,
                                    i,
                                    slot_meta.consumed,
                                    slot_meta.completed_data_indexes,
                                    start_index,
                                    end_index
                                );
} else {
return Err(BlockstoreError::InvalidShredData(Box::new(
bincode::ErrorKind::Custom(format!(
"Missing shred for slot {}, index {}",
slot, i
)),
)));
}
}
Shred::new_from_serialized_shred(serialized_shred.unwrap()).map_err(|err| {
BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
format!(
"Could not reconstruct shred from shred payload: {:?}",
err
),
)))
})
})
})
.collect();
let data_shreds = data_shreds?;
let last_shred = data_shreds.last().unwrap();
assert!(last_shred.data_complete() || last_shred.last_in_slot());
let deshred_payload = Shredder::deshred(&data_shreds).map_err(|e| {
BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(format!(
"Could not reconstruct data block from constituent shreds, error: {:?}",
e
))))
})?;
        debug!("{} shreds in last FEC set", data_shreds.len());
bincode::deserialize::<Vec<Entry>>(&deshred_payload).map_err(|e| {
BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(format!(
"could not reconstruct entries: {:?}",
e
))))
})
}
fn get_any_valid_slot_entries(&self, slot: Slot, start_index: u64) -> Vec<Entry> {
let (completed_ranges, slot_meta) = self
.get_completed_ranges(slot, start_index)
.unwrap_or_default();
if completed_ranges.is_empty() {
return vec![];
}
let slot_meta = slot_meta.unwrap();
let entries: Vec<Vec<Entry>> = PAR_THREAD_POOL_ALL_CPUS.with(|thread_pool| {
thread_pool.borrow().install(|| {
completed_ranges
.par_iter()
.map(|(start_index, end_index)| {
self.get_entries_in_data_block(
slot,
*start_index,
*end_index,
Some(&slot_meta),
)
.unwrap_or_default()
})
.collect()
})
});
entries.into_iter().flatten().collect()
}
// Returns slots connecting to any element of the list `slots`.
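    // For example (hypothetical): if slot 3 has next_slots [4, 5] and slot 7 exists with
    // no children, get_slots_since(&[3, 7]) returns {3: [4, 5], 7: []}.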
pub fn get_slots_since(&self, slots: &[u64]) -> Result<HashMap<u64, Vec<u64>>> {
// Return error if there was a database error during lookup of any of the
// slot indexes
let slot_metas: Result<Vec<Option<SlotMeta>>> =
slots.iter().map(|slot| self.meta(*slot)).collect();
let slot_metas = slot_metas?;
let result: HashMap<u64, Vec<u64>> = slots
.iter()
.zip(slot_metas)
.filter_map(|(height, meta)| meta.map(|meta| (*height, meta.next_slots.to_vec())))
.collect();
Ok(result)
}
pub fn is_root(&self, slot: Slot) -> bool {
matches!(self.db.get::<cf::Root>(slot), Ok(Some(true)))
}
/// Returns true if a slot is between the rooted slot bounds of the ledger, but has not itself
/// been rooted. This is either because the slot was skipped, or due to a gap in ledger data,
/// as when booting from a newer snapshot.
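    ///
    /// For example (a sketch): with roots {0, 10}, an unrooted slot 5 is reported as
    /// skipped, while slot 12 is not, since it sits above the highest root.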
pub fn is_skipped(&self, slot: Slot) -> bool {
let lowest_root = self
.rooted_slot_iterator(0)
.ok()
.and_then(|mut iter| iter.next())
.unwrap_or_default();
match self.db.get::<cf::Root>(slot).ok().flatten() {
Some(_) => false,
None => slot < self.max_root() && slot > lowest_root,
}
}
pub fn set_roots<'a>(&self, rooted_slots: impl Iterator<Item = &'a Slot>) -> Result<()> {
let mut write_batch = self.db.batch()?;
let mut max_new_rooted_slot = 0;
for slot in rooted_slots {
max_new_rooted_slot = std::cmp::max(max_new_rooted_slot, *slot);
write_batch.put::<cf::Root>(*slot, &true)?;
}
self.db.write(write_batch)?;
let mut last_root = self.last_root.write().unwrap();
if *last_root == std::u64::MAX {
*last_root = 0;
}
*last_root = cmp::max(max_new_rooted_slot, *last_root);
Ok(())
}
pub fn is_dead(&self, slot: Slot) -> bool {
matches!(
self.db
.get::<cf::DeadSlots>(slot)
.expect("fetch from DeadSlots column family failed"),
Some(true)
)
}
pub fn set_dead_slot(&self, slot: Slot) -> Result<()> {
self.dead_slots_cf.put(slot, &true)
}
pub fn remove_dead_slot(&self, slot: Slot) -> Result<()> {
self.dead_slots_cf.delete(slot)
}
pub fn store_duplicate_if_not_existing(
&self,
slot: Slot,
shred1: Vec<u8>,
shred2: Vec<u8>,
) -> Result<()> {
if !self.has_duplicate_shreds_in_slot(slot) {
self.store_duplicate_slot(slot, shred1, shred2)
} else {
Ok(())
}
}
pub fn store_duplicate_slot(&self, slot: Slot, shred1: Vec<u8>, shred2: Vec<u8>) -> Result<()> {
let duplicate_slot_proof = DuplicateSlotProof::new(shred1, shred2);
self.duplicate_slots_cf.put(slot, &duplicate_slot_proof)
}
pub fn get_duplicate_slot(&self, slot: u64) -> Option<DuplicateSlotProof> {
self.duplicate_slots_cf
.get(slot)
.expect("fetch from DuplicateSlots column family failed")
}
    // `new_shred` is assumed to have slot and index equal to the given slot and index.
    // Returns the existing shred if `new_shred` is not equal to the existing shred at the
    // given slot and index, as this implies the leader generated two different shreds with
    // the same slot and index
pub fn is_shred_duplicate(
&self,
slot: u64,
index: u32,
mut payload: Vec<u8>,
shred_type: ShredType,
) -> Option<Vec<u8>> {
let existing_shred = match shred_type {
ShredType::Data => self.get_data_shred(slot, index as u64),
ShredType::Code => self.get_coding_shred(slot, index as u64),
}
.expect("fetch from DuplicateSlots column family failed")?;
let size = payload.len().max(SHRED_PAYLOAD_SIZE);
payload.resize(size, 0u8);
let new_shred = Shred::new_from_serialized_shred(payload).unwrap();
(existing_shred != new_shred.payload).then(|| existing_shred)
}
pub fn has_duplicate_shreds_in_slot(&self, slot: Slot) -> bool {
self.duplicate_slots_cf
.get(slot)
.expect("fetch from DuplicateSlots column family failed")
.is_some()
}
pub fn orphans_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = u64> + '_> {
let orphans_iter = self
.db
.iter::<cf::Orphans>(IteratorMode::From(slot, IteratorDirection::Forward))?;
Ok(orphans_iter.map(|(slot, _)| slot))
}
pub fn dead_slots_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> {
let dead_slots_iterator = self
.db
.iter::<cf::DeadSlots>(IteratorMode::From(slot, IteratorDirection::Forward))?;
Ok(dead_slots_iterator.map(|(slot, _)| slot))
}
pub fn duplicate_slots_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> {
let duplicate_slots_iterator = self
.db
.iter::<cf::DuplicateSlots>(IteratorMode::From(slot, IteratorDirection::Forward))?;
Ok(duplicate_slots_iterator.map(|(slot, _)| slot))
}
pub fn last_root(&self) -> Slot {
*self.last_root.read().unwrap()
}
    // Find the first available slot in the blockstore that has some data in it
pub fn lowest_slot(&self) -> Slot {
for (slot, meta) in self
.slot_meta_iterator(0)
.expect("unable to iterate over meta")
{
if slot > 0 && meta.received > 0 {
return slot;
}
}
        // This means the blockstore is empty; we should never get here aside from right at boot.
self.last_root()
}
pub fn lowest_cleanup_slot(&self) -> Slot {
*self.lowest_cleanup_slot.read().unwrap()
}
pub fn storage_size(&self) -> Result<u64> {
self.db.storage_size()
}
pub fn is_primary_access(&self) -> bool {
self.db.is_primary_access()
}
pub fn scan_and_fix_roots(&self, exit: &Arc<AtomicBool>) -> Result<()> {
let ancestor_iterator = AncestorIterator::new(self.last_root(), self)
.take_while(|&slot| slot >= self.lowest_cleanup_slot());
let mut find_missing_roots = Measure::start("find_missing_roots");
let mut roots_to_fix = vec![];
for slot in ancestor_iterator.filter(|slot| !self.is_root(*slot)) {
if exit.load(Ordering::Relaxed) {
return Ok(());
}
roots_to_fix.push(slot);
}
find_missing_roots.stop();
let mut fix_roots = Measure::start("fix_roots");
if !roots_to_fix.is_empty() {
info!("{} slots to be rooted", roots_to_fix.len());
for chunk in roots_to_fix.chunks(100) {
if exit.load(Ordering::Relaxed) {
return Ok(());
}
trace!("{:?}", chunk);
self.set_roots(chunk.iter())?;
}
} else {
debug!(
"No missing roots found in range {} to {}",
self.lowest_cleanup_slot(),
self.last_root()
);
}
fix_roots.stop();
datapoint_info!(
"blockstore-scan_and_fix_roots",
(
"find_missing_roots_us",
find_missing_roots.as_us() as i64,
i64
),
("num_roots_to_fix", roots_to_fix.len() as i64, i64),
("fix_roots_us", fix_roots.as_us() as i64, i64),
);
Ok(())
}
}
// Update `completed_data_indexes` with the new shred index `new_shred_index`. For every
// data set that is now complete, return its range of shred indexes
// [start_index, end_index].
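// A worked sketch (hypothetical values): with completed_data_indexes = {5}, shreds
// 0..=5 all received, and new_shred_index = 2 arriving with is_last_in_data = true,
// start_shred_index resolves to 0, shred_indices becomes [0, 3, 6], and both windows
// are fully present, so the function returns [(0, 2), (3, 5)].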
fn update_completed_data_indexes(
is_last_in_data: bool,
new_shred_index: u32,
received_data_shreds: &ShredIndex,
    // Shred indices which are marked data complete.
completed_data_indexes: &mut BTreeSet<u32>,
) -> Vec<(u32, u32)> {
let start_shred_index = completed_data_indexes
.range(..new_shred_index)
.next_back()
.map(|index| index + 1)
.unwrap_or_default();
// Consecutive entries i, k, j in this vector represent potential ranges [i, k),
// [k, j) that could be completed data ranges
let mut shred_indices = vec![start_shred_index];
    // If `new_shred_index` is marked data complete, insert it into
    // `completed_data_indexes`
if is_last_in_data {
completed_data_indexes.insert(new_shred_index);
shred_indices.push(new_shred_index + 1);
}
if let Some(index) = completed_data_indexes.range(new_shred_index + 1..).next() {
shred_indices.push(index + 1);
}
shred_indices
.windows(2)
.filter(|ix| {
let (begin, end) = (ix[0] as u64, ix[1] as u64);
let num_shreds = (end - begin) as usize;
received_data_shreds.present_in_bounds(begin..end) == num_shreds
})
.map(|ix| (ix[0], ix[1] - 1))
.collect()
}
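// A sketch of update_slot_meta's bookkeeping (hypothetical values): inserting shred
// index 7 into a previously empty slot sets received = 8 (received counts shreds, the
// index is zero-based), back-dates first_shred_timestamp using the shred's reference
// tick, and leaves last_index = std::u64::MAX until a shred flagged last-in-slot lands.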
fn update_slot_meta(
is_last_in_slot: bool,
is_last_in_data: bool,
slot_meta: &mut SlotMeta,
index: u32,
new_consumed: u64,
reference_tick: u8,
received_data_shreds: &ShredIndex,
) -> Vec<(u32, u32)> {
let maybe_first_insert = slot_meta.received == 0;
// Index is zero-indexed, while the "received" height starts from 1,
// so received = index + 1 for the same shred.
    slot_meta.received = cmp::max(u64::from(index) + 1, slot_meta.received);
if maybe_first_insert && slot_meta.received > 0 {
// predict the timestamp of what would have been the first shred in this slot
let slot_time_elapsed = u64::from(reference_tick) * 1000 / DEFAULT_TICKS_PER_SECOND;
slot_meta.first_shred_timestamp = timestamp() - slot_time_elapsed;
}
slot_meta.consumed = new_consumed;
slot_meta.last_index = {
// If the last index in the slot hasn't been set before, then
// set it to this shred index
if slot_meta.last_index == std::u64::MAX {
if is_last_in_slot {
u64::from(index)
} else {
std::u64::MAX
}
} else {
slot_meta.last_index
}
};
update_completed_data_indexes(
is_last_in_slot || is_last_in_data,
index,
received_data_shreds,
&mut slot_meta.completed_data_indexes,
)
}
fn get_index_meta_entry<'a>(
db: &Database,
slot: Slot,
index_working_set: &'a mut HashMap<u64, IndexMetaWorkingSetEntry>,
index_meta_time: &mut u64,
) -> &'a mut IndexMetaWorkingSetEntry {
let index_cf = db.column::<cf::Index>();
let mut total_start = Measure::start("Total elapsed");
let res = index_working_set.entry(slot).or_insert_with(|| {
let newly_inserted_meta = index_cf
.get(slot)
.unwrap()
.unwrap_or_else(|| Index::new(slot));
IndexMetaWorkingSetEntry {
index: newly_inserted_meta,
did_insert_occur: false,
}
});
total_start.stop();
*index_meta_time += total_start.as_us();
res
}
fn get_slot_meta_entry<'a>(
db: &Database,
slot_meta_working_set: &'a mut HashMap<u64, SlotMetaWorkingSetEntry>,
slot: Slot,
parent_slot: Slot,
) -> &'a mut SlotMetaWorkingSetEntry {
let meta_cf = db.column::<cf::SlotMeta>();
// Check if we've already inserted the slot metadata for this shred's slot
slot_meta_working_set.entry(slot).or_insert_with(|| {
// Store a 2-tuple of the metadata (working copy, backup copy)
if let Some(mut meta) = meta_cf.get(slot).expect("Expect database get to succeed") {
let backup = Some(meta.clone());
// If parent_slot == std::u64::MAX, then this is one of the orphans inserted
// during the chaining process, see the function find_slot_meta_in_cached_state()
// for details. Slots that are orphans are missing a parent_slot, so we should
// fill in the parent now that we know it.
if is_orphan(&meta) {
meta.parent_slot = parent_slot;
}
SlotMetaWorkingSetEntry::new(Rc::new(RefCell::new(meta)), backup)
} else {
SlotMetaWorkingSetEntry::new(
Rc::new(RefCell::new(SlotMeta::new(slot, parent_slot))),
None,
)
}
})
}
fn get_last_hash<'a>(iterator: impl Iterator<Item = &'a Entry> + 'a) -> Option<Hash> {
iterator.last().map(|entry| entry.hash)
}
fn is_valid_write_to_slot_0(slot_to_write: u64, parent_slot: Slot, last_root: u64) -> bool {
slot_to_write == 0 && last_root == 0 && parent_slot == 0
}
fn send_signals(
new_shreds_signals: &[SyncSender<bool>],
completed_slots_senders: &[SyncSender<Vec<u64>>],
should_signal: bool,
newly_completed_slots: Vec<u64>,
) {
if should_signal {
for signal in new_shreds_signals {
let _ = signal.try_send(true);
}
}
if !completed_slots_senders.is_empty() && !newly_completed_slots.is_empty() {
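        // Clone the slot list for all but the last sender and move the original into the
        // final send, saving one allocation per batch (a micro-optimization, not a
        // correctness requirement).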
let mut slots: Vec<_> = (0..completed_slots_senders.len() - 1)
.map(|_| newly_completed_slots.clone())
.collect();
slots.push(newly_completed_slots);
for (signal, slots) in completed_slots_senders.iter().zip(slots.into_iter()) {
let res = signal.try_send(slots);
if let Err(TrySendError::Full(_)) = res {
datapoint_error!(
"blockstore_error",
(
"error",
"Unable to send newly completed slot because channel is full".to_string(),
String
),
);
}
}
}
}
fn commit_slot_meta_working_set(
slot_meta_working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
completed_slots_senders: &[SyncSender<Vec<u64>>],
write_batch: &mut WriteBatch,
) -> Result<(bool, Vec<u64>)> {
let mut should_signal = false;
let mut newly_completed_slots = vec![];
// Check if any metadata was changed, if so, insert the new version of the
// metadata into the write batch
for (slot, slot_meta_entry) in slot_meta_working_set.iter() {
// Any slot that wasn't written to should have been filtered out by now.
assert!(slot_meta_entry.did_insert_occur);
let meta: &SlotMeta = &RefCell::borrow(&*slot_meta_entry.new_slot_meta);
let meta_backup = &slot_meta_entry.old_slot_meta;
if !completed_slots_senders.is_empty() && is_newly_completed_slot(meta, meta_backup) {
newly_completed_slots.push(*slot);
}
// Check if the working copy of the metadata has changed
if Some(meta) != meta_backup.as_ref() {
should_signal = should_signal || slot_has_updates(meta, meta_backup);
write_batch.put::<cf::SlotMeta>(*slot, meta)?;
}
}
Ok((should_signal, newly_completed_slots))
}
// 1) Find the slot metadata in the cache of dirty slot metadata we've previously touched,
// else:
// 2) Search the database for that slot metadata. If still no luck, then:
// 3) Create a dummy orphan slot in the database
fn find_slot_meta_else_create<'a>(
db: &Database,
working_set: &'a HashMap<u64, SlotMetaWorkingSetEntry>,
chained_slots: &'a mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
slot_index: u64,
) -> Result<Rc<RefCell<SlotMeta>>> {
let result = find_slot_meta_in_cached_state(working_set, chained_slots, slot_index);
if let Some(slot) = result {
Ok(slot)
} else {
find_slot_meta_in_db_else_create(db, slot_index, chained_slots)
}
}
// Search the database for that slot metadata. If still no luck, then
// create a dummy orphan slot in the database
fn find_slot_meta_in_db_else_create(
db: &Database,
slot: Slot,
insert_map: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
) -> Result<Rc<RefCell<SlotMeta>>> {
if let Some(slot_meta) = db.column::<cf::SlotMeta>().get(slot)? {
insert_map.insert(slot, Rc::new(RefCell::new(slot_meta)));
Ok(insert_map.get(&slot).unwrap().clone())
} else {
        // If this slot doesn't exist, make an orphan slot. This way we
        // remember which slots chained to this one when we eventually get a real shred
        // for this slot
insert_map.insert(slot, Rc::new(RefCell::new(SlotMeta::new_orphan(slot))));
Ok(insert_map.get(&slot).unwrap().clone())
}
}
// Find the slot metadata in the cache of dirty slot metadata we've previously touched
fn find_slot_meta_in_cached_state<'a>(
working_set: &'a HashMap<u64, SlotMetaWorkingSetEntry>,
chained_slots: &'a HashMap<u64, Rc<RefCell<SlotMeta>>>,
slot: Slot,
) -> Option<Rc<RefCell<SlotMeta>>> {
if let Some(entry) = working_set.get(&slot) {
Some(entry.new_slot_meta.clone())
} else {
chained_slots.get(&slot).cloned()
}
}
// Chaining based on latest discussion here: https://github.com/solana-labs/solana/pull/2253
fn handle_chaining(
db: &Database,
write_batch: &mut WriteBatch,
working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
) -> Result<()> {
// Handle chaining for all the SlotMetas that were inserted into
working_set.retain(|_, entry| entry.did_insert_occur);
let mut new_chained_slots = HashMap::new();
let working_set_slots: Vec<_> = working_set.keys().collect();
for slot in working_set_slots {
handle_chaining_for_slot(db, write_batch, working_set, &mut new_chained_slots, *slot)?;
}
// Write all the newly changed slots in new_chained_slots to the write_batch
for (slot, meta) in new_chained_slots.iter() {
let meta: &SlotMeta = &RefCell::borrow(&*meta);
write_batch.put::<cf::SlotMeta>(*slot, meta)?;
}
Ok(())
}
fn handle_chaining_for_slot(
db: &Database,
write_batch: &mut WriteBatch,
working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
new_chained_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
slot: Slot,
) -> Result<()> {
let slot_meta_entry = working_set
.get(&slot)
.expect("Slot must exist in the working_set hashmap");
let meta = &slot_meta_entry.new_slot_meta;
let meta_backup = &slot_meta_entry.old_slot_meta;
{
let mut meta_mut = meta.borrow_mut();
let was_orphan_slot = meta_backup.is_some() && is_orphan(meta_backup.as_ref().unwrap());
// If:
// 1) This is a new slot
// 2) slot != 0
// then try to chain this slot to a previous slot
if slot != 0 {
let prev_slot = meta_mut.parent_slot;
            // Check if the slot represented by meta_mut is either a new slot or an orphan.
            // In both cases we need to run the chaining logic because the parent on the slot
            // was previously unknown.
if meta_backup.is_none() || was_orphan_slot {
let prev_slot_meta =
find_slot_meta_else_create(db, working_set, new_chained_slots, prev_slot)?;
// This is a newly inserted slot/orphan so run the chaining logic to link it to a
// newly discovered parent
chain_new_slot_to_prev_slot(&mut prev_slot_meta.borrow_mut(), slot, &mut meta_mut);
// If the parent of `slot` is a newly inserted orphan, insert it into the orphans
// column family
if is_orphan(&RefCell::borrow(&*prev_slot_meta)) {
write_batch.put::<cf::Orphans>(prev_slot, &true)?;
}
}
}
// At this point this slot has received a parent, so it's no longer an orphan
if was_orphan_slot {
write_batch.delete::<cf::Orphans>(slot)?;
}
}
    // If this is a newly inserted slot, then we know the children of this slot were not previously
    // connected to the trunk of the ledger. Thus if slot.is_connected is now true, we need to
    // update all child slots with `is_connected` = true because these children are also now newly
    // connected to the trunk of the ledger
let should_propagate_is_connected =
is_newly_completed_slot(&RefCell::borrow(&*meta), meta_backup)
&& RefCell::borrow(&*meta).is_connected;
if should_propagate_is_connected {
// slot_function returns a boolean indicating whether to explore the children
// of the input slot
let slot_function = |slot: &mut SlotMeta| {
slot.is_connected = true;
// We don't want to set the is_connected flag on the children of non-full
// slots
slot.is_full()
};
traverse_children_mut(
db,
slot,
meta,
working_set,
new_chained_slots,
slot_function,
)?;
}
Ok(())
}
fn traverse_children_mut<F>(
db: &Database,
slot: Slot,
slot_meta: &Rc<RefCell<SlotMeta>>,
working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
new_chained_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
slot_function: F,
) -> Result<()>
where
F: Fn(&mut SlotMeta) -> bool,
{
let mut next_slots: Vec<(u64, Rc<RefCell<SlotMeta>>)> = vec![(slot, slot_meta.clone())];
while !next_slots.is_empty() {
let (_, current_slot) = next_slots.pop().unwrap();
// Check whether we should explore the children of this slot
if slot_function(&mut current_slot.borrow_mut()) {
let current_slot = &RefCell::borrow(&*current_slot);
for next_slot_index in current_slot.next_slots.iter() {
let next_slot = find_slot_meta_else_create(
db,
working_set,
new_chained_slots,
*next_slot_index,
)?;
next_slots.push((*next_slot_index, next_slot));
}
}
}
Ok(())
}
fn is_orphan(meta: &SlotMeta) -> bool {
// If we have no parent, then this is the head of a detached chain of
// slots
!meta.is_parent_set()
}
// 1) Chain current_slot to the previous slot defined by prev_slot_meta
// 2) Determine whether to set the is_connected flag
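// For example: a full, connected parent marks the newly chained child is_connected =
// true, while an orphaned or partially filled parent leaves the child disconnected
// until the parent itself becomes full and connected.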
fn chain_new_slot_to_prev_slot(
prev_slot_meta: &mut SlotMeta,
current_slot: Slot,
current_slot_meta: &mut SlotMeta,
) {
prev_slot_meta.next_slots.push(current_slot);
current_slot_meta.is_connected = prev_slot_meta.is_connected && prev_slot_meta.is_full();
}
fn is_newly_completed_slot(slot_meta: &SlotMeta, backup_slot_meta: &Option<SlotMeta>) -> bool {
slot_meta.is_full()
&& (backup_slot_meta.is_none()
|| slot_meta.consumed != backup_slot_meta.as_ref().unwrap().consumed)
}
fn slot_has_updates(slot_meta: &SlotMeta, slot_meta_backup: &Option<SlotMeta>) -> bool {
    // We should signal that there are updates if we extended the chain of consecutive blocks
    // starting from block 0, which is true iff:
    // 1) This slot is itself part of the trunk of consecutive blocks starting from block 0
    //    (i.e. it is connected),
slot_meta.is_connected &&
// AND either:
// 1) The slot didn't exist in the database before, and now we have a consecutive
// block for that slot
((slot_meta_backup.is_none() && slot_meta.consumed != 0) ||
// OR
// 2) The slot did exist, but now we have a new consecutive block for that slot
(slot_meta_backup.is_some() && slot_meta_backup.as_ref().unwrap().consumed != slot_meta.consumed))
}
// Creates a new ledger with slot 0 full of ticks (and only ticks).
//
// Returns the blockhash that can be used to append entries with.
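// A minimal usage sketch (hypothetical path; error handling elided):
//
//     let genesis_config = GenesisConfig::default();
//     let blockhash = create_new_ledger(
//         Path::new("/tmp/example-ledger"),
//         &genesis_config,
//         MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
//         AccessType::PrimaryOnly,
//     )?;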
pub fn create_new_ledger(
ledger_path: &Path,
genesis_config: &GenesisConfig,
max_genesis_archive_unpacked_size: u64,
access_type: AccessType,
) -> Result<Hash> {
Blockstore::destroy(ledger_path)?;
genesis_config.write(ledger_path)?;
// Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None, false)?;
let ticks_per_slot = genesis_config.ticks_per_slot;
let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0);
let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash());
let last_hash = entries.last().unwrap().hash;
let version = solana_sdk::shred_version::version_from_hash(&last_hash);
let shredder = Shredder::new(0, 0, Arc::new(Keypair::new()), 0, version).unwrap();
let shreds = shredder.entries_to_shreds(&entries, true, 0).0;
assert!(shreds.last().unwrap().last_in_slot());
blockstore.insert_shreds(shreds, None, false)?;
blockstore.set_roots(std::iter::once(&0))?;
// Explicitly close the blockstore before we create the archived genesis file
drop(blockstore);
let archive_path = ledger_path.join(DEFAULT_GENESIS_ARCHIVE);
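    // tar flags: `j` bzip2-compress, `c` create, `f` write to the given archive path,
    // `h` follow symlinks, `S` handle sparse files efficiently.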
let args = vec![
"jcfhS",
archive_path.to_str().unwrap(),
"-C",
ledger_path.to_str().unwrap(),
DEFAULT_GENESIS_FILE,
"rocksdb",
];
let output = std::process::Command::new("tar")
.args(&args)
.output()
.unwrap();
if !output.status.success() {
use std::str::from_utf8;
error!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?"));
error!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?"));
return Err(BlockstoreError::Io(IoError::new(
ErrorKind::Other,
format!(
"Error trying to generate snapshot archive: {}",
output.status
),
)));
}
    // Ensure the genesis archive can be unpacked and that it is under
    // max_genesis_archive_unpacked_size, immediately after creating it above.
{
let temp_dir = tempfile::tempdir_in(ledger_path).unwrap();
// unpack into a temp dir, while completely discarding the unpacked files
let unpack_check = unpack_genesis_archive(
&archive_path,
&temp_dir.into_path(),
max_genesis_archive_unpacked_size,
);
if let Err(unpack_err) = unpack_check {
// stash problematic original archived genesis related files to
// examine them later and to prevent validator and ledger-tool from
// naively consuming them
let mut error_messages = String::new();
fs::rename(
&ledger_path.join(DEFAULT_GENESIS_ARCHIVE),
ledger_path.join(format!("{}.failed", DEFAULT_GENESIS_ARCHIVE)),
)
.unwrap_or_else(|e| {
error_messages += &format!(
"/failed to stash problematic {}: {}",
DEFAULT_GENESIS_ARCHIVE, e
)
});
fs::rename(
&ledger_path.join(DEFAULT_GENESIS_FILE),
ledger_path.join(format!("{}.failed", DEFAULT_GENESIS_FILE)),
)
.unwrap_or_else(|e| {
error_messages += &format!(
"/failed to stash problematic {}: {}",
DEFAULT_GENESIS_FILE, e
)
});
fs::rename(
&ledger_path.join("rocksdb"),
ledger_path.join("rocksdb.failed"),
)
.unwrap_or_else(|e| {
error_messages += &format!("/failed to stash problematic rocksdb: {}", e)
});
return Err(BlockstoreError::Io(IoError::new(
ErrorKind::Other,
format!(
"Error checking to unpack genesis archive: {}{}",
unpack_err, error_messages
),
)));
}
}
Ok(last_hash)
}
#[macro_export]
macro_rules! tmp_ledger_name {
() => {
&format!("{}-{}", file!(), line!())
};
}
#[macro_export]
macro_rules! get_tmp_ledger_path {
() => {
$crate::blockstore::get_ledger_path_from_name($crate::tmp_ledger_name!())
};
}
pub fn get_ledger_path_from_name(name: &str) -> PathBuf {
use std::env;
let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
let keypair = Keypair::new();
let path = [
out_dir,
"ledger".to_string(),
format!("{}-{}", name, keypair.pubkey()),
]
.iter()
.collect();
// whack any possible collision
let _ignored = fs::remove_dir_all(&path);
path
}
#[macro_export]
macro_rules! create_new_tmp_ledger {
($genesis_config:expr) => {
$crate::blockstore::create_new_ledger_from_name(
$crate::tmp_ledger_name!(),
$genesis_config,
$crate::blockstore_db::AccessType::PrimaryOnly,
)
};
}
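// For example: verify_shred_slots(5, 4, 3) is true (parent below slot, at or above the
// root); verify_shred_slots(5, 2, 3) is false (chains behind the root); and
// verify_shred_slots(0, 0, 0) is the special bootstrap write to slot 0.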
pub fn verify_shred_slots(slot: Slot, parent_slot: Slot, last_root: Slot) -> bool {
if !is_valid_write_to_slot_0(slot, parent_slot, last_root) {
// Check that the parent_slot < slot
if parent_slot >= slot {
return false;
}
// Ignore shreds that chain to slots before the last root
if parent_slot < last_root {
return false;
}
// Above two checks guarantee that by this point, slot > last_root
}
true
}
// Same as `create_new_ledger()` but uses a temporary ledger name based on the provided `name`
//
// Note: like `create_new_ledger` the returned ledger will have slot 0 full of ticks (and only
// ticks)
pub fn create_new_ledger_from_name(
name: &str,
genesis_config: &GenesisConfig,
access_type: AccessType,
) -> (PathBuf, Hash) {
let ledger_path = get_ledger_path_from_name(name);
let blockhash = create_new_ledger(
&ledger_path,
genesis_config,
MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
access_type,
)
.unwrap();
(ledger_path, blockhash)
}
pub fn entries_to_test_shreds(
entries: Vec<Entry>,
slot: Slot,
parent_slot: Slot,
is_full_slot: bool,
version: u16,
) -> Vec<Shred> {
Shredder::new(slot, parent_slot, Arc::new(Keypair::new()), 0, version)
.unwrap()
.entries_to_shreds(&entries, is_full_slot, 0)
.0
}
// used for tests only
pub fn make_slot_entries(
slot: Slot,
parent_slot: Slot,
num_entries: u64,
) -> (Vec<Shred>, Vec<Entry>) {
let entries = create_ticks(num_entries, 0, Hash::default());
let shreds = entries_to_test_shreds(entries.clone(), slot, parent_slot, true, 0);
(shreds, entries)
}
// used for tests only
pub fn make_many_slot_entries(
start_slot: Slot,
num_slots: u64,
entries_per_slot: u64,
) -> (Vec<Shred>, Vec<Entry>) {
let mut shreds = vec![];
let mut entries = vec![];
for slot in start_slot..start_slot + num_slots {
let parent_slot = if slot == 0 { 0 } else { slot - 1 };
let (slot_shreds, slot_entries) = make_slot_entries(slot, parent_slot, entries_per_slot);
shreds.extend(slot_shreds);
entries.extend(slot_entries);
}
(shreds, entries)
}
// Create shreds for slots that have a parent-child relationship defined by the input `chain`
// used for tests only
pub fn make_chaining_slot_entries(
chain: &[u64],
entries_per_slot: u64,
) -> Vec<(Vec<Shred>, Vec<Entry>)> {
let mut slots_shreds_and_entries = vec![];
for (i, slot) in chain.iter().enumerate() {
let parent_slot = {
if *slot == 0 || i == 0 {
0
} else {
chain[i - 1]
}
};
let result = make_slot_entries(*slot, parent_slot, entries_per_slot);
slots_shreds_and_entries.push(result);
}
slots_shreds_and_entries
}
#[cfg(not(unix))]
fn adjust_ulimit_nofile(_enforce_ulimit_nofile: bool) -> Result<()> {
Ok(())
}
#[cfg(unix)]
fn adjust_ulimit_nofile(enforce_ulimit_nofile: bool) -> Result<()> {
    // RocksDB likes to have many open files. The default open-file-descriptor limit is
    // usually not enough.
let desired_nofile = 500000;
fn get_nofile() -> libc::rlimit {
let mut nofile = libc::rlimit {
rlim_cur: 0,
rlim_max: 0,
};
if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut nofile) } != 0 {
warn!("getrlimit(RLIMIT_NOFILE) failed");
}
nofile
}
let mut nofile = get_nofile();
if nofile.rlim_cur < desired_nofile {
nofile.rlim_cur = desired_nofile;
if unsafe { libc::setrlimit(libc::RLIMIT_NOFILE, &nofile) } != 0 {
error!(
"Unable to increase the maximum open file descriptor limit to {}",
desired_nofile
);
if cfg!(target_os = "macos") {
error!(
"On mac OS you may need to run |sudo launchctl limit maxfiles {} {}| first",
desired_nofile, desired_nofile,
);
}
if enforce_ulimit_nofile {
return Err(BlockstoreError::UnableToSetOpenFileDescriptorLimit);
}
}
nofile = get_nofile();
}
info!("Maximum open file descriptors: {}", nofile.rlim_cur);
Ok(())
}
#[cfg(test)]
pub mod tests {
use {
super::*,
crate::{
entry::{next_entry, next_entry_mut},
genesis_utils::{create_genesis_config, GenesisConfigInfo},
leader_schedule::{FixedSchedule, LeaderSchedule},
shred::{max_ticks_per_n_shreds, DataShredHeader},
},
assert_matches::assert_matches,
bincode::serialize,
itertools::Itertools,
rand::{seq::SliceRandom, thread_rng},
solana_account_decoder::parse_token::UiTokenAmount,
solana_runtime::bank::{Bank, RewardType},
solana_sdk::{
hash::{self, hash, Hash},
instruction::CompiledInstruction,
packet::PACKET_DATA_SIZE,
pubkey::Pubkey,
signature::Signature,
transaction::TransactionError,
},
solana_storage_proto::convert::generated,
solana_transaction_status::{InnerInstructions, Reward, Rewards, TransactionTokenBalance},
std::{sync::mpsc::channel, thread::Builder, time::Duration},
};
// used for tests only
pub(crate) fn make_slot_entries_with_transactions(num_entries: u64) -> Vec<Entry> {
let mut entries: Vec<Entry> = Vec::new();
for x in 0..num_entries {
let transaction = Transaction::new_with_compiled_instructions(
&[&Keypair::new()],
&[solana_sdk::pubkey::new_rand()],
Hash::default(),
vec![solana_sdk::pubkey::new_rand()],
vec![CompiledInstruction::new(1, &(), vec![0])],
);
entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
let mut tick = create_ticks(1, 0, hash(&serialize(&x).unwrap()));
entries.append(&mut tick);
}
entries
}
#[test]
fn test_create_new_ledger() {
let mint_total = 1_000_000_000_000;
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(mint_total);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let ledger = Blockstore::open(&ledger_path).unwrap();
let ticks = create_ticks(genesis_config.ticks_per_slot, 0, genesis_config.hash());
let entries = ledger.get_slot_entries(0, 0).unwrap();
assert_eq!(ticks, entries);
// Destroying database without closing it first is undefined behavior
drop(ledger);
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
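    // A minimal sketch exercising update_completed_data_indexes directly (hypothetical
    // scenario; assumes ShredIndex::default() and set_present from blockstore_meta).
    // Marking every shred last-in-data makes each one its own data set, so insertion i
    // should complete exactly the single-index range (i, i).
    #[test]
    fn test_update_completed_data_indexes_sketch() {
        let mut completed_data_indexes = BTreeSet::default();
        let mut shred_index = ShredIndex::default();
        for i in 0..10 {
            shred_index.set_present(i as u64, true);
            assert_eq!(
                update_completed_data_indexes(true, i, &shred_index, &mut completed_data_indexes),
                vec![(i, i)]
            );
            assert!(completed_data_indexes.iter().copied().eq(0..=i));
        }
    }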
#[test]
fn test_insert_get_bytes() {
// Create enough entries to ensure there are at least two shreds created
let num_entries = max_ticks_per_n_shreds(1, None) + 1;
assert!(num_entries > 1);
let (mut shreds, _) = make_slot_entries(0, 0, num_entries);
let ledger_path = get_tmp_ledger_path!();
let ledger = Blockstore::open(&ledger_path).unwrap();
// Insert last shred, test we can retrieve it
let last_shred = shreds.pop().unwrap();
assert!(last_shred.index() > 0);
ledger
.insert_shreds(vec![last_shred.clone()], None, false)
.unwrap();
let serialized_shred = ledger
.data_shred_cf
.get_bytes((0, last_shred.index() as u64))
.unwrap()
.unwrap();
let deserialized_shred = Shred::new_from_serialized_shred(serialized_shred).unwrap();
assert_eq!(last_shred, deserialized_shred);
// Destroying database without closing it first is undefined behavior
drop(ledger);
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
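    // A minimal sketch of Blockstore::get_completed_data_ranges (hypothetical values):
    // data-complete markers at indexes 2, 5 and 8 with consumed = 9 should split the
    // shreds into three inclusive ranges.
    #[test]
    fn test_get_completed_data_ranges_sketch() {
        let completed_data_indexes: BTreeSet<u32> = [2, 5, 8].iter().copied().collect();
        assert_eq!(
            Blockstore::get_completed_data_ranges(0, &completed_data_indexes, 9),
            vec![(0, 2), (3, 5), (6, 8)]
        );
    }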
#[test]
fn test_write_entries() {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let ticks_per_slot = 10;
let num_slots = 10;
let ledger = Blockstore::open(&ledger_path).unwrap();
let mut ticks = vec![];
let mut shreds_per_slot = vec![];
for i in 0..num_slots {
let mut new_ticks = create_ticks(ticks_per_slot, 0, Hash::default());
let num_shreds = ledger
.write_entries(
i,
0,
0,
ticks_per_slot,
Some(i.saturating_sub(1)),
true,
&Arc::new(Keypair::new()),
new_ticks.clone(),
0,
)
.unwrap() as u64;
shreds_per_slot.push(num_shreds);
ticks.append(&mut new_ticks);
}
for i in 0..num_slots {
let meta = ledger.meta(i).unwrap().unwrap();
let num_shreds = shreds_per_slot[i as usize];
assert_eq!(meta.consumed, num_shreds);
assert_eq!(meta.received, num_shreds);
assert_eq!(meta.last_index, num_shreds - 1);
if i == num_slots - 1 {
assert!(meta.next_slots.is_empty());
} else {
assert_eq!(meta.next_slots, vec![i + 1]);
}
if i == 0 {
assert_eq!(meta.parent_slot, 0);
} else {
assert_eq!(meta.parent_slot, i - 1);
}
assert_eq!(
&ticks[(i * ticks_per_slot) as usize..((i + 1) * ticks_per_slot) as usize],
&ledger.get_slot_entries(i, 0).unwrap()[..]
);
}
/*
// Simulate writing to the end of a slot with existing ticks
ledger
.write_entries(
num_slots,
ticks_per_slot - 1,
ticks_per_slot - 2,
ticks_per_slot,
&ticks[0..2],
)
.unwrap();
let meta = ledger.meta(num_slots).unwrap().unwrap();
assert_eq!(meta.consumed, 0);
// received shred was ticks_per_slot - 2, so received should be ticks_per_slot - 2 + 1
assert_eq!(meta.received, ticks_per_slot - 1);
// last shred index ticks_per_slot - 2 because that's the shred that made tick_height == ticks_per_slot
// for the slot
assert_eq!(meta.last_index, ticks_per_slot - 2);
assert_eq!(meta.parent_slot, num_slots - 1);
assert_eq!(meta.next_slots, vec![num_slots + 1]);
assert_eq!(
&ticks[0..1],
&ledger
.get_slot_entries(num_slots, ticks_per_slot - 2)
.unwrap()[..]
);
// We wrote two entries, the second should spill into slot num_slots + 1
let meta = ledger.meta(num_slots + 1).unwrap().unwrap();
assert_eq!(meta.consumed, 1);
assert_eq!(meta.received, 1);
assert_eq!(meta.last_index, std::u64::MAX);
assert_eq!(meta.parent_slot, num_slots);
assert!(meta.next_slots.is_empty());
assert_eq!(
&ticks[1..2],
&ledger.get_slot_entries(num_slots + 1, 0).unwrap()[..]
);
*/
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn test_put_get_simple() {
let ledger_path = get_tmp_ledger_path!();
let ledger = Blockstore::open(&ledger_path).unwrap();
// Test meta column family
let meta = SlotMeta::new(0, 1);
ledger.meta_cf.put(0, &meta).unwrap();
let result = ledger
.meta_cf
.get(0)
.unwrap()
.expect("Expected meta object to exist");
assert_eq!(result, meta);
// Test erasure column family
let erasure = vec![1u8; 16];
let erasure_key = (0, 0);
ledger
.code_shred_cf
.put_bytes(erasure_key, &erasure)
.unwrap();
let result = ledger
.code_shred_cf
.get_bytes(erasure_key)
.unwrap()
.expect("Expected erasure object to exist");
assert_eq!(result, erasure);
// Test data column family
let data = vec![2u8; 16];
let data_key = (0, 0);
ledger.data_shred_cf.put_bytes(data_key, &data).unwrap();
let result = ledger
.data_shred_cf
.get_bytes(data_key)
.unwrap()
.expect("Expected data object to exist");
assert_eq!(result, data);
// Destroying database without closing it first is undefined behavior
drop(ledger);
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn test_read_shred_bytes() {
let slot = 0;
let (shreds, _) = make_slot_entries(slot, 0, 100);
let num_shreds = shreds.len() as u64;
let shred_bufs: Vec<_> = shreds.iter().map(|shred| shred.payload.clone()).collect();
let ledger_path = get_tmp_ledger_path!();
let ledger = Blockstore::open(&ledger_path).unwrap();
ledger.insert_shreds(shreds, None, false).unwrap();
let mut buf = [0; 4096];
let (_, bytes) = ledger.get_data_shreds(slot, 0, 1, &mut buf).unwrap();
assert_eq!(buf[..bytes], shred_bufs[0][..bytes]);
let (last_index, bytes2) = ledger.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
assert_eq!(last_index, 1);
assert!(bytes2 > bytes);
{
let shred_data_1 = &buf[..bytes];
assert_eq!(shred_data_1, &shred_bufs[0][..bytes]);
let shred_data_2 = &buf[bytes..bytes2];
assert_eq!(shred_data_2, &shred_bufs[1][..bytes2 - bytes]);
}
// buf size part-way into shred[1], should just return shred[0]
let mut buf = vec![0; bytes + 1];
let (last_index, bytes3) = ledger.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
assert_eq!(last_index, 0);
assert_eq!(bytes3, bytes);
let mut buf = vec![0; bytes2 - 1];
let (last_index, bytes4) = ledger.get_data_shreds(slot, 0, 2, &mut buf).unwrap();
assert_eq!(last_index, 0);
assert_eq!(bytes4, bytes);
let mut buf = vec![0; bytes * 2];
let (last_index, bytes6) = ledger
.get_data_shreds(slot, num_shreds - 1, num_shreds, &mut buf)
.unwrap();
assert_eq!(last_index, num_shreds - 1);
{
let shred_data = &buf[..bytes6];
assert_eq!(shred_data, &shred_bufs[(num_shreds - 1) as usize][..bytes6]);
}
// Read out of range
let (last_index, bytes6) = ledger
.get_data_shreds(slot, num_shreds, num_shreds + 2, &mut buf)
.unwrap();
assert_eq!(last_index, 0);
assert_eq!(bytes6, 0);
// Destroying database without closing it first is undefined behavior
drop(ledger);
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn test_shred_cleanup_check() {
let slot = 1;
let (shreds, _) = make_slot_entries(slot, 0, 100);
let ledger_path = get_tmp_ledger_path!();
let ledger = Blockstore::open(&ledger_path).unwrap();
ledger.insert_shreds(shreds, None, false).unwrap();
let mut buf = [0; 4096];
assert!(ledger.get_data_shreds(slot, 0, 1, &mut buf).is_ok());
let max_purge_slot = 1;
ledger
.run_purge(0, max_purge_slot, PurgeType::PrimaryIndex)
.unwrap();
*ledger.lowest_cleanup_slot.write().unwrap() = max_purge_slot;
let mut buf = [0; 4096];
assert!(ledger.get_data_shreds(slot, 0, 1, &mut buf).is_err());
}
#[test]
fn test_insert_data_shreds_basic() {
// Create enough entries to ensure there are at least two shreds created
let num_entries = max_ticks_per_n_shreds(1, None) + 1;
assert!(num_entries > 1);
let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
let num_shreds = shreds.len() as u64;
let ledger_path = get_tmp_ledger_path!();
let ledger = Blockstore::open(&ledger_path).unwrap();
        // Insert the last shred; since we're missing the other shreds, no consecutive
        // shreds starting from slot 0, index 0 should exist.
assert!(shreds.len() > 1);
let last_shred = shreds.pop().unwrap();
ledger.insert_shreds(vec![last_shred], None, false).unwrap();
assert!(ledger.get_slot_entries(0, 0).unwrap().is_empty());
let meta = ledger
.meta(0)
.unwrap()
.expect("Expected new metadata object to be created");
assert!(meta.consumed == 0 && meta.received == num_shreds);
// Insert the other shreds, check for consecutive returned entries
ledger.insert_shreds(shreds, None, false).unwrap();
let result = ledger.get_slot_entries(0, 0).unwrap();
assert_eq!(result, entries);
let meta = ledger
.meta(0)
.unwrap()
.expect("Expected new metadata object to exist");
assert_eq!(meta.consumed, num_shreds);
assert_eq!(meta.received, num_shreds);
assert_eq!(meta.parent_slot, 0);
assert_eq!(meta.last_index, num_shreds - 1);
assert!(meta.next_slots.is_empty());
assert!(meta.is_connected);
// Destroying database without closing it first is undefined behavior
drop(ledger);
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn test_insert_data_shreds_reverse() {
let num_shreds = 10;
let num_entries = max_ticks_per_n_shreds(num_shreds, None);
let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
let num_shreds = shreds.len() as u64;
let ledger_path = get_tmp_ledger_path!();
let ledger = Blockstore::open(&ledger_path).unwrap();
// Insert shreds in reverse, check for consecutive returned shreds
for i in (0..num_shreds).rev() {
let shred = shreds.pop().unwrap();
ledger.insert_shreds(vec![shred], None, false).unwrap();
let result = ledger.get_slot_entries(0, 0).unwrap();
let meta = ledger
.meta(0)
.unwrap()
.expect("Expected metadata object to exist");
assert_eq!(meta.last_index, num_shreds - 1);
if i != 0 {
assert_eq!(result.len(), 0);
assert!(meta.consumed == 0 && meta.received == num_shreds as u64);
} else {
assert_eq!(meta.parent_slot, 0);
assert_eq!(result, entries);
assert!(meta.consumed == num_shreds as u64 && meta.received == num_shreds as u64);
}
}
// Destroying database without closing it first is undefined behavior
drop(ledger);
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn test_insert_slots() {
test_insert_data_shreds_slots("test_insert_data_shreds_slots_single", false);
test_insert_data_shreds_slots("test_insert_data_shreds_slots_bulk", true);
}
/*
#[test]
pub fn test_iteration_order() {
let slot = 0;
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Write entries
let num_entries = 8;
let entries = make_tiny_test_entries(num_entries);
let mut shreds = entries.to_single_entry_shreds();
for (i, b) in shreds.iter_mut().enumerate() {
b.set_index(1 << (i * 8));
b.set_slot(0);
}
blockstore
.write_shreds(&shreds)
.expect("Expected successful write of shreds");
let mut db_iterator = blockstore
.db
.cursor::<cf::Data>()
.expect("Expected to be able to open database iterator");
db_iterator.seek((slot, 1));
// Iterate through ledger
for i in 0..num_entries {
assert!(db_iterator.valid());
let (_, current_index) = db_iterator.key().expect("Expected a valid key");
assert_eq!(current_index, (1 as u64) << (i * 8));
db_iterator.next();
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
*/
#[test]
pub fn test_get_slot_entries1() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let entries = create_ticks(8, 0, Hash::default());
let shreds = entries_to_test_shreds(entries[0..4].to_vec(), 1, 0, false, 0);
blockstore
.insert_shreds(shreds, None, false)
.expect("Expected successful write of shreds");
let mut shreds1 = entries_to_test_shreds(entries[4..].to_vec(), 1, 0, false, 0);
for (i, b) in shreds1.iter_mut().enumerate() {
b.set_index(8 + i as u32);
}
blockstore
.insert_shreds(shreds1, None, false)
.expect("Expected successful write of shreds");
assert_eq!(
blockstore.get_slot_entries(1, 0).unwrap()[2..4],
entries[2..4],
);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
// This test seems to be unnecessary with the introduction of data shreds. There are no
// guarantees that a particular shred index contains a complete entry
#[test]
#[ignore]
pub fn test_get_slot_entries2() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Write entries
let num_slots = 5_u64;
let mut index = 0;
for slot in 0..num_slots {
let entries = create_ticks(slot + 1, 0, Hash::default());
let last_entry = entries.last().unwrap().clone();
let mut shreds =
entries_to_test_shreds(entries, slot, slot.saturating_sub(1), false, 0);
for b in shreds.iter_mut() {
b.set_index(index);
b.set_slot(slot as u64);
index += 1;
}
blockstore
.insert_shreds(shreds, None, false)
.expect("Expected successful write of shreds");
assert_eq!(
blockstore
.get_slot_entries(slot, u64::from(index - 1))
.unwrap(),
vec![last_entry],
);
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_get_slot_entries3() {
// Test inserting/fetching shreds which contain multiple entries per shred
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_slots = 5_u64;
let shreds_per_slot = 5_u64;
let entry_serialized_size =
bincode::serialized_size(&create_ticks(1, 0, Hash::default())).unwrap();
let entries_per_slot =
(shreds_per_slot * PACKET_DATA_SIZE as u64) / entry_serialized_size;
// Write entries
for slot in 0..num_slots {
let entries = create_ticks(entries_per_slot, 0, Hash::default());
let shreds =
entries_to_test_shreds(entries.clone(), slot, slot.saturating_sub(1), false, 0);
assert!(shreds.len() as u64 >= shreds_per_slot);
blockstore
.insert_shreds(shreds, None, false)
.expect("Expected successful write of shreds");
assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), entries);
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_insert_data_shreds_consecutive() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Create enough entries to ensure there are at least two shreds created
let min_entries = max_ticks_per_n_shreds(1, None) + 1;
for i in 0..4 {
let slot = i;
let parent_slot = if i == 0 { 0 } else { i - 1 };
// Write entries
let num_entries = min_entries * (i + 1);
let (shreds, original_entries) = make_slot_entries(slot, parent_slot, num_entries);
let num_shreds = shreds.len() as u64;
assert!(num_shreds > 1);
let mut even_shreds = vec![];
let mut odd_shreds = vec![];
for (i, shred) in shreds.into_iter().enumerate() {
if i % 2 == 0 {
even_shreds.push(shred);
} else {
odd_shreds.push(shred);
}
}
blockstore.insert_shreds(odd_shreds, None, false).unwrap();
assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), vec![]);
let meta = blockstore.meta(slot).unwrap().unwrap();
if num_shreds % 2 == 0 {
assert_eq!(meta.received, num_shreds);
} else {
trace!("got here");
assert_eq!(meta.received, num_shreds - 1);
}
assert_eq!(meta.consumed, 0);
if num_shreds % 2 == 0 {
assert_eq!(meta.last_index, num_shreds - 1);
} else {
assert_eq!(meta.last_index, std::u64::MAX);
}
blockstore.insert_shreds(even_shreds, None, false).unwrap();
assert_eq!(
blockstore.get_slot_entries(slot, 0).unwrap(),
original_entries,
);
let meta = blockstore.meta(slot).unwrap().unwrap();
assert_eq!(meta.received, num_shreds);
assert_eq!(meta.consumed, num_shreds);
assert_eq!(meta.parent_slot, parent_slot);
assert_eq!(meta.last_index, num_shreds - 1);
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_data_set_completed_on_insert() {
let ledger_path = get_tmp_ledger_path!();
let BlockstoreSignals { blockstore, .. } =
Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
// Create enough entries to fill 2 shreds, only the later one is data complete
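// Note: a CompletedDataSetInfo is only emitted once a gap-free run of shreds
// ending in a data-complete shred exists. Inserting shreds[1..] leaves a hole
// at index 0, so the first insertion below reports no completed sets.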
let slot = 0;
let num_entries = max_ticks_per_n_shreds(1, None) + 1;
let entries = create_ticks(num_entries, slot, Hash::default());
let shreds = entries_to_test_shreds(entries, slot, 0, true, 0);
let num_shreds = shreds.len();
assert!(num_shreds > 1);
assert!(blockstore
.insert_shreds(shreds[1..].to_vec(), None, false)
.unwrap()
.0
.is_empty());
assert_eq!(
blockstore
.insert_shreds(vec![shreds[0].clone()], None, false)
.unwrap()
.0,
vec![CompletedDataSetInfo {
slot,
start_index: 0,
end_index: num_shreds as u32 - 1
}]
);
// Inserting shreds again doesn't trigger notification
assert!(blockstore
.insert_shreds(shreds, None, false)
.unwrap()
.0
.is_empty());
}
#[test]
pub fn test_new_shreds_signal() {
// Initialize ledger
let ledger_path = get_tmp_ledger_path!();
let BlockstoreSignals {
blockstore: ledger,
ledger_signal_receiver: recvr,
..
} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
let ledger = Arc::new(ledger);
let entries_per_slot = 50;
// Create entries for slot 0
let (mut shreds, _) = make_slot_entries(0, 0, entries_per_slot);
let shreds_per_slot = shreds.len() as u64;
// Insert second shred, but we're missing the first shred, so no consecutive
// shreds starting from slot 0, index 0 should exist.
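// (The signal below only fires once `consumed` advances, i.e. when new
// consecutive shreds become available starting from index 0.)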
ledger
.insert_shreds(vec![shreds.remove(1)], None, false)
.unwrap();
let timer = Duration::new(1, 0);
assert!(recvr.recv_timeout(timer).is_err());
// Insert first shred, now we've made a consecutive block
ledger
.insert_shreds(vec![shreds.remove(0)], None, false)
.unwrap();
// Wait to get notified of update, should only be one update
assert!(recvr.recv_timeout(timer).is_ok());
assert!(recvr.try_recv().is_err());
// Insert the rest of the ticks
ledger.insert_shreds(shreds, None, false).unwrap();
// Wait to get notified of update, should only be one update
assert!(recvr.recv_timeout(timer).is_ok());
assert!(recvr.try_recv().is_err());
// Create some other slots, and send batches of ticks for each slot such that each slot
// is missing the tick at shred index == slot index - 1. Thus, no consecutive blocks
// will be formed
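// E.g. slot 1 is missing the shred at index 0, slot 2 the shred at index 1,
// and so on, so every slot stays incomplete.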
let num_slots = shreds_per_slot;
let mut shreds = vec![];
let mut missing_shreds = vec![];
for slot in 1..num_slots + 1 {
let (mut slot_shreds, _) = make_slot_entries(slot, slot - 1, entries_per_slot);
let missing_shred = slot_shreds.remove(slot as usize - 1);
shreds.extend(slot_shreds);
missing_shreds.push(missing_shred);
}
// Should be no updates, since no new chains from block 0 were formed
ledger.insert_shreds(shreds, None, false).unwrap();
assert!(recvr.recv_timeout(timer).is_err());
// Insert a shred for each slot that doesn't make a consecutive block, we
// should get no updates
let shreds: Vec<_> = (1..num_slots + 1)
.flat_map(|slot| {
let (mut shred, _) = make_slot_entries(slot, slot - 1, 1);
shred[0].set_index(2 * num_slots as u32);
shred
})
.collect();
ledger.insert_shreds(shreds, None, false).unwrap();
assert!(recvr.recv_timeout(timer).is_err());
// For slots 1..num_slots/2, fill in the holes in one batch insertion,
// so we should only get one signal
let missing_shreds2 = missing_shreds
.drain((num_slots / 2) as usize..)
.collect_vec();
ledger.insert_shreds(missing_shreds, None, false).unwrap();
assert!(recvr.recv_timeout(timer).is_ok());
assert!(recvr.try_recv().is_err());
// Fill in the holes for each of the remaining slots, we should get a single update
// for each
ledger.insert_shreds(missing_shreds2, None, false).unwrap();
// Destroying database without closing it first is undefined behavior
drop(ledger);
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_completed_shreds_signal() {
// Initialize ledger
let ledger_path = get_tmp_ledger_path!();
let BlockstoreSignals {
blockstore: ledger,
completed_slots_receiver: recvr,
..
} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
let ledger = Arc::new(ledger);
let entries_per_slot = 10;
// Create shreds for slot 0
let (mut shreds, _) = make_slot_entries(0, 0, entries_per_slot);
let shred0 = shreds.remove(0);
// Insert all but the first shred in the slot, should not be considered complete
ledger.insert_shreds(shreds, None, false).unwrap();
assert!(recvr.try_recv().is_err());
// Insert first shred, slot should now be considered complete
ledger.insert_shreds(vec![shred0], None, false).unwrap();
assert_eq!(recvr.try_recv().unwrap(), vec![0]);
}
#[test]
pub fn test_completed_shreds_signal_orphans() {
// Initialize ledger
let ledger_path = get_tmp_ledger_path!();
let BlockstoreSignals {
blockstore: ledger,
completed_slots_receiver: recvr,
..
} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
let ledger = Arc::new(ledger);
let entries_per_slot = 10;
let slots = vec![2, 5, 10];
let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot);
// Get the shreds for slot 10, chaining to slot 5
let (mut orphan_child, _) = all_shreds.remove(2);
// Get the shreds for slot 5 chaining to slot 2
let (mut orphan_shreds, _) = all_shreds.remove(1);
// Insert all but the first shred in the slot, should not be considered complete
let orphan_child0 = orphan_child.remove(0);
ledger.insert_shreds(orphan_child, None, false).unwrap();
assert!(recvr.try_recv().is_err());
// Insert first shred, slot should now be considered complete
ledger
.insert_shreds(vec![orphan_child0], None, false)
.unwrap();
assert_eq!(recvr.try_recv().unwrap(), vec![slots[2]]);
// Insert the shreds for the orphan_slot
let orphan_shred0 = orphan_shreds.remove(0);
ledger.insert_shreds(orphan_shreds, None, false).unwrap();
assert!(recvr.try_recv().is_err());
// Insert first shred, slot should now be considered complete
ledger
.insert_shreds(vec![orphan_shred0], None, false)
.unwrap();
assert_eq!(recvr.try_recv().unwrap(), vec![slots[1]]);
}
#[test]
pub fn test_completed_shreds_signal_many() {
// Initialize ledger
let ledger_path = get_tmp_ledger_path!();
let BlockstoreSignals {
blockstore: ledger,
completed_slots_receiver: recvr,
..
} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
let ledger = Arc::new(ledger);
let entries_per_slot = 10;
let mut slots = vec![2, 5, 10];
let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot);
let disconnected_slot = 4;
let (shreds0, _) = all_shreds.remove(0);
let (shreds1, _) = all_shreds.remove(0);
let (shreds2, _) = all_shreds.remove(0);
let (shreds3, _) = make_slot_entries(disconnected_slot, 1, entries_per_slot);
let mut all_shreds: Vec<_> = vec![shreds0, shreds1, shreds2, shreds3]
.into_iter()
.flatten()
.collect();
all_shreds.shuffle(&mut thread_rng());
ledger.insert_shreds(all_shreds, None, false).unwrap();
let mut result = recvr.try_recv().unwrap();
result.sort_unstable();
slots.push(disconnected_slot);
slots.sort_unstable();
assert_eq!(result, slots);
}
#[test]
pub fn test_handle_chaining_basic() {
let blockstore_path = get_tmp_ledger_path!();
{
let entries_per_slot = 5;
let num_slots = 3;
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Construct the shreds
let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
let shreds_per_slot = shreds.len() / num_slots as usize;
// 1) Write to the first slot
let shreds1 = shreds
.drain(shreds_per_slot..2 * shreds_per_slot)
.collect_vec();
blockstore.insert_shreds(shreds1, None, false).unwrap();
let s1 = blockstore.meta(1).unwrap().unwrap();
assert!(s1.next_slots.is_empty());
// Slot 1 is not trunk because slot 0 hasn't been inserted yet
assert!(!s1.is_connected);
assert_eq!(s1.parent_slot, 0);
assert_eq!(s1.last_index, shreds_per_slot as u64 - 1);
// 2) Write to the second slot
let shreds2 = shreds
.drain(shreds_per_slot..2 * shreds_per_slot)
.collect_vec();
blockstore.insert_shreds(shreds2, None, false).unwrap();
let s2 = blockstore.meta(2).unwrap().unwrap();
assert!(s2.next_slots.is_empty());
// Slot 2 is not trunk because slot 0 hasn't been inserted yet
assert!(!s2.is_connected);
assert_eq!(s2.parent_slot, 1);
assert_eq!(s2.last_index, shreds_per_slot as u64 - 1);
// Check the first slot again, it should chain to the second slot,
// but still isn't part of the trunk
let s1 = blockstore.meta(1).unwrap().unwrap();
assert_eq!(s1.next_slots, vec![2]);
assert!(!s1.is_connected);
assert_eq!(s1.parent_slot, 0);
assert_eq!(s1.last_index, shreds_per_slot as u64 - 1);
// 3) Write to the zeroth slot, check that every slot
// is now part of the trunk
blockstore.insert_shreds(shreds, None, false).unwrap();
for i in 0..3 {
let s = blockstore.meta(i).unwrap().unwrap();
// The last slot will not chain to any other slots
if i != 2 {
assert_eq!(s.next_slots, vec![i + 1]);
}
if i == 0 {
assert_eq!(s.parent_slot, 0);
} else {
assert_eq!(s.parent_slot, i - 1);
}
assert_eq!(s.last_index, shreds_per_slot as u64 - 1);
assert!(s.is_connected);
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_handle_chaining_missing_slots() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_slots = 30;
let entries_per_slot = 5;
// Separate every other slot into two separate vectors
let mut slots = vec![];
let mut missing_slots = vec![];
let mut shreds_per_slot = 2;
for slot in 0..num_slots {
let parent_slot = {
if slot == 0 {
0
} else {
slot - 1
}
};
let (slot_shreds, _) = make_slot_entries(slot, parent_slot, entries_per_slot);
shreds_per_slot = slot_shreds.len();
if slot % 2 == 1 {
slots.extend(slot_shreds);
} else {
missing_slots.extend(slot_shreds);
}
}
// Write the shreds for every other slot
blockstore.insert_shreds(slots, None, false).unwrap();
// Check metadata
for i in 0..num_slots {
// If "i" is the index of a slot we just inserted, then next_slots should be empty
// for slot "i" because no slots chain to that slot, because slot i + 1 is missing.
// However, if it's a slot we haven't inserted, aka one of the gaps, then one of the
// slots we just inserted will chain to that gap, so next_slots for that orphan slot
// won't be empty, but the parent slot is unknown so should equal std::u64::MAX.
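// E.g. slot 0 was withheld, so inserting slot 1 creates an orphan SlotMeta for
// slot 0 with next_slots == [1] and parent_slot == std::u64::MAX.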
let s = blockstore.meta(i as u64).unwrap().unwrap();
if i % 2 == 0 {
assert_eq!(s.next_slots, vec![i as u64 + 1]);
assert_eq!(s.parent_slot, std::u64::MAX);
} else {
assert!(s.next_slots.is_empty());
assert_eq!(s.parent_slot, i - 1);
}
if i == 0 {
assert!(s.is_connected);
} else {
assert!(!s.is_connected);
}
}
// Write the shreds for the other half of the slots that we didn't insert earlier
blockstore
.insert_shreds(missing_slots, None, false)
.unwrap();
for i in 0..num_slots {
// Check that all the slots chain correctly once the missing slots
// have been filled
let s = blockstore.meta(i as u64).unwrap().unwrap();
if i != num_slots - 1 {
assert_eq!(s.next_slots, vec![i as u64 + 1]);
} else {
assert!(s.next_slots.is_empty());
}
if i == 0 {
assert_eq!(s.parent_slot, 0);
} else {
assert_eq!(s.parent_slot, i - 1);
}
assert_eq!(s.last_index, shreds_per_slot as u64 - 1);
assert!(s.is_connected);
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
#[allow(clippy::cognitive_complexity)]
pub fn test_forward_chaining_is_connected() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_slots = 15;
// Create enough entries to ensure there are at least two shreds created
let entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
assert!(entries_per_slot > 1);
let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
let shreds_per_slot = shreds.len() / num_slots as usize;
assert!(shreds_per_slot > 1);
// Write the shreds such that every 3rd slot has a gap in the beginning
let mut missing_shreds = vec![];
for slot in 0..num_slots {
let mut shreds_for_slot = shreds.drain(..shreds_per_slot).collect_vec();
if slot % 3 == 0 {
let shred0 = shreds_for_slot.remove(0);
missing_shreds.push(shred0);
}
blockstore
.insert_shreds(shreds_for_slot, None, false)
.unwrap();
}
// Check metadata
for i in 0..num_slots {
let s = blockstore.meta(i as u64).unwrap().unwrap();
// The last slot will not chain to any other slots
if i as u64 != num_slots - 1 {
assert_eq!(s.next_slots, vec![i as u64 + 1]);
} else {
assert!(s.next_slots.is_empty());
}
if i == 0 {
assert_eq!(s.parent_slot, 0);
} else {
assert_eq!(s.parent_slot, i - 1);
}
assert_eq!(s.last_index, shreds_per_slot as u64 - 1);
// Other than slot 0, no slots should be part of the trunk
if i != 0 {
assert!(!s.is_connected);
} else {
assert!(s.is_connected);
}
}
// Iteratively finish every 3rd slot, and check that all slots up to and including
// slot_index + 3 become part of the trunk
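// (Per the assertions below, a slot counts as connected once every slot before
// it is full, so filling slot_index's hole connects slots through
// slot_index + 3, even though slot_index + 3 still lacks its own first shred.)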
for slot_index in 0..num_slots {
if slot_index % 3 == 0 {
let shred = missing_shreds.remove(0);
blockstore.insert_shreds(vec![shred], None, false).unwrap();
for i in 0..num_slots {
let s = blockstore.meta(i as u64).unwrap().unwrap();
if i != num_slots - 1 {
assert_eq!(s.next_slots, vec![i as u64 + 1]);
} else {
assert!(s.next_slots.is_empty());
}
if i <= slot_index as u64 + 3 {
assert!(s.is_connected);
} else {
assert!(!s.is_connected);
}
if i == 0 {
assert_eq!(s.parent_slot, 0);
} else {
assert_eq!(s.parent_slot, i - 1);
}
assert_eq!(s.last_index, shreds_per_slot as u64 - 1);
}
}
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
/*
#[test]
pub fn test_chaining_tree() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_tree_levels = 6;
assert!(num_tree_levels > 1);
let branching_factor: u64 = 4;
// Number of slots that will be in the tree
let num_slots = (branching_factor.pow(num_tree_levels) - 1) / (branching_factor - 1);
let erasure_config = ErasureConfig::default();
let entries_per_slot = erasure_config.num_data() as u64;
assert!(entries_per_slot > 1);
let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
// Insert tree one slot at a time in a random order
let mut slots: Vec<_> = (0..num_slots).collect();
// Get shreds for the slot
slots.shuffle(&mut thread_rng());
for slot in slots {
// Get shreds for the slot "slot"
let slot_shreds = &mut shreds
[(slot * entries_per_slot) as usize..((slot + 1) * entries_per_slot) as usize];
for shred in slot_shreds.iter_mut() {
// Get the parent slot of the slot in the tree
let slot_parent = {
if slot == 0 {
0
} else {
(slot - 1) / branching_factor
}
};
shred.set_parent(slot_parent);
}
let shared_shreds: Vec<_> = slot_shreds
.iter()
.cloned()
.map(|shred| Arc::new(RwLock::new(shred)))
.collect();
let mut coding_generator = CodingGenerator::new_from_config(&erasure_config);
let coding_shreds = coding_generator.next(&shared_shreds);
assert_eq!(coding_shreds.len(), erasure_config.num_coding());
let mut rng = thread_rng();
// Randomly pick whether to insert erasure or coding shreds first
if rng.gen_bool(0.5) {
blockstore.write_shreds(slot_shreds).unwrap();
blockstore.put_shared_coding_shreds(&coding_shreds).unwrap();
} else {
blockstore.put_shared_coding_shreds(&coding_shreds).unwrap();
blockstore.write_shreds(slot_shreds).unwrap();
}
}
// Make sure everything chains correctly
let last_level =
(branching_factor.pow(num_tree_levels - 1) - 1) / (branching_factor - 1);
for slot in 0..num_slots {
let slot_meta = blockstore.meta(slot).unwrap().unwrap();
assert_eq!(slot_meta.consumed, entries_per_slot);
assert_eq!(slot_meta.received, entries_per_slot);
assert!(slot_meta.is_connected);
let slot_parent = {
if slot == 0 {
0
} else {
(slot - 1) / branching_factor
}
};
assert_eq!(slot_meta.parent_slot, slot_parent);
let expected_children: HashSet<_> = {
if slot >= last_level {
HashSet::new()
} else {
let first_child_slot = min(num_slots - 1, slot * branching_factor + 1);
let last_child_slot = min(num_slots - 1, (slot + 1) * branching_factor);
(first_child_slot..last_child_slot + 1).collect()
}
};
let result: HashSet<_> = slot_meta.next_slots.iter().cloned().collect();
if expected_children.len() != 0 {
assert_eq!(slot_meta.next_slots.len(), branching_factor as usize);
} else {
assert_eq!(slot_meta.next_slots.len(), 0);
}
assert_eq!(expected_children, result);
}
// No orphan slots should exist
assert!(blockstore.orphans_cf.is_empty().unwrap())
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
*/
#[test]
pub fn test_get_slots_since() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Slot doesn't exist
assert!(blockstore.get_slots_since(&[0]).unwrap().is_empty());
let mut meta0 = SlotMeta::new(0, 0);
blockstore.meta_cf.put(0, &meta0).unwrap();
// Slot exists, chains to nothing
let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![])].into_iter().collect();
assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
meta0.next_slots = vec![1, 2];
blockstore.meta_cf.put(0, &meta0).unwrap();
// Slot exists, chains to some other slots
let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2])].into_iter().collect();
assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
assert_eq!(blockstore.get_slots_since(&[0, 1]).unwrap(), expected);
let mut meta3 = SlotMeta::new(3, 1);
meta3.next_slots = vec![10, 5];
blockstore.meta_cf.put(3, &meta3).unwrap();
let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2]), (3, vec![10, 5])]
.into_iter()
.collect();
assert_eq!(blockstore.get_slots_since(&[0, 1, 3]).unwrap(), expected);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_orphans() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Create shreds and entries
let entries_per_slot = 1;
let (mut shreds, _) = make_many_slot_entries(0, 3, entries_per_slot);
let shreds_per_slot = shreds.len() / 3;
// Write slot 2, which chains to slot 1. We're missing slot 0,
// so slot 1 is the orphan
let shreds_for_slot = shreds.drain((shreds_per_slot * 2)..).collect_vec();
blockstore
.insert_shreds(shreds_for_slot, None, false)
.unwrap();
let meta = blockstore
.meta(1)
.expect("Expect database get to succeed")
.unwrap();
assert!(is_orphan(&meta));
assert_eq!(
blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
vec![1]
);
// Write slot 1 which chains to slot 0, so now slot 0 is the
// orphan, and slot 1 is no longer the orphan.
let shreds_for_slot = shreds.drain(shreds_per_slot..).collect_vec();
blockstore
.insert_shreds(shreds_for_slot, None, false)
.unwrap();
let meta = blockstore
.meta(1)
.expect("Expect database get to succeed")
.unwrap();
assert!(!is_orphan(&meta));
let meta = blockstore
.meta(0)
.expect("Expect database get to succeed")
.unwrap();
assert!(is_orphan(&meta));
assert_eq!(
blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
vec![0]
);
// Write some slots that also chain to existing slots and the orphan,
// nothing should change
let (shred4, _) = make_slot_entries(4, 0, 1);
let (shred5, _) = make_slot_entries(5, 1, 1);
blockstore.insert_shreds(shred4, None, false).unwrap();
blockstore.insert_shreds(shred5, None, false).unwrap();
assert_eq!(
blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(),
vec![0]
);
// Write zeroth slot, no more orphans
blockstore.insert_shreds(shreds, None, false).unwrap();
for i in 0..3 {
let meta = blockstore
.meta(i)
.expect("Expect database get to succeed")
.unwrap();
assert!(!is_orphan(&meta));
}
// Orphans cf is empty
assert!(blockstore.orphans_cf.is_empty().unwrap())
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
fn test_insert_data_shreds_slots(name: &str, should_bulk_write: bool) {
let blockstore_path = get_ledger_path_from_name(name);
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Create shreds and entries
let num_entries = 20_u64;
let mut entries = vec![];
let mut shreds = vec![];
let mut num_shreds_per_slot = 0;
for slot in 0..num_entries {
let parent_slot = {
if slot == 0 {
0
} else {
slot - 1
}
};
let (mut shred, entry) = make_slot_entries(slot, parent_slot, 1);
num_shreds_per_slot = shred.len() as u64;
shred
.iter_mut()
.for_each(|shred| shred.set_index(0));
shreds.extend(shred);
entries.extend(entry);
}
let num_shreds = shreds.len();
// Write shreds to the database
if should_bulk_write {
blockstore.insert_shreds(shreds, None, false).unwrap();
} else {
for _ in 0..num_shreds {
let shred = shreds.remove(0);
blockstore.insert_shreds(vec![shred], None, false).unwrap();
}
}
for i in 0..num_entries - 1 {
assert_eq!(
blockstore.get_slot_entries(i, 0).unwrap()[0],
entries[i as usize]
);
let meta = blockstore.meta(i).unwrap().unwrap();
assert_eq!(meta.received, 1);
assert_eq!(meta.last_index, 0);
if i != 0 {
assert_eq!(meta.parent_slot, i - 1);
assert_eq!(meta.consumed, 1);
} else {
assert_eq!(meta.parent_slot, 0);
assert_eq!(meta.consumed, num_shreds_per_slot);
}
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_find_missing_data_indexes() {
let slot = 0;
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Write entries
let gap: u64 = 10;
assert!(gap > 3);
// Create enough entries to ensure there are at least two shreds created
let num_entries = max_ticks_per_n_shreds(1, None) + 1;
let entries = create_ticks(num_entries, 0, Hash::default());
let mut shreds = entries_to_test_shreds(entries, slot, 0, true, 0);
let num_shreds = shreds.len();
assert!(num_shreds > 1);
for (i, s) in shreds.iter_mut().enumerate() {
s.set_index(i as u32 * gap as u32);
s.set_slot(slot);
}
blockstore.insert_shreds(shreds, None, false).unwrap();
// Index of the first shred is 0
// Index of the second shred is "gap"
// Thus, the missing indexes should then be [1, gap - 1] for the input index
// range of [0, gap)
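// E.g. with gap == 10, the inserted indexes are 0, 10, 20, ..., so querying
// [0, 10) should report indexes 1 through 9 as missing.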
let expected: Vec<u64> = (1..gap).collect();
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, 0, gap, gap as usize),
expected
);
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, 1, gap, (gap - 1) as usize),
expected,
);
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, 0, gap - 1, (gap - 1) as usize),
&expected[..expected.len() - 1],
);
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, gap - 2, gap, gap as usize),
vec![gap - 2, gap - 1],
);
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, gap - 2, gap, 1),
vec![gap - 2],
);
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, 0, gap, 1),
vec![1],
);
// Test with a range that encompasses a shred with index == gap which was
// already inserted.
let mut expected: Vec<u64> = (1..gap).collect();
expected.push(gap + 1);
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap + 2) as usize),
expected,
);
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap - 1) as usize),
&expected[..expected.len() - 1],
);
for i in 0..num_shreds as u64 {
for j in 0..i {
let expected: Vec<u64> = (j..i)
.flat_map(|k| {
let begin = k * gap + 1;
let end = (k + 1) * gap;
begin..end
})
.collect();
assert_eq!(
blockstore.find_missing_data_indexes(
slot,
0,
j * gap,
i * gap,
((i - j) * gap) as usize
),
expected,
);
}
}
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_find_missing_data_indexes_timeout() {
let slot = 0;
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Write entries
let gap: u64 = 10;
let shreds: Vec<_> = (0..64)
.map(|i| {
Shred::new_from_data(
slot,
(i * gap) as u32,
0,
None,
false,
false,
i as u8,
0,
(i * gap) as u32,
)
})
.collect();
blockstore.insert_shreds(shreds, None, false).unwrap();
let empty: Vec<u64> = vec![];
assert_eq!(
blockstore.find_missing_data_indexes(slot, timestamp(), 0, 50, 1),
empty
);
let expected: Vec<_> = (1..=9).collect();
assert_eq!(
blockstore.find_missing_data_indexes(slot, timestamp() - 400, 0, 50, 9),
expected
);
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_find_missing_data_indexes_sanity() {
let slot = 0;
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Early exit conditions
let empty: Vec<u64> = vec![];
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, 0, 0, 1),
empty
);
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, 5, 5, 1),
empty
);
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, 4, 3, 1),
empty
);
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, 1, 2, 0),
empty
);
let entries = create_ticks(100, 0, Hash::default());
let mut shreds = entries_to_test_shreds(entries, slot, 0, true, 0);
assert!(shreds.len() > 2);
shreds.drain(2..);
const ONE: u64 = 1;
const OTHER: u64 = 4;
shreds[0].set_index(ONE as u32);
shreds[1].set_index(OTHER as u32);
// Insert two shreds, at indexes ONE and OTHER
blockstore.insert_shreds(shreds, None, false).unwrap();
const STARTS: u64 = OTHER * 2;
const END: u64 = OTHER * 3;
const MAX: usize = 10;
// Only indexes ONE and OTHER were inserted. Thus, for any start index in
// [0, STARTS), the missing indexes over the input range [start, END) are
// everything in that range except ONE and OTHER.
for start in 0..STARTS {
let result = blockstore.find_missing_data_indexes(
slot, 0, start, // start
END, // end
MAX, // max
);
let expected: Vec<u64> = (start..END).filter(|i| *i != ONE && *i != OTHER).collect();
assert_eq!(result, expected);
}
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_no_missing_shred_indexes() {
let slot = 0;
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Write entries
let num_entries = 10;
let entries = create_ticks(num_entries, 0, Hash::default());
let shreds = entries_to_test_shreds(entries, slot, 0, true, 0);
let num_shreds = shreds.len();
blockstore.insert_shreds(shreds, None, false).unwrap();
let empty: Vec<u64> = vec![];
for i in 0..num_shreds as u64 {
for j in 0..i {
assert_eq!(
blockstore.find_missing_data_indexes(slot, 0, j, i, (i - j) as usize),
empty
);
}
}
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_should_insert_data_shred() {
solana_logger::setup();
let (mut shreds, _) = make_slot_entries(0, 0, 200);
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let last_root = RwLock::new(0);
// Insert the first 5 shreds, we don't have a "is_last" shred yet
blockstore
.insert_shreds(shreds[0..5].to_vec(), None, false)
.unwrap();
let slot_meta = blockstore.meta(0).unwrap().unwrap();
// Corrupt shred by making it too large
let mut shred5 = shreds[5].clone();
shred5.payload.push(10);
shred5.data_header.size = shred5.payload.len() as u16;
assert!(!blockstore.should_insert_data_shred(
&shred5,
&slot_meta,
&HashMap::new(),
&last_root,
None,
ShredSource::Turbine
));
// Ensure that an empty shred (one with no data) would get inserted. Such shreds
// may be used as signals (broadcast does so to indicate a slot was interrupted)
// Reuse shred5's header values to avoid a false negative result
let mut empty_shred = Shred::new_from_data(
shred5.common_header.slot,
shred5.common_header.index,
shred5.data_header.parent_offset,
None, // data
true, // is_last_data
true, // is_last_in_slot
0, // reference_tick
shred5.common_header.version,
shred5.common_header.fec_set_index,
);
assert!(blockstore.should_insert_data_shred(
&empty_shred,
&slot_meta,
&HashMap::new(),
&last_root,
None,
ShredSource::Repaired,
));
empty_shred.data_header.size = 0;
assert!(!blockstore.should_insert_data_shred(
&empty_shred,
&slot_meta,
&HashMap::new(),
&last_root,
None,
ShredSource::Recovered,
));
// Trying to insert another "is_last" shred with index < the received index should fail
// skip over shred 7
blockstore
.insert_shreds(shreds[8..9].to_vec(), None, false)
.unwrap();
let slot_meta = blockstore.meta(0).unwrap().unwrap();
assert_eq!(slot_meta.received, 9);
let shred7 = {
if shreds[7].is_data() {
shreds[7].set_last_in_slot();
shreds[7].clone()
} else {
panic!("Shred in unexpected format")
}
};
assert!(!blockstore.should_insert_data_shred(
&shred7,
&slot_meta,
&HashMap::new(),
&last_root,
None,
ShredSource::Repaired,
));
assert!(blockstore.has_duplicate_shreds_in_slot(0));
// Insert all pending shreds
let mut shred8 = shreds[8].clone();
blockstore.insert_shreds(shreds, None, false).unwrap();
let slot_meta = blockstore.meta(0).unwrap().unwrap();
// Trying to insert a shred with index > the "is_last" shred should fail
if shred8.is_data() {
shred8.set_index((slot_meta.last_index + 1) as u32);
} else {
panic!("Shred in unexpected format")
}
assert!(!blockstore.should_insert_data_shred(
&shred8,
&slot_meta,
&HashMap::new(),
&last_root,
None,
ShredSource::Repaired,
));
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_is_data_shred_present() {
let (shreds, _) = make_slot_entries(0, 0, 200);
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let index_cf = blockstore.db.column::<cf::Index>();
blockstore
.insert_shreds(shreds[0..5].to_vec(), None, false)
.unwrap();
// Insert a shred less than `slot_meta.consumed`, check that
// it already exists
let slot_meta = blockstore.meta(0).unwrap().unwrap();
let index = index_cf.get(0).unwrap().unwrap();
assert_eq!(slot_meta.consumed, 5);
assert!(Blockstore::is_data_shred_present(
&shreds[1],
&slot_meta,
index.data(),
));
// Insert a shred, check that it already exists
blockstore
.insert_shreds(shreds[6..7].to_vec(), None, false)
.unwrap();
let slot_meta = blockstore.meta(0).unwrap().unwrap();
let index = index_cf.get(0).unwrap().unwrap();
assert!(Blockstore::is_data_shred_present(
&shreds[6],
&slot_meta,
index.data()
));
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_check_cache_coding_shred() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let slot = 1;
let (shred, coding) = Shredder::new_coding_shred_header(
slot, 11, // index
11, // fec_set_index
11, // num_data_shreds
11, // num_coding_shreds
8, // position
0, // version
);
let coding_shred =
Shred::new_empty_from_header(shred, DataShredHeader::default(), coding);
let mut erasure_metas = HashMap::new();
let mut index_working_set = HashMap::new();
let mut just_received_coding_shreds = HashMap::new();
let mut index_meta_time = 0;
assert!(blockstore.check_cache_coding_shred(
coding_shred.clone(),
&mut erasure_metas,
&mut index_working_set,
&mut just_received_coding_shreds,
&mut index_meta_time,
&|_shred| {
panic!("no dupes");
},
false,
false,
&mut BlockstoreInsertionMetrics::default(),
));
// insert again fails on dupe
use std::sync::atomic::{AtomicUsize, Ordering};
let counter = AtomicUsize::new(0);
assert!(!blockstore.check_cache_coding_shred(
coding_shred,
&mut erasure_metas,
&mut index_working_set,
&mut just_received_coding_shreds,
&mut index_meta_time,
&|_shred| {
counter.fetch_add(1, Ordering::Relaxed);
},
false,
false,
&mut BlockstoreInsertionMetrics::default(),
));
assert_eq!(counter.load(Ordering::Relaxed), 1);
}
}
#[test]
pub fn test_should_insert_coding_shred() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let last_root = RwLock::new(0);
let slot = 1;
let (mut shred, coding) = Shredder::new_coding_shred_header(
slot, 11, // index
11, // fec_set_index
11, // num_data_shreds
11, // num_coding_shreds
8, // position
0, // version
);
let coding_shred = Shred::new_empty_from_header(
shred.clone(),
DataShredHeader::default(),
coding.clone(),
);
// Insert a good coding shred
assert!(Blockstore::should_insert_coding_shred(
&coding_shred,
&last_root
));
// Insertion should succeed
blockstore
.insert_shreds(vec![coding_shred.clone()], None, false)
.unwrap();
// Trying to insert the same shred again should pass since this doesn't check for
// duplicate index
{
assert!(Blockstore::should_insert_coding_shred(
&coding_shred,
&last_root
));
}
shred.index += 1;
// Establish a baseline that works
{
let coding_shred = Shred::new_empty_from_header(
shred.clone(),
DataShredHeader::default(),
coding.clone(),
);
assert!(Blockstore::should_insert_coding_shred(
&coding_shred,
&last_root
));
}
// Trying to insert a shred with index < position should fail
{
let mut coding_shred = Shred::new_empty_from_header(
shred.clone(),
DataShredHeader::default(),
coding.clone(),
);
let index = coding_shred.index() - coding_shred.common_header.fec_set_index - 1;
coding_shred.set_index(index as u32);
assert!(!Blockstore::should_insert_coding_shred(
&coding_shred,
&last_root
));
}
// Trying to insert shred with num_coding == 0 should fail
{
let mut coding_shred = Shred::new_empty_from_header(
shred.clone(),
DataShredHeader::default(),
coding.clone(),
);
coding_shred.coding_header.num_coding_shreds = 0;
assert!(!Blockstore::should_insert_coding_shred(
&coding_shred,
&last_root
));
}
// Trying to insert shred with pos >= num_coding should fail
{
let mut coding_shred = Shred::new_empty_from_header(
shred.clone(),
DataShredHeader::default(),
coding.clone(),
);
let num_coding_shreds =
coding_shred.common_header.index - coding_shred.common_header.fec_set_index;
coding_shred.coding_header.num_coding_shreds = num_coding_shreds as u16;
assert!(!Blockstore::should_insert_coding_shred(
&coding_shred,
&last_root
));
}
// Trying to insert a shred whose fec_set_index and num_coding_shreds imply that
// the last coding shred has index > u32::MAX should fail
{
let mut coding_shred = Shred::new_empty_from_header(
shred.clone(),
DataShredHeader::default(),
coding.clone(),
);
coding_shred.common_header.fec_set_index = std::u32::MAX - 1;
coding_shred.coding_header.num_coding_shreds = 3;
coding_shred.common_header.index = std::u32::MAX - 1;
assert!(!Blockstore::should_insert_coding_shred(
&coding_shred,
&last_root
));
coding_shred.coding_header.num_coding_shreds = 2000;
assert!(!Blockstore::should_insert_coding_shred(
&coding_shred,
&last_root
));
// Decreasing num_coding_shreds brings the implied last index back within the allowed limit
coding_shred.coding_header.num_coding_shreds = 2;
assert!(Blockstore::should_insert_coding_shred(
&coding_shred,
&last_root
));
// Insertion should succeed
blockstore
.insert_shreds(vec![coding_shred], None, false)
.unwrap();
}
// Trying to insert value into slot <= than last root should fail
{
let mut coding_shred =
Shred::new_empty_from_header(shred, DataShredHeader::default(), coding);
coding_shred.set_slot(*last_root.read().unwrap());
assert!(!Blockstore::should_insert_coding_shred(
&coding_shred,
&last_root
));
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_insert_multiple_is_last() {
solana_logger::setup();
let (shreds, _) = make_slot_entries(0, 0, 20);
let num_shreds = shreds.len() as u64;
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
let slot_meta = blockstore.meta(0).unwrap().unwrap();
assert_eq!(slot_meta.consumed, num_shreds);
assert_eq!(slot_meta.received, num_shreds);
assert_eq!(slot_meta.last_index, num_shreds - 1);
assert!(slot_meta.is_full());
let (shreds, _) = make_slot_entries(0, 0, 22);
blockstore.insert_shreds(shreds, None, false).unwrap();
let slot_meta = blockstore.meta(0).unwrap().unwrap();
assert_eq!(slot_meta.consumed, num_shreds);
assert_eq!(slot_meta.received, num_shreds);
assert_eq!(slot_meta.last_index, num_shreds - 1);
assert!(slot_meta.is_full());
assert!(blockstore.has_duplicate_shreds_in_slot(0));
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_slot_data_iterator() {
// Construct the shreds
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let shreds_per_slot = 10;
let slots = vec![2, 4, 8, 12];
let all_shreds = make_chaining_slot_entries(&slots, shreds_per_slot);
let slot_8_shreds = all_shreds[2].0.clone();
for (slot_shreds, _) in all_shreds {
blockstore.insert_shreds(slot_shreds, None, false).unwrap();
}
// Slot doesn't exist, iterator should be empty
let shred_iter = blockstore.slot_data_iterator(5, 0).unwrap();
let result: Vec<_> = shred_iter.collect();
assert_eq!(result, vec![]);
// Test that the iterator for slot 8 contains what was inserted earlier
let shred_iter = blockstore.slot_data_iterator(8, 0).unwrap();
let result: Vec<Shred> = shred_iter
.filter_map(|(_, bytes)| Shred::new_from_serialized_shred(bytes.to_vec()).ok())
.collect();
assert_eq!(result.len(), slot_8_shreds.len());
assert_eq!(result, slot_8_shreds);
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_set_roots() {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let chained_slots = vec![0, 2, 4, 7, 12, 15];
assert_eq!(blockstore.last_root(), 0);
blockstore.set_roots(chained_slots.iter()).unwrap();
assert_eq!(blockstore.last_root(), 15);
for i in chained_slots {
assert!(blockstore.is_root(i));
}
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_is_skipped() {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let roots = vec![2, 4, 7, 12, 15];
blockstore.set_roots(roots.iter()).unwrap();
for i in 0..20 {
if i < 2 || roots.contains(&i) || i > 15 {
assert!(!blockstore.is_skipped(i));
} else {
assert!(blockstore.is_skipped(i));
}
}
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_iter_bounds() {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// slot 5 does not exist, iter should be ok and should be a no-op
blockstore
.slot_meta_iterator(5)
.unwrap()
.for_each(|_| panic!());
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_get_completed_data_ranges() {
let completed_data_end_indexes = [2, 4, 9, 11].iter().copied().collect();
// Consumed is 1, which means we're missing the shred at index 1; should return empty
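// (Only shred 0 is present, and the first data block does not end until index 2.)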
let start_index = 0;
let consumed = 1;
assert_eq!(
Blockstore::get_completed_data_ranges(
start_index,
&completed_data_end_indexes,
consumed
),
vec![]
);
let start_index = 0;
let consumed = 3;
assert_eq!(
Blockstore::get_completed_data_ranges(
start_index,
&completed_data_end_indexes,
consumed
),
vec![(0, 2)]
);
// Test all possible ranges:
//
// `consumed == completed_data_end_indexes[j] + 1`, means we have all the shreds up to index
// `completed_data_end_indexes[j] + 1`. Thus the completed data blocks is everything in the
// range:
// [start_index, completed_data_end_indexes[j]] ==
// [completed_data_end_indexes[i], completed_data_end_indexes[j]],
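// E.g. with end indexes {2, 4, 9, 11}, start_index == 2 and consumed == 10
// yield the ranges [(2, 2), (3, 4), (5, 9)].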
let completed_data_end_indexes: Vec<_> = completed_data_end_indexes.into_iter().collect();
for i in 0..completed_data_end_indexes.len() {
for j in i..completed_data_end_indexes.len() {
let start_index = completed_data_end_indexes[i];
let consumed = completed_data_end_indexes[j] + 1;
// When start_index == completed_data_end_indexes[i], then that means
// the shred with index == start_index is a single-shred data block,
// so the start index is the end index for that data block.
let mut expected = vec![(start_index, start_index)];
expected.extend(
completed_data_end_indexes[i..=j]
.windows(2)
.map(|end_indexes| (end_indexes[0] + 1, end_indexes[1])),
);
let completed_data_end_indexes =
completed_data_end_indexes.iter().copied().collect();
assert_eq!(
Blockstore::get_completed_data_ranges(
start_index,
&completed_data_end_indexes,
consumed
),
expected
);
}
}
}
#[test]
fn test_get_slot_entries_with_shred_count_corruption() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_ticks = 8;
let entries = create_ticks(num_ticks, 0, Hash::default());
let slot = 1;
let shreds = entries_to_test_shreds(entries, slot, 0, false, 0);
let next_shred_index = shreds.len();
blockstore
.insert_shreds(shreds, None, false)
.expect("Expected successful write of shreds");
assert_eq!(
blockstore.get_slot_entries(slot, 0).unwrap().len() as u64,
num_ticks
);
// Insert a shred whose payload won't deshred into entries
let shreds = vec![Shred::new_from_data(
slot,
next_shred_index as u32,
1,
Some(&[1, 1, 1]),
true,
true,
0,
0,
next_shred_index as u32,
)];
// With the corruption, nothing should be returned, even though an
// earlier data block was valid
blockstore
.insert_shreds(shreds, None, false)
.expect("Expected successful write of shreds");
assert!(blockstore.get_slot_entries(slot, 0).is_err());
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_no_insert_but_modify_slot_meta() {
// This tests correctness of the SlotMeta in various cases in which a shred
// gets filtered out by pre-insertion checks
let (shreds0, _) = make_slot_entries(0, 0, 200);
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Insert the first 5 shreds, we don't have a "is_last" shred yet
blockstore
.insert_shreds(shreds0[0..5].to_vec(), None, false)
.unwrap();
// Insert a repeated shred for slot 's' (it should get ignored), but also
// insert shreds that chain to 's'; the update should be visible in the
// SlotMeta for 's'.
let (mut shreds2, _) = make_slot_entries(2, 0, 200);
let (mut shreds3, _) = make_slot_entries(3, 0, 200);
shreds2.push(shreds0[1].clone());
shreds3.insert(0, shreds0[1].clone());
blockstore.insert_shreds(shreds2, None, false).unwrap();
let slot_meta = blockstore.meta(0).unwrap().unwrap();
assert_eq!(slot_meta.next_slots, vec![2]);
blockstore.insert_shreds(shreds3, None, false).unwrap();
let slot_meta = blockstore.meta(0).unwrap().unwrap();
assert_eq!(slot_meta.next_slots, vec![2, 3]);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_trusted_insert_shreds() {
// Make shred for slot 1
let (shreds1, _) = make_slot_entries(1, 0, 1);
let blockstore_path = get_tmp_ledger_path!();
let last_root = 100;
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore.set_roots(std::iter::once(&last_root)).unwrap();
// Insert will be ignored, since slot < root
blockstore
.insert_shreds(shreds1[..].to_vec(), None, false)
.unwrap();
assert!(blockstore.get_data_shred(1, 0).unwrap().is_none());
// Insert through trusted path will succeed
blockstore
.insert_shreds(shreds1[..].to_vec(), None, true)
.unwrap();
assert!(blockstore.get_data_shred(1, 0).unwrap().is_some());
}
}
#[test]
fn test_get_rooted_block() {
let slot = 10;
let entries = make_slot_entries_with_transactions(100);
let blockhash = get_last_hash(entries.iter()).unwrap();
let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
let more_shreds = entries_to_test_shreds(entries.clone(), slot + 1, slot, true, 0);
let unrooted_shreds = entries_to_test_shreds(entries.clone(), slot + 2, slot + 1, true, 0);
let ledger_path = get_tmp_ledger_path!();
let ledger = Blockstore::open(&ledger_path).unwrap();
ledger.insert_shreds(shreds, None, false).unwrap();
ledger.insert_shreds(more_shreds, None, false).unwrap();
ledger.insert_shreds(unrooted_shreds, None, false).unwrap();
ledger
.set_roots(vec![slot - 1, slot, slot + 1].iter())
.unwrap();
let parent_meta = SlotMeta {
parent_slot: std::u64::MAX,
..SlotMeta::default()
};
ledger
.put_meta_bytes(slot - 1, &serialize(&parent_meta).unwrap())
.unwrap();
let expected_transactions: Vec<TransactionWithStatusMeta> = entries
.iter()
.cloned()
.filter(|entry| !entry.is_tick())
.flat_map(|entry| entry.transactions)
.map(|transaction| {
let mut pre_balances: Vec<u64> = vec![];
let mut post_balances: Vec<u64> = vec![];
for (i, _account_key) in transaction.message.account_keys.iter().enumerate() {
pre_balances.push(i as u64 * 10);
post_balances.push(i as u64 * 11);
}
let signature = transaction.signatures[0];
let status = TransactionStatusMeta {
status: Ok(()),
fee: 42,
pre_balances: pre_balances.clone(),
post_balances: post_balances.clone(),
inner_instructions: Some(vec![]),
log_messages: Some(vec![]),
pre_token_balances: Some(vec![]),
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
}
.into();
ledger
.transaction_status_cf
.put_protobuf((0, signature, slot), &status)
.unwrap();
let status = TransactionStatusMeta {
status: Ok(()),
fee: 42,
pre_balances: pre_balances.clone(),
post_balances: post_balances.clone(),
inner_instructions: Some(vec![]),
log_messages: Some(vec![]),
pre_token_balances: Some(vec![]),
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
}
.into();
ledger
.transaction_status_cf
.put_protobuf((0, signature, slot + 1), &status)
.unwrap();
let status = TransactionStatusMeta {
status: Ok(()),
fee: 42,
pre_balances: pre_balances.clone(),
post_balances: post_balances.clone(),
inner_instructions: Some(vec![]),
log_messages: Some(vec![]),
pre_token_balances: Some(vec![]),
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
}
.into();
ledger
.transaction_status_cf
.put_protobuf((0, signature, slot + 2), &status)
.unwrap();
TransactionWithStatusMeta {
transaction,
meta: Some(TransactionStatusMeta {
status: Ok(()),
fee: 42,
pre_balances,
post_balances,
inner_instructions: Some(vec![]),
log_messages: Some(vec![]),
pre_token_balances: Some(vec![]),
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
}),
}
})
.collect();
// Even if marked as root, a slot that is empty of entries should return an error
let confirmed_block_err = ledger.get_rooted_block(slot - 1, true).unwrap_err();
assert_matches!(confirmed_block_err, BlockstoreError::SlotUnavailable);
// The previous_blockhash of `expected_block` is default because its parent slot is a root,
// but empty of entries (e.g. snapshot root slots). This now returns an error.
let confirmed_block_err = ledger.get_rooted_block(slot, true).unwrap_err();
assert_matches!(
confirmed_block_err,
BlockstoreError::ParentEntriesUnavailable
);
// Test if require_previous_blockhash is false
let confirmed_block = ledger.get_rooted_block(slot, false).unwrap();
assert_eq!(confirmed_block.transactions.len(), 100);
let expected_block = ConfirmedBlock {
transactions: expected_transactions.clone(),
parent_slot: slot - 1,
blockhash: blockhash.to_string(),
previous_blockhash: Hash::default().to_string(),
rewards: vec![],
block_time: None,
block_height: None,
};
assert_eq!(confirmed_block, expected_block);
let confirmed_block = ledger.get_rooted_block(slot + 1, true).unwrap();
assert_eq!(confirmed_block.transactions.len(), 100);
let mut expected_block = ConfirmedBlock {
transactions: expected_transactions.clone(),
parent_slot: slot,
blockhash: blockhash.to_string(),
previous_blockhash: blockhash.to_string(),
rewards: vec![],
block_time: None,
block_height: None,
};
assert_eq!(confirmed_block, expected_block);
let not_root = ledger.get_rooted_block(slot + 2, true).unwrap_err();
assert_matches!(not_root, BlockstoreError::SlotNotRooted);
let complete_block = ledger.get_complete_block(slot + 2, true).unwrap();
assert_eq!(complete_block.transactions.len(), 100);
let mut expected_complete_block = ConfirmedBlock {
transactions: expected_transactions,
parent_slot: slot + 1,
blockhash: blockhash.to_string(),
previous_blockhash: blockhash.to_string(),
rewards: vec![],
block_time: None,
block_height: None,
};
assert_eq!(complete_block, expected_complete_block);
// Test block_time & block_height return, if available
let timestamp = 1_576_183_541;
ledger.blocktime_cf.put(slot + 1, &timestamp).unwrap();
expected_block.block_time = Some(timestamp);
let block_height = slot - 2;
ledger.block_height_cf.put(slot + 1, &block_height).unwrap();
expected_block.block_height = Some(block_height);
let confirmed_block = ledger.get_rooted_block(slot + 1, true).unwrap();
assert_eq!(confirmed_block, expected_block);
let timestamp = 1_576_183_542;
ledger.blocktime_cf.put(slot + 2, &timestamp).unwrap();
expected_complete_block.block_time = Some(timestamp);
let block_height = slot - 1;
ledger.block_height_cf.put(slot + 2, &block_height).unwrap();
expected_complete_block.block_height = Some(block_height);
let complete_block = ledger.get_complete_block(slot + 2, true).unwrap();
assert_eq!(complete_block, expected_complete_block);
drop(ledger);
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn test_persist_transaction_status() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let transaction_status_cf = blockstore.db.column::<cf::TransactionStatus>();
let pre_balances_vec = vec![1, 2, 3];
let post_balances_vec = vec![3, 2, 1];
let inner_instructions_vec = vec![InnerInstructions {
index: 0,
instructions: vec![CompiledInstruction::new(1, &(), vec![0])],
}];
let log_messages_vec = vec![String::from("Test message\n")];
let pre_token_balances_vec = vec![];
let post_token_balances_vec = vec![];
let rewards_vec = vec![];
// result not found
assert!(transaction_status_cf
.get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
0,
Signature::default(),
0
))
.unwrap()
.is_none());
// insert value
let status = TransactionStatusMeta {
status: solana_sdk::transaction::Result::<()>::Err(
TransactionError::AccountNotFound,
),
fee: 5u64,
pre_balances: pre_balances_vec.clone(),
post_balances: post_balances_vec.clone(),
inner_instructions: Some(inner_instructions_vec.clone()),
log_messages: Some(log_messages_vec.clone()),
pre_token_balances: Some(pre_token_balances_vec.clone()),
post_token_balances: Some(post_token_balances_vec.clone()),
rewards: Some(rewards_vec.clone()),
}
.into();
assert!(transaction_status_cf
.put_protobuf((0, Signature::default(), 0), &status)
.is_ok());
// result found
let TransactionStatusMeta {
status,
fee,
pre_balances,
post_balances,
inner_instructions,
log_messages,
pre_token_balances,
post_token_balances,
rewards,
} = transaction_status_cf
.get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
0,
Signature::default(),
0,
))
.unwrap()
.unwrap()
.try_into()
.unwrap();
assert_eq!(status, Err(TransactionError::AccountNotFound));
assert_eq!(fee, 5u64);
assert_eq!(pre_balances, pre_balances_vec);
assert_eq!(post_balances, post_balances_vec);
assert_eq!(inner_instructions.unwrap(), inner_instructions_vec);
assert_eq!(log_messages.unwrap(), log_messages_vec);
assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec);
assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
assert_eq!(rewards.unwrap(), rewards_vec);
// insert value
let status = TransactionStatusMeta {
status: solana_sdk::transaction::Result::<()>::Ok(()),
fee: 9u64,
pre_balances: pre_balances_vec.clone(),
post_balances: post_balances_vec.clone(),
inner_instructions: Some(inner_instructions_vec.clone()),
log_messages: Some(log_messages_vec.clone()),
pre_token_balances: Some(pre_token_balances_vec.clone()),
post_token_balances: Some(post_token_balances_vec.clone()),
rewards: Some(rewards_vec.clone()),
}
.into();
assert!(transaction_status_cf
.put_protobuf((0, Signature::new(&[2u8; 64]), 9), &status)
.is_ok());
// result found
let TransactionStatusMeta {
status,
fee,
pre_balances,
post_balances,
inner_instructions,
log_messages,
pre_token_balances,
post_token_balances,
rewards,
} = transaction_status_cf
.get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
0,
Signature::new(&[2u8; 64]),
9,
))
.unwrap()
.unwrap()
.try_into()
.unwrap();
// deserialize
assert_eq!(status, Ok(()));
assert_eq!(fee, 9u64);
assert_eq!(pre_balances, pre_balances_vec);
assert_eq!(post_balances, post_balances_vec);
assert_eq!(inner_instructions.unwrap(), inner_instructions_vec);
assert_eq!(log_messages.unwrap(), log_messages_vec);
assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec);
assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
assert_eq!(rewards.unwrap(), rewards_vec);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
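// Walks the two-primary-index lifecycle: new statuses bump the active index's
// max_slot, a purge freezes the active index and routes writes to the other,
// and a later purge wipes the frozen index and swaps the roles back.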
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_transaction_status_index() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let transaction_status_index_cf = blockstore.db.column::<cf::TransactionStatusIndex>();
let slot0 = 10;
// Primary index column is initialized on Blockstore::open
assert!(transaction_status_index_cf.get(0).unwrap().is_some());
assert!(transaction_status_index_cf.get(1).unwrap().is_some());
for _ in 0..5 {
let random_bytes: Vec<u8> = (0..64).map(|_| rand::random::<u8>()).collect();
blockstore
.write_transaction_status(
slot0,
Signature::new(&random_bytes),
vec![&Pubkey::new(&random_bytes[0..32])],
vec![&Pubkey::new(&random_bytes[32..])],
TransactionStatusMeta::default(),
)
.unwrap();
}
// New statuses bump index 0 max_slot
assert_eq!(
transaction_status_index_cf.get(0).unwrap().unwrap(),
TransactionStatusIndexMeta {
max_slot: slot0,
frozen: false,
}
);
assert_eq!(
transaction_status_index_cf.get(1).unwrap().unwrap(),
TransactionStatusIndexMeta::default()
);
let first_status_entry = blockstore
.db
.iter::<cf::TransactionStatus>(IteratorMode::From(
cf::TransactionStatus::as_index(0),
IteratorDirection::Forward,
))
.unwrap()
.next()
.unwrap()
.0;
assert_eq!(first_status_entry.0, 0);
assert_eq!(first_status_entry.2, slot0);
let first_address_entry = blockstore
.db
.iter::<cf::AddressSignatures>(IteratorMode::From(
cf::AddressSignatures::as_index(0),
IteratorDirection::Forward,
))
.unwrap()
.next()
.unwrap()
.0;
assert_eq!(first_address_entry.0, 0);
assert_eq!(first_address_entry.2, slot0);
blockstore.run_purge(0, 8, PurgeType::PrimaryIndex).unwrap();
// First successful prune freezes index 0
assert_eq!(
transaction_status_index_cf.get(0).unwrap().unwrap(),
TransactionStatusIndexMeta {
max_slot: slot0,
frozen: true,
}
);
assert_eq!(
transaction_status_index_cf.get(1).unwrap().unwrap(),
TransactionStatusIndexMeta::default()
);
let slot1 = 20;
for _ in 0..5 {
let random_bytes: Vec<u8> = (0..64).map(|_| rand::random::<u8>()).collect();
blockstore
.write_transaction_status(
slot1,
Signature::new(&random_bytes),
vec![&Pubkey::new(&random_bytes[0..32])],
vec![&Pubkey::new(&random_bytes[32..])],
TransactionStatusMeta::default(),
)
.unwrap();
}
assert_eq!(
transaction_status_index_cf.get(0).unwrap().unwrap(),
TransactionStatusIndexMeta {
max_slot: slot0,
frozen: true,
}
);
// Index 0 is frozen, so new statuses bump index 1 max_slot
assert_eq!(
transaction_status_index_cf.get(1).unwrap().unwrap(),
TransactionStatusIndexMeta {
max_slot: slot1,
frozen: false,
}
);
// Index 0 statuses and address records still exist
let first_status_entry = blockstore
.db
.iter::<cf::TransactionStatus>(IteratorMode::From(
cf::TransactionStatus::as_index(0),
IteratorDirection::Forward,
))
.unwrap()
.next()
.unwrap()
.0;
assert_eq!(first_status_entry.0, 0);
assert_eq!(first_status_entry.2, slot0);
let first_address_entry = blockstore
.db
.iter::<cf::AddressSignatures>(IteratorMode::From(
cf::AddressSignatures::as_index(0),
IteratorDirection::Forward,
))
.unwrap()
.next()
.unwrap()
.0;
assert_eq!(first_address_entry.0, 0);
assert_eq!(first_address_entry.2, slot0);
// New statuses and address records are stored in index 1
let index1_first_status_entry = blockstore
.db
.iter::<cf::TransactionStatus>(IteratorMode::From(
cf::TransactionStatus::as_index(1),
IteratorDirection::Forward,
))
.unwrap()
.next()
.unwrap()
.0;
assert_eq!(index1_first_status_entry.0, 1);
assert_eq!(index1_first_status_entry.2, slot1);
let index1_first_address_entry = blockstore
.db
.iter::<cf::AddressSignatures>(IteratorMode::From(
cf::AddressSignatures::as_index(1),
IteratorDirection::Forward,
))
.unwrap()
.next()
.unwrap()
.0;
assert_eq!(index1_first_address_entry.0, 1);
assert_eq!(index1_first_address_entry.2, slot1);
blockstore
.run_purge(0, 18, PurgeType::PrimaryIndex)
.unwrap();
// Successful prune toggles TransactionStatusIndex
assert_eq!(
transaction_status_index_cf.get(0).unwrap().unwrap(),
TransactionStatusIndexMeta {
max_slot: 0,
frozen: false,
}
);
assert_eq!(
transaction_status_index_cf.get(1).unwrap().unwrap(),
TransactionStatusIndexMeta {
max_slot: slot1,
frozen: true,
}
);
// Index 0 has been pruned, so first status and address entries are now index 1
let first_status_entry = blockstore
.db
.iter::<cf::TransactionStatus>(IteratorMode::From(
cf::TransactionStatus::as_index(0),
IteratorDirection::Forward,
))
.unwrap()
.next()
.unwrap()
.0;
assert_eq!(first_status_entry.0, 1);
assert_eq!(first_status_entry.2, slot1);
let first_address_entry = blockstore
.db
.iter::<cf::AddressSignatures>(IteratorMode::From(
cf::AddressSignatures::as_index(0),
IteratorDirection::Forward,
))
.unwrap()
.next()
.unwrap()
.0;
assert_eq!(first_address_entry.0, 1);
assert_eq!(first_address_entry.2, slot1);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
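// Seeds statuses across both primary indexes over a forked slot layout, then
// checks that get_transaction_status_with_counter returns the rooted (or
// sufficiently confirmed) status and reports how many entries it scanned.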
#[test]
fn test_get_transaction_status() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// TransactionStatus column opens initialized with one entry at index 2
let transaction_status_cf = blockstore.db.column::<cf::TransactionStatus>();
let pre_balances_vec = vec![1, 2, 3];
let post_balances_vec = vec![3, 2, 1];
let status = TransactionStatusMeta {
status: solana_sdk::transaction::Result::<()>::Ok(()),
fee: 42u64,
pre_balances: pre_balances_vec,
post_balances: post_balances_vec,
inner_instructions: Some(vec![]),
log_messages: Some(vec![]),
pre_token_balances: Some(vec![]),
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
}
.into();
let signature1 = Signature::new(&[1u8; 64]);
let signature2 = Signature::new(&[2u8; 64]);
let signature3 = Signature::new(&[3u8; 64]);
let signature4 = Signature::new(&[4u8; 64]);
let signature5 = Signature::new(&[5u8; 64]);
let signature6 = Signature::new(&[6u8; 64]);
let signature7 = Signature::new(&[7u8; 64]);
// Insert slots with fork
// 0 (root)
// / \
// 1 |
// 2 (root)
// |
// 3
let meta0 = SlotMeta::new(0, 0);
blockstore.meta_cf.put(0, &meta0).unwrap();
let meta1 = SlotMeta::new(1, 0);
blockstore.meta_cf.put(1, &meta1).unwrap();
let meta2 = SlotMeta::new(2, 0);
blockstore.meta_cf.put(2, &meta2).unwrap();
let meta3 = SlotMeta::new(3, 2);
blockstore.meta_cf.put(3, &meta3).unwrap();
blockstore.set_roots(vec![0, 2].iter()).unwrap();
// Initialize index 0, including:
// signature2 in non-root and root,
// signature4 in non-root,
// signature5 in skipped slot and non-root,
// signature6 in skipped slot,
transaction_status_cf
.put_protobuf((0, signature2, 1), &status)
.unwrap();
transaction_status_cf
.put_protobuf((0, signature2, 2), &status)
.unwrap();
transaction_status_cf
.put_protobuf((0, signature4, 1), &status)
.unwrap();
transaction_status_cf
.put_protobuf((0, signature5, 1), &status)
.unwrap();
transaction_status_cf
.put_protobuf((0, signature5, 3), &status)
.unwrap();
transaction_status_cf
.put_protobuf((0, signature6, 1), &status)
.unwrap();
// Initialize index 1, including:
// signature4 in root,
// signature6 in non-root,
// signature5 extra entries
transaction_status_cf
.put_protobuf((1, signature4, 2), &status)
.unwrap();
transaction_status_cf
.put_protobuf((1, signature5, 4), &status)
.unwrap();
transaction_status_cf
.put_protobuf((1, signature5, 5), &status)
.unwrap();
transaction_status_cf
.put_protobuf((1, signature6, 3), &status)
.unwrap();
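// Note: the `if let` blocks below assert only when the lookup matches the
// pattern; a non-matching result silently skips the assertions. A stricter
// (hypothetical) variant of the first check could be written as:
//   match blockstore
//       .get_transaction_status_with_counter(signature2, &[])
//       .unwrap()
//   {
//       (Some((slot, _status)), counter) => {
//           assert_eq!(slot, 2);
//           assert_eq!(counter, 2);
//       }
//       _ => panic!("status not found"),
//   }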
// Signature exists, root found in index 0
if let (Some((slot, _status)), counter) = blockstore
.get_transaction_status_with_counter(signature2, &[])
.unwrap()
{
assert_eq!(slot, 2);
assert_eq!(counter, 2);
}
// Signature exists, root found although not required
if let (Some((slot, _status)), counter) = blockstore
.get_transaction_status_with_counter(signature2, &[3])
.unwrap()
{
assert_eq!(slot, 2);
assert_eq!(counter, 2);
}
// Signature exists, root found in index 1
if let (Some((slot, _status)), counter) = blockstore
.get_transaction_status_with_counter(signature4, &[])
.unwrap()
{
assert_eq!(slot, 2);
assert_eq!(counter, 3);
}
// Signature exists, root found although not required, in index 1
if let (Some((slot, _status)), counter) = blockstore
.get_transaction_status_with_counter(signature4, &[3])
.unwrap()
{
assert_eq!(slot, 2);
assert_eq!(counter, 3);
}
// Signature exists, no root found
let (status, counter) = blockstore
.get_transaction_status_with_counter(signature5, &[])
.unwrap();
assert_eq!(status, None);
assert_eq!(counter, 6);
// Signature exists, root not required
if let (Some((slot, _status)), counter) = blockstore
.get_transaction_status_with_counter(signature5, &[3])
.unwrap()
{
assert_eq!(slot, 3);
assert_eq!(counter, 2);
}
// Signature does not exist, smaller than existing entries
let (status, counter) = blockstore
.get_transaction_status_with_counter(signature1, &[])
.unwrap();
assert_eq!(status, None);
assert_eq!(counter, 2);
let (status, counter) = blockstore
.get_transaction_status_with_counter(signature1, &[3])
.unwrap();
assert_eq!(status, None);
assert_eq!(counter, 2);
// Signature does not exist, between existing entries
let (status, counter) = blockstore
.get_transaction_status_with_counter(signature3, &[])
.unwrap();
assert_eq!(status, None);
assert_eq!(counter, 2);
let (status, counter) = blockstore
.get_transaction_status_with_counter(signature3, &[3])
.unwrap();
assert_eq!(status, None);
assert_eq!(counter, 2);
// Signature does not exist, larger than existing entries
let (status, counter) = blockstore
.get_transaction_status_with_counter(signature7, &[])
.unwrap();
assert_eq!(status, None);
assert_eq!(counter, 2);
let (status, counter) = blockstore
.get_transaction_status_with_counter(signature7, &[3])
.unwrap();
assert_eq!(status, None);
assert_eq!(counter, 2);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
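// Shared body for the four lowest_cleanup_slot tests below: optionally
// simulates compaction and/or the ledger-cleanup service, then checks that
// entries at the cleanup slot disappear while later entries always survive.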
fn do_test_lowest_cleanup_slot_and_special_cfs(
simulate_compaction: bool,
simulate_ledger_cleanup_service: bool,
) {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// TransactionStatus column opens initialized with one entry at index 2
let transaction_status_cf = blockstore.db.column::<cf::TransactionStatus>();
let pre_balances_vec = vec![1, 2, 3];
let post_balances_vec = vec![3, 2, 1];
let status = TransactionStatusMeta {
status: solana_sdk::transaction::Result::<()>::Ok(()),
fee: 42u64,
pre_balances: pre_balances_vec,
post_balances: post_balances_vec,
inner_instructions: Some(vec![]),
log_messages: Some(vec![]),
pre_token_balances: Some(vec![]),
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
}
.into();
let signature1 = Signature::new(&[2u8; 64]);
let signature2 = Signature::new(&[3u8; 64]);
// Insert rooted slots 0..=3 with no fork
let meta0 = SlotMeta::new(0, 0);
blockstore.meta_cf.put(0, &meta0).unwrap();
let meta1 = SlotMeta::new(1, 0);
blockstore.meta_cf.put(1, &meta1).unwrap();
let meta2 = SlotMeta::new(2, 1);
blockstore.meta_cf.put(2, &meta2).unwrap();
let meta3 = SlotMeta::new(3, 2);
blockstore.meta_cf.put(3, &meta3).unwrap();
blockstore.set_roots(vec![0, 1, 2, 3].iter()).unwrap();
let lowest_cleanup_slot = 1;
let lowest_available_slot = lowest_cleanup_slot + 1;
transaction_status_cf
.put_protobuf((0, signature1, lowest_cleanup_slot), &status)
.unwrap();
transaction_status_cf
.put_protobuf((0, signature2, lowest_available_slot), &status)
.unwrap();
let address0 = solana_sdk::pubkey::new_rand();
let address1 = solana_sdk::pubkey::new_rand();
blockstore
.write_transaction_status(
lowest_cleanup_slot,
signature1,
vec![&address0],
vec![],
TransactionStatusMeta::default(),
)
.unwrap();
blockstore
.write_transaction_status(
lowest_available_slot,
signature2,
vec![&address1],
vec![],
TransactionStatusMeta::default(),
)
.unwrap();
let check_for_missing = || {
(
blockstore
.get_transaction_status_with_counter(signature1, &[])
.unwrap()
.0
.is_none(),
blockstore
.find_address_signatures_for_slot(address0, lowest_cleanup_slot)
.unwrap()
.is_empty(),
blockstore
.find_address_signatures(address0, lowest_cleanup_slot, lowest_cleanup_slot)
.unwrap()
.is_empty(),
)
};
let assert_existing_always = || {
let are_existing_always = (
blockstore
.get_transaction_status_with_counter(signature2, &[])
.unwrap()
.0
.is_some(),
!blockstore
.find_address_signatures_for_slot(address1, lowest_available_slot)
.unwrap()
.is_empty(),
!blockstore
.find_address_signatures(
address1,
lowest_available_slot,
lowest_available_slot,
)
.unwrap()
.is_empty(),
);
assert_eq!(are_existing_always, (true, true, true));
};
let are_missing = check_for_missing();
// nothing should be missing before the conditional compaction & cleanup simulation...
assert_eq!(are_missing, (false, false, false));
assert_existing_always();
if simulate_compaction {
blockstore.set_max_expired_slot(lowest_cleanup_slot);
// force compaction filters to run across whole key range.
blockstore
.compact_storage(Slot::min_value(), Slot::max_value())
.unwrap();
}
if simulate_ledger_cleanup_service {
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
}
let are_missing = check_for_missing();
if simulate_compaction || simulate_ledger_cleanup_service {
// ... when either simulation (or both) is in effect, the entries should be
// observed as missing, consistently
assert_eq!(are_missing, (true, true, true));
} else {
// ... otherwise, the entries should still be present...
assert_eq!(are_missing, (false, false, false));
}
assert_existing_always();
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_lowest_cleanup_slot_and_special_cfs_with_compact_with_ledger_cleanup_service_simulation(
) {
do_test_lowest_cleanup_slot_and_special_cfs(true, true);
}
#[test]
fn test_lowest_cleanup_slot_and_special_cfs_with_compact_without_ledger_cleanup_service_simulation(
) {
do_test_lowest_cleanup_slot_and_special_cfs(true, false);
}
#[test]
fn test_lowest_cleanup_slot_and_special_cfs_without_compact_with_ledger_cleanup_service_simulation(
) {
do_test_lowest_cleanup_slot_and_special_cfs(false, true);
}
#[test]
fn test_lowest_cleanup_slot_and_special_cfs_without_compact_without_ledger_cleanup_service_simulation(
) {
do_test_lowest_cleanup_slot_and_special_cfs(false, false);
}
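// Statuses are written for every transaction in a rooted slot; both
// get_rooted_transaction and get_complete_transaction must return them, and
// both must return None once the slot is purged.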
#[test]
fn test_get_rooted_transaction() {
let slot = 2;
let entries = make_slot_entries_with_transactions(5);
let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(vec![slot - 1, slot].iter()).unwrap();
let expected_transactions: Vec<TransactionWithStatusMeta> = entries
.iter()
.cloned()
.filter(|entry| !entry.is_tick())
.flat_map(|entry| entry.transactions)
.map(|transaction| {
let mut pre_balances: Vec<u64> = vec![];
let mut post_balances: Vec<u64> = vec![];
for (i, _account_key) in transaction.message.account_keys.iter().enumerate() {
pre_balances.push(i as u64 * 10);
post_balances.push(i as u64 * 11);
}
let inner_instructions = Some(vec![InnerInstructions {
index: 0,
instructions: vec![CompiledInstruction::new(1, &(), vec![0])],
}]);
let log_messages = Some(vec![String::from("Test message\n")]);
let pre_token_balances = Some(vec![]);
let post_token_balances = Some(vec![]);
let rewards = Some(vec![]);
let signature = transaction.signatures[0];
let status = TransactionStatusMeta {
status: Ok(()),
fee: 42,
pre_balances: pre_balances.clone(),
post_balances: post_balances.clone(),
inner_instructions: inner_instructions.clone(),
log_messages: log_messages.clone(),
pre_token_balances: pre_token_balances.clone(),
post_token_balances: post_token_balances.clone(),
rewards: rewards.clone(),
}
.into();
blockstore
.transaction_status_cf
.put_protobuf((0, signature, slot), &status)
.unwrap();
TransactionWithStatusMeta {
transaction,
meta: Some(TransactionStatusMeta {
status: Ok(()),
fee: 42,
pre_balances,
post_balances,
inner_instructions,
log_messages,
pre_token_balances,
post_token_balances,
rewards,
}),
}
})
.collect();
for transaction in expected_transactions.clone() {
let signature = transaction.transaction.signatures[0];
assert_eq!(
blockstore.get_rooted_transaction(signature).unwrap(),
Some(ConfirmedTransaction {
slot,
transaction: transaction.clone(),
block_time: None
})
);
assert_eq!(
blockstore
.get_complete_transaction(signature, slot + 1)
.unwrap(),
Some(ConfirmedTransaction {
slot,
transaction,
block_time: None
})
);
}
blockstore.run_purge(0, 2, PurgeType::PrimaryIndex).unwrap();
*blockstore.lowest_cleanup_slot.write().unwrap() = slot;
for TransactionWithStatusMeta { transaction, .. } in expected_transactions {
let signature = transaction.signatures[0];
assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None);
assert_eq!(
blockstore
.get_complete_transaction(signature, slot + 1)
.unwrap(),
None,
);
}
}
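// Same setup as above, but the slot is never rooted: only
// get_complete_transaction (given a sufficient confirmed slot) finds the
// transactions, and get_rooted_transaction always returns None.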
#[test]
fn test_get_complete_transaction() {
let slot = 2;
let entries = make_slot_entries_with_transactions(5);
let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
let expected_transactions: Vec<TransactionWithStatusMeta> = entries
.iter()
.cloned()
.filter(|entry| !entry.is_tick())
.flat_map(|entry| entry.transactions)
.map(|transaction| {
let mut pre_balances: Vec<u64> = vec![];
let mut post_balances: Vec<u64> = vec![];
for (i, _account_key) in transaction.message.account_keys.iter().enumerate() {
pre_balances.push(i as u64 * 10);
post_balances.push(i as u64 * 11);
}
let inner_instructions = Some(vec![InnerInstructions {
index: 0,
instructions: vec![CompiledInstruction::new(1, &(), vec![0])],
}]);
let log_messages = Some(vec![String::from("Test message\n")]);
let pre_token_balances = Some(vec![]);
let post_token_balances = Some(vec![]);
let rewards = Some(vec![]);
let signature = transaction.signatures[0];
let status = TransactionStatusMeta {
status: Ok(()),
fee: 42,
pre_balances: pre_balances.clone(),
post_balances: post_balances.clone(),
inner_instructions: inner_instructions.clone(),
log_messages: log_messages.clone(),
pre_token_balances: pre_token_balances.clone(),
post_token_balances: post_token_balances.clone(),
rewards: rewards.clone(),
}
.into();
blockstore
.transaction_status_cf
.put_protobuf((0, signature, slot), &status)
.unwrap();
TransactionWithStatusMeta {
transaction,
meta: Some(TransactionStatusMeta {
status: Ok(()),
fee: 42,
pre_balances,
post_balances,
inner_instructions,
log_messages,
pre_token_balances,
post_token_balances,
rewards,
}),
}
})
.collect();
for transaction in expected_transactions.clone() {
let signature = transaction.transaction.signatures[0];
assert_eq!(
blockstore
.get_complete_transaction(signature, slot)
.unwrap(),
Some(ConfirmedTransaction {
slot,
transaction,
block_time: None
})
);
assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None);
}
blockstore.run_purge(0, 2, PurgeType::PrimaryIndex).unwrap();
*blockstore.lowest_cleanup_slot.write().unwrap() = slot;
for TransactionWithStatusMeta { transaction, .. } in expected_transactions {
let signature = transaction.signatures[0];
assert_eq!(
blockstore
.get_complete_transaction(signature, slot)
.unwrap(),
None,
);
assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None);
}
}
#[test]
fn test_empty_transaction_status() {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore.set_roots(std::iter::once(&0)).unwrap();
assert_eq!(
blockstore
.get_rooted_transaction(Signature::default())
.unwrap(),
None
);
}
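// Writes address-indexed statuses across both primary indexes and verifies
// slot-range filtering, ordering, and the results after purging index 0.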
#[test]
fn test_get_confirmed_signatures_for_address() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let address0 = solana_sdk::pubkey::new_rand();
let address1 = solana_sdk::pubkey::new_rand();
let slot0 = 10;
for x in 1..5 {
let signature = Signature::new(&[x; 64]);
blockstore
.write_transaction_status(
slot0,
signature,
vec![&address0],
vec![&address1],
TransactionStatusMeta::default(),
)
.unwrap();
}
// Purge to freeze index 0
blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap();
let slot1 = 20;
for x in 5..9 {
let signature = Signature::new(&[x; 64]);
blockstore
.write_transaction_status(
slot1,
signature,
vec![&address0],
vec![&address1],
TransactionStatusMeta::default(),
)
.unwrap();
}
blockstore.set_roots(vec![slot0, slot1].iter()).unwrap();
let all0 = blockstore
.get_confirmed_signatures_for_address(address0, 0, 50)
.unwrap();
assert_eq!(all0.len(), 8);
for x in 1..9 {
let expected_signature = Signature::new(&[x; 64]);
assert_eq!(all0[x as usize - 1], expected_signature);
}
assert_eq!(
blockstore
.get_confirmed_signatures_for_address(address0, 20, 50)
.unwrap()
.len(),
4
);
assert_eq!(
blockstore
.get_confirmed_signatures_for_address(address0, 0, 10)
.unwrap()
.len(),
4
);
assert!(blockstore
.get_confirmed_signatures_for_address(address0, 1, 5)
.unwrap()
.is_empty());
assert_eq!(
blockstore
.get_confirmed_signatures_for_address(address0, 1, 15)
.unwrap()
.len(),
4
);
let all1 = blockstore
.get_confirmed_signatures_for_address(address1, 0, 50)
.unwrap();
assert_eq!(all1.len(), 8);
for x in 1..9 {
let expected_signature = Signature::new(&[x; 64]);
assert_eq!(all1[x as usize - 1], expected_signature);
}
// Purge index 0
blockstore
.run_purge(0, 10, PurgeType::PrimaryIndex)
.unwrap();
assert_eq!(
blockstore
.get_confirmed_signatures_for_address(address0, 0, 50)
.unwrap()
.len(),
4
);
assert_eq!(
blockstore
.get_confirmed_signatures_for_address(address0, 20, 50)
.unwrap()
.len(),
4
);
assert!(blockstore
.get_confirmed_signatures_for_address(address0, 0, 10)
.unwrap()
.is_empty());
assert!(blockstore
.get_confirmed_signatures_for_address(address0, 1, 5)
.unwrap()
.is_empty());
assert_eq!(
blockstore
.get_confirmed_signatures_for_address(address0, 1, 25)
.unwrap()
.len(),
4
);
// Test sort, regardless of entry order or signature value
for slot in (21..25).rev() {
let random_bytes: Vec<u8> = (0..64).map(|_| rand::random::<u8>()).collect();
let signature = Signature::new(&random_bytes);
blockstore
.write_transaction_status(
slot,
signature,
vec![&address0],
vec![&address1],
TransactionStatusMeta::default(),
)
.unwrap();
}
blockstore.set_roots(vec![21, 22, 23, 24].iter()).unwrap();
let mut past_slot = 0;
for (slot, _) in blockstore.find_address_signatures(address0, 1, 25).unwrap() {
assert!(slot >= past_slot);
past_slot = slot;
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
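// Statuses for one address span three slots and both primary indexes;
// per-slot lookups must return exactly that slot's signatures.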
#[test]
fn test_find_address_signatures_for_slot() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let address0 = solana_sdk::pubkey::new_rand();
let address1 = solana_sdk::pubkey::new_rand();
let slot1 = 1;
for x in 1..5 {
let signature = Signature::new(&[x; 64]);
blockstore
.write_transaction_status(
slot1,
signature,
vec![&address0],
vec![&address1],
TransactionStatusMeta::default(),
)
.unwrap();
}
let slot2 = 2;
for x in 5..7 {
let signature = Signature::new(&[x; 64]);
blockstore
.write_transaction_status(
slot2,
signature,
vec![&address0],
vec![&address1],
TransactionStatusMeta::default(),
)
.unwrap();
}
// Purge to freeze index 0
blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap();
for x in 7..9 {
let signature = Signature::new(&[x; 64]);
blockstore
.write_transaction_status(
slot2,
signature,
vec![&address0],
vec![&address1],
TransactionStatusMeta::default(),
)
.unwrap();
}
let slot3 = 3;
for x in 9..13 {
let signature = Signature::new(&[x; 64]);
blockstore
.write_transaction_status(
slot3,
signature,
vec![&address0],
vec![&address1],
TransactionStatusMeta::default(),
)
.unwrap();
}
blockstore.set_roots(std::iter::once(&slot1)).unwrap();
let slot1_signatures = blockstore
.find_address_signatures_for_slot(address0, 1)
.unwrap();
for (i, (slot, signature)) in slot1_signatures.iter().enumerate() {
assert_eq!(*slot, slot1);
assert_eq!(*signature, Signature::new(&[i as u8 + 1; 64]));
}
let slot2_signatures = blockstore
.find_address_signatures_for_slot(address0, 2)
.unwrap();
for (i, (slot, signature)) in slot2_signatures.iter().enumerate() {
assert_eq!(*slot, slot2);
assert_eq!(*signature, Signature::new(&[i as u8 + 5; 64]));
}
let slot3_signatures = blockstore
.find_address_signatures_for_slot(address0, 3)
.unwrap();
for (i, (slot, signature)) in slot3_signatures.iter().enumerate() {
assert_eq!(*slot, slot3);
assert_eq!(*signature, Signature::new(&[i as u8 + 9; 64]));
}
}
}
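// Pagination test for get_confirmed_signatures_for_address2: batched and
// one-at-a-time fetches, `before`/`until` cursors, rooted vs. confirmed
// limits, and the found_before flag after a cursor signature is deleted.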
#[test]
fn test_get_confirmed_signatures_for_address2() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
fn make_slot_entries_with_transaction_addresses(addresses: &[Pubkey]) -> Vec<Entry> {
let mut entries: Vec<Entry> = Vec::new();
for address in addresses {
let transaction = Transaction::new_with_compiled_instructions(
&[&Keypair::new()],
&[*address],
Hash::default(),
vec![solana_sdk::pubkey::new_rand()],
vec![CompiledInstruction::new(1, &(), vec![0])],
);
entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
let mut tick = create_ticks(1, 0, hash(&serialize(address).unwrap()));
entries.append(&mut tick);
}
entries
}
let address0 = solana_sdk::pubkey::new_rand();
let address1 = solana_sdk::pubkey::new_rand();
for slot in 2..=8 {
let entries = make_slot_entries_with_transaction_addresses(&[
address0, address1, address0, address1,
]);
let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
blockstore.insert_shreds(shreds, None, false).unwrap();
for (i, entry) in entries.iter().enumerate() {
if slot == 4 && i == 2 {
// Purge to freeze index 0 and write address-signatures in new primary index
blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap();
}
for transaction in &entry.transactions {
assert_eq!(transaction.signatures.len(), 1);
blockstore
.write_transaction_status(
slot,
transaction.signatures[0],
transaction.message.account_keys.iter().collect(),
vec![],
TransactionStatusMeta::default(),
)
.unwrap();
}
}
}
// Add 2 slots that both descend from slot 8
for slot in 9..=10 {
let entries = make_slot_entries_with_transaction_addresses(&[
address0, address1, address0, address1,
]);
let shreds = entries_to_test_shreds(entries.clone(), slot, 8, true, 0);
blockstore.insert_shreds(shreds, None, false).unwrap();
for entry in entries.iter() {
for transaction in &entry.transactions {
assert_eq!(transaction.signatures.len(), 1);
blockstore
.write_transaction_status(
slot,
transaction.signatures[0],
transaction.message.account_keys.iter().collect(),
vec![],
TransactionStatusMeta::default(),
)
.unwrap();
}
}
}
// Leave one slot unrooted to test only returns confirmed signatures
blockstore
.set_roots(vec![1, 2, 4, 5, 6, 7, 8].iter())
.unwrap();
let highest_confirmed_root = 8;
// Fetch all rooted signatures for address 0 at once...
let sig_infos = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
None,
None,
usize::MAX,
)
.unwrap();
assert!(sig_infos.found_before);
let all0 = sig_infos.infos;
assert_eq!(all0.len(), 12);
// Fetch all rooted signatures for address 1 at once...
let all1 = blockstore
.get_confirmed_signatures_for_address2(
address1,
highest_confirmed_root,
None,
None,
usize::MAX,
)
.unwrap()
.infos;
assert_eq!(all1.len(), 12);
// Fetch all signatures for address 0 individually
for i in 0..all0.len() {
let sig_infos = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
if i == 0 {
None
} else {
Some(all0[i - 1].signature)
},
None,
1,
)
.unwrap();
assert!(sig_infos.found_before);
let results = sig_infos.infos;
assert_eq!(results.len(), 1);
assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
}
// Fetch all signatures for address 0 individually using `until`
for i in 0..all0.len() {
let results = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
if i == 0 {
None
} else {
Some(all0[i - 1].signature)
},
if i == all0.len() - 1 || i == all0.len() {
None
} else {
Some(all0[i + 1].signature)
},
10,
)
.unwrap()
.infos;
assert_eq!(results.len(), 1);
assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
}
let sig_infos = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
Some(all0[all0.len() - 1].signature),
None,
1,
)
.unwrap();
assert!(sig_infos.found_before);
assert!(sig_infos.infos.is_empty());
assert!(blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
None,
Some(all0[0].signature),
2,
)
.unwrap()
.infos
.is_empty());
// Fetch all signatures for address 0, three at a time
assert!(all0.len() % 3 == 0);
for i in (0..all0.len()).step_by(3) {
let results = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
if i == 0 {
None
} else {
Some(all0[i - 1].signature)
},
None,
3,
)
.unwrap()
.infos;
assert_eq!(results.len(), 3);
assert_eq!(results[0], all0[i]);
assert_eq!(results[1], all0[i + 1]);
assert_eq!(results[2], all0[i + 2]);
}
// Ensure that the signatures within a slot are reverse ordered by signature
// (a current limitation of get_confirmed_signatures_for_address2())
for i in (0..all1.len()).step_by(2) {
let results = blockstore
.get_confirmed_signatures_for_address2(
address1,
highest_confirmed_root,
if i == 0 {
None
} else {
Some(all1[i - 1].signature)
},
None,
2,
)
.unwrap()
.infos;
assert_eq!(results.len(), 2);
assert_eq!(results[0].slot, results[1].slot);
assert!(results[0].signature >= results[1].signature);
assert_eq!(results[0], all1[i]);
assert_eq!(results[1], all1[i + 1]);
}
// A search for address 0 with `before` and/or `until` signatures from address1 should also work
let sig_infos = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
Some(all1[0].signature),
None,
usize::MAX,
)
.unwrap();
assert!(sig_infos.found_before);
let results = sig_infos.infos;
// The exact number of results returned is variable, based on the sort order of the
// random signatures that are generated
assert!(!results.is_empty());
let results2 = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
Some(all1[0].signature),
Some(all1[4].signature),
usize::MAX,
)
.unwrap()
.infos;
assert!(results2.len() < results.len());
// Duplicate all tests using confirmed signatures
let highest_confirmed_slot = 10;
// Fetch all signatures for address 0 at once...
let all0 = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_slot,
None,
None,
usize::MAX,
)
.unwrap()
.infos;
assert_eq!(all0.len(), 14);
// Fetch all signatures for address 1 at once...
let all1 = blockstore
.get_confirmed_signatures_for_address2(
address1,
highest_confirmed_slot,
None,
None,
usize::MAX,
)
.unwrap()
.infos;
assert_eq!(all1.len(), 14);
// Fetch all signatures for address 0 individually
for i in 0..all0.len() {
let results = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_slot,
if i == 0 {
None
} else {
Some(all0[i - 1].signature)
},
None,
1,
)
.unwrap()
.infos;
assert_eq!(results.len(), 1);
assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
}
// Fetch all signatures for address 0 individually using `until`
for i in 0..all0.len() {
let results = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_slot,
if i == 0 {
None
} else {
Some(all0[i - 1].signature)
},
if i == all0.len() - 1 || i == all0.len() {
None
} else {
Some(all0[i + 1].signature)
},
10,
)
.unwrap()
.infos;
assert_eq!(results.len(), 1);
assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
}
assert!(blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_slot,
Some(all0[all0.len() - 1].signature),
None,
1,
)
.unwrap()
.infos
.is_empty());
assert!(blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_slot,
None,
Some(all0[0].signature),
2,
)
.unwrap()
.infos
.is_empty());
// Fetch all signatures for address 0, three at a time
assert!(all0.len() % 3 == 2);
for i in (0..all0.len()).step_by(3) {
let results = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_slot,
if i == 0 {
None
} else {
Some(all0[i - 1].signature)
},
None,
3,
)
.unwrap()
.infos;
if i < 12 {
assert_eq!(results.len(), 3);
assert_eq!(results[2], all0[i + 2]);
} else {
assert_eq!(results.len(), 2);
}
assert_eq!(results[0], all0[i]);
assert_eq!(results[1], all0[i + 1]);
}
// Ensure that the signatures within a slot are reverse ordered by signature
// (a current limitation of get_confirmed_signatures_for_address2())
for i in (0..all1.len()).step_by(2) {
let results = blockstore
.get_confirmed_signatures_for_address2(
address1,
highest_confirmed_slot,
if i == 0 {
None
} else {
Some(all1[i - 1].signature)
},
None,
2,
)
.unwrap()
.infos;
assert_eq!(results.len(), 2);
assert_eq!(results[0].slot, results[1].slot);
assert!(results[0].signature >= results[1].signature);
assert_eq!(results[0], all1[i]);
assert_eq!(results[1], all1[i + 1]);
}
// A search for address 0 with `before` and/or `until` signatures from address1 should also work
let results = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_slot,
Some(all1[0].signature),
None,
usize::MAX,
)
.unwrap()
.infos;
// The exact number of results returned is variable, based on the sort order of the
// random signatures that are generated
assert!(!results.is_empty());
let results2 = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_slot,
Some(all1[0].signature),
Some(all1[4].signature),
usize::MAX,
)
.unwrap()
.infos;
assert!(results2.len() < results.len());
// Remove signature
blockstore
.address_signatures_cf
.delete((0, address0, 2, all0[0].signature))
.unwrap();
let sig_infos = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
Some(all0[0].signature),
None,
usize::MAX,
)
.unwrap();
assert!(!sig_infos.found_before);
assert!(sig_infos.infos.is_empty());
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
#[allow(clippy::same_item_push)]
fn test_get_last_hash() {
let mut entries: Vec<Entry> = vec![];
let empty_entries_iterator = entries.iter();
assert!(get_last_hash(empty_entries_iterator).is_none());
let mut prev_hash = hash::hash(&[42u8]);
for _ in 0..10 {
let entry = next_entry(&prev_hash, 1, vec![]);
prev_hash = entry.hash;
entries.push(entry);
}
let entries_iterator = entries.iter();
assert_eq!(get_last_hash(entries_iterator).unwrap(), entries[9].hash);
}
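// Statuses exist for the first four transactions but not the fifth; the map
// must pair each transaction with its status and yield None for the last.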
#[test]
fn test_map_transactions_to_statuses() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let transaction_status_cf = blockstore.db.column::<cf::TransactionStatus>();
let slot = 0;
let mut transactions: Vec<Transaction> = vec![];
for x in 0..4 {
let transaction = Transaction::new_with_compiled_instructions(
&[&Keypair::new()],
&[solana_sdk::pubkey::new_rand()],
Hash::default(),
vec![solana_sdk::pubkey::new_rand()],
vec![CompiledInstruction::new(1, &(), vec![0])],
);
let status = TransactionStatusMeta {
status: solana_sdk::transaction::Result::<()>::Err(
TransactionError::AccountNotFound,
),
fee: x,
pre_balances: vec![],
post_balances: vec![],
inner_instructions: Some(vec![]),
log_messages: Some(vec![]),
pre_token_balances: Some(vec![]),
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
}
.into();
transaction_status_cf
.put_protobuf((0, transaction.signatures[0], slot), &status)
.unwrap();
transactions.push(transaction);
}
// Push a transaction that will not have a matching status, as a test case
transactions.push(Transaction::new_with_compiled_instructions(
&[&Keypair::new()],
&[solana_sdk::pubkey::new_rand()],
Hash::default(),
vec![solana_sdk::pubkey::new_rand()],
vec![CompiledInstruction::new(1, &(), vec![0])],
));
let map = blockstore.map_transactions_to_statuses(slot, transactions.into_iter());
assert_eq!(map.len(), 5);
for (x, m) in map.iter().take(4).enumerate() {
assert_eq!(m.meta.as_ref().unwrap().fee, x as u64);
}
assert_eq!(map[4].meta, None);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
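// Writes ten perf samples and checks that get_recent_perf_samples returns the
// requested number of samples, newest slot first.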
#[test]
fn test_write_get_perf_samples() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries: usize = 10;
let mut perf_samples: Vec<(Slot, PerfSample)> = vec![];
for x in 1..num_entries + 1 {
perf_samples.push((
x as u64 * 50,
PerfSample {
num_transactions: 1000 + x as u64,
num_slots: 50,
sample_period_secs: 20,
},
));
}
for (slot, sample) in perf_samples.iter() {
blockstore.write_perf_sample(*slot, sample).unwrap();
}
for x in 0..num_entries {
let mut expected_samples = perf_samples[num_entries - 1 - x..].to_vec();
expected_samples.sort_by(|a, b| b.0.cmp(&a.0));
assert_eq!(
blockstore.get_recent_perf_samples(x + 1).unwrap(),
expected_samples
);
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
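// lowest_slot ignores slot 0 and advances past purged slots.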
#[test]
fn test_lowest_slot() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
for i in 0..10 {
let slot = i;
let (shreds, _) = make_slot_entries(slot, 0, 1);
blockstore.insert_shreds(shreds, None, false).unwrap();
}
assert_eq!(blockstore.lowest_slot(), 1);
blockstore.run_purge(0, 5, PurgeType::PrimaryIndex).unwrap();
assert_eq!(blockstore.lowest_slot(), 6);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
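// Inserts only the coding shreds; erasure recovery must reconstruct every
// data shred byte-for-byte.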
#[test]
fn test_recovery() {
let slot = 1;
let (data_shreds, coding_shreds, leader_schedule_cache) =
setup_erasure_shreds(slot, 0, 100);
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore
.insert_shreds(coding_shreds, Some(&leader_schedule_cache), false)
.unwrap();
let shred_bufs: Vec<_> = data_shreds
.iter()
.map(|shred| shred.payload.clone())
.collect();
// Check all the data shreds were recovered
for (s, buf) in data_shreds.iter().zip(shred_bufs) {
assert_eq!(
blockstore
.get_data_shred(s.slot(), s.index() as u64)
.unwrap()
.unwrap(),
buf
);
}
verify_index_integrity(&blockstore, slot);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
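// Inserts various mixes of data and coding shreds, with and without enough
// for recovery, in one or two rounds, verifying after each round that the
// index column stays in sync with the data and coding columns.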
#[test]
fn test_index_integrity() {
let slot = 1;
let num_entries = 100;
let (data_shreds, coding_shreds, leader_schedule_cache) =
setup_erasure_shreds(slot, 0, num_entries);
assert!(data_shreds.len() > 3);
assert!(coding_shreds.len() > 3);
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Test inserting all the shreds
let all_shreds: Vec<_> = data_shreds
.iter()
.cloned()
.chain(coding_shreds.iter().cloned())
.collect();
blockstore
.insert_shreds(all_shreds, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_and_compact_slots(0, slot);
// Test inserting just the codes, enough for recovery
blockstore
.insert_shreds(coding_shreds.clone(), Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_and_compact_slots(0, slot);
// Test inserting some codes, but not enough for recovery
blockstore
.insert_shreds(
coding_shreds[..coding_shreds.len() - 1].to_vec(),
Some(&leader_schedule_cache),
false,
)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_and_compact_slots(0, slot);
// Test inserting just the codes, and some data, enough for recovery
let shreds: Vec<_> = data_shreds[..data_shreds.len() - 1]
.iter()
.cloned()
.chain(coding_shreds[..coding_shreds.len() - 1].iter().cloned())
.collect();
blockstore
.insert_shreds(shreds, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_and_compact_slots(0, slot);
// Test inserting some codes, and some data, but enough for recovery
let shreds: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
.iter()
.cloned()
.chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
.collect();
blockstore
.insert_shreds(shreds, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_and_compact_slots(0, slot);
// Test inserting all shreds in 2 rounds, make sure nothing is lost
let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
.iter()
.cloned()
.chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
.collect();
let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..]
.iter()
.cloned()
.chain(coding_shreds[coding_shreds.len() / 2 - 1..].iter().cloned())
.collect();
blockstore
.insert_shreds(shreds1, Some(&leader_schedule_cache), false)
.unwrap();
blockstore
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_and_compact_slots(0, slot);
// Test not all, but enough data and coding shreds in 2 rounds to trigger recovery,
// make sure nothing is lost
let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
.iter()
.cloned()
.chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
.collect();
let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..data_shreds.len() / 2]
.iter()
.cloned()
.chain(
coding_shreds[coding_shreds.len() / 2 - 1..coding_shreds.len() / 2]
.iter()
.cloned(),
)
.collect();
blockstore
.insert_shreds(shreds1, Some(&leader_schedule_cache), false)
.unwrap();
blockstore
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_and_compact_slots(0, slot);
// Test insert shreds in 2 rounds, but not enough to trigger
// recovery, make sure nothing is lost
let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 2]
.iter()
.cloned()
.chain(coding_shreds[..coding_shreds.len() / 2 - 2].iter().cloned())
.collect();
let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 2..data_shreds.len() / 2 - 1]
.iter()
.cloned()
.chain(
coding_shreds[coding_shreds.len() / 2 - 2..coding_shreds.len() / 2 - 1]
.iter()
.cloned(),
)
.collect();
blockstore
.insert_shreds(shreds1, Some(&leader_schedule_cache), false)
.unwrap();
blockstore
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_and_compact_slots(0, slot);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
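// Builds data and coding shreds for `num_entries` transactions in `slot`,
// plus a leader schedule fixed to the shredder's keypair so shred signature
// checks pass on insert.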
fn setup_erasure_shreds(
slot: u64,
parent_slot: u64,
num_entries: u64,
) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
let entries = make_slot_entries_with_transactions(num_entries);
let leader_keypair = Arc::new(Keypair::new());
let shredder = Shredder::new(slot, parent_slot, leader_keypair.clone(), 0, 0).unwrap();
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0);
let genesis_config = create_genesis_config(2).genesis_config;
let bank = Arc::new(Bank::new(&genesis_config));
let mut leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank);
let fixed_schedule = FixedSchedule {
leader_schedule: Arc::new(LeaderSchedule::new_from_schedule(vec![
leader_keypair.pubkey()
])),
start_epoch: 0,
};
leader_schedule_cache.set_fixed_leader_schedule(Some(fixed_schedule));
(data_shreds, coding_shreds, Arc::new(leader_schedule_cache))
}
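// Asserts that the index column tracks exactly the shreds present in the
// data and coding columns for `slot`: nothing missing, nothing extra.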
fn verify_index_integrity(blockstore: &Blockstore, slot: u64) {
let index = blockstore.get_index(slot).unwrap().unwrap();
// Test the set of data shreds in the index and in the data column
// family are the same
let data_iter = blockstore.slot_data_iterator(slot, 0).unwrap();
let mut num_data = 0;
for ((slot, index), _) in data_iter {
num_data += 1;
assert!(blockstore.get_data_shred(slot, index).unwrap().is_some());
}
// Test the data index doesn't have anything extra
let num_data_in_index = index.data().num_shreds();
assert_eq!(num_data_in_index, num_data);
// Test the set of coding shreds in the index and in the coding column
// family are the same
let coding_iter = blockstore.slot_coding_iterator(slot, 0).unwrap();
let mut num_coding = 0;
for ((slot, index), _) in coding_iter {
num_coding += 1;
assert!(blockstore.get_coding_shred(slot, index).unwrap().is_some());
}
// Test the data index doesn't have anything extra
let num_coding_in_index = index.coding().num_shreds();
assert_eq!(num_coding_in_index, num_coding);
}
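// Two different shreds at the same (slot, index) constitute a duplicate
// proof; verifies detection, storage, and retrieval of that proof.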
#[test]
fn test_duplicate_slot() {
let slot = 0;
let entries1 = make_slot_entries_with_transactions(1);
let entries2 = make_slot_entries_with_transactions(1);
let leader_keypair = Arc::new(Keypair::new());
let shredder = Shredder::new(slot, 0, leader_keypair, 0, 0).unwrap();
let (shreds, _, _) = shredder.entries_to_shreds(&entries1, true, 0);
let (duplicate_shreds, _, _) = shredder.entries_to_shreds(&entries2, true, 0);
let shred = shreds[0].clone();
let duplicate_shred = duplicate_shreds[0].clone();
let non_duplicate_shred = shred.clone();
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore
.insert_shreds(vec![shred.clone()], None, false)
.unwrap();
// No duplicate shreds exist yet
assert!(!blockstore.has_duplicate_shreds_in_slot(slot));
// Check if shreds are duplicated
assert_eq!(
blockstore.is_shred_duplicate(
slot,
0,
duplicate_shred.payload.clone(),
duplicate_shred.shred_type()
),
Some(shred.payload.to_vec())
);
assert!(blockstore
.is_shred_duplicate(
slot,
0,
non_duplicate_shred.payload,
duplicate_shred.shred_type()
)
.is_none());
// Store a duplicate shred
blockstore
.store_duplicate_slot(slot, shred.payload.clone(), duplicate_shred.payload.clone())
.unwrap();
// Slot is now marked as duplicate
assert!(blockstore.has_duplicate_shreds_in_slot(slot));
// Check ability to fetch the duplicates
let duplicate_proof = blockstore.get_duplicate_slot(slot).unwrap();
assert_eq!(duplicate_proof.shred1, shred.payload);
assert_eq!(duplicate_proof.shred2, duplicate_shred.payload);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
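// Clearing an unconfirmed slot must drop its shreds and its dead flag while
// preserving the next_slots links in the slot meta.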
#[test]
fn test_clear_unconfirmed_slot() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let unconfirmed_slot = 9;
let unconfirmed_child_slot = 10;
let slots = vec![2, unconfirmed_slot, unconfirmed_child_slot];
// Insert into slot 9, mark it as dead
let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1)
.into_iter()
.flat_map(|x| x.0)
.collect();
blockstore.insert_shreds(shreds, None, false).unwrap();
// Should only be one shred in slot 9
assert!(blockstore
.get_data_shred(unconfirmed_slot, 0)
.unwrap()
.is_some());
assert!(blockstore
.get_data_shred(unconfirmed_slot, 1)
.unwrap()
.is_none());
blockstore.set_dead_slot(unconfirmed_slot).unwrap();
// Purge the slot
blockstore.clear_unconfirmed_slot(unconfirmed_slot);
assert!(!blockstore.is_dead(unconfirmed_slot));
assert_eq!(
blockstore
.meta(unconfirmed_slot)
.unwrap()
.unwrap()
.next_slots,
vec![unconfirmed_child_slot]
);
assert!(blockstore
.get_data_shred(unconfirmed_slot, 0)
.unwrap()
.is_none());
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
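// In-order inserts of data-complete shreds: each insert completes the
// single-shred range (i, i).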
#[test]
fn test_update_completed_data_indexes() {
let mut completed_data_indexes = BTreeSet::default();
let mut shred_index = ShredIndex::default();
for i in 0..10 {
shred_index.set_present(i as u64, true);
assert_eq!(
update_completed_data_indexes(true, i, &shred_index, &mut completed_data_indexes),
vec![(i, i)]
);
assert!(completed_data_indexes.iter().copied().eq(0..=i));
}
}
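// Out-of-order inserts: a range is only reported complete once every shred
// between consecutive data-complete boundaries is present.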
#[test]
fn test_update_completed_data_indexes_out_of_order() {
let mut completed_data_indexes = BTreeSet::default();
let mut shred_index = ShredIndex::default();
shred_index.set_present(4, true);
assert!(
update_completed_data_indexes(false, 4, &shred_index, &mut completed_data_indexes)
.is_empty()
);
assert!(completed_data_indexes.is_empty());
shred_index.set_present(2, true);
assert!(
update_completed_data_indexes(false, 2, &shred_index, &mut completed_data_indexes)
.is_empty()
);
assert!(completed_data_indexes.is_empty());
shred_index.set_present(3, true);
assert!(
update_completed_data_indexes(true, 3, &shred_index, &mut completed_data_indexes)
.is_empty()
);
assert!(completed_data_indexes.iter().eq([3].iter()));
// Inserting data complete shred 1 now confirms the range of shreds [2, 3]
// is part of the same data set
shred_index.set_present(1, true);
assert_eq!(
update_completed_data_indexes(true, 1, &shred_index, &mut completed_data_indexes),
vec![(2, 3)]
);
assert!(completed_data_indexes.iter().eq([1, 3].iter()));
// Inserting data complete shred 0 now confirms the range of shreds [0]
// is part of the same data set
shred_index.set_present(0, true);
assert_eq!(
update_completed_data_indexes(true, 0, &shred_index, &mut completed_data_indexes),
vec![(0, 0), (1, 1)]
);
assert!(completed_data_indexes.iter().eq([0, 1, 3].iter()));
}
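// Rewards written as deprecated bincode and as protobuf must both read back
// through get_protobuf_or_bincode as the same protobuf value.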
#[test]
fn test_rewards_protobuf_backward_compatibility() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let rewards: Rewards = (0..100)
.map(|i| Reward {
pubkey: solana_sdk::pubkey::new_rand().to_string(),
lamports: 42 + i,
post_balance: std::u64::MAX,
reward_type: Some(RewardType::Fee),
commission: None,
})
.collect();
let protobuf_rewards: generated::Rewards = rewards.into();
let deprecated_rewards: StoredExtendedRewards = protobuf_rewards.clone().into();
for slot in 0..2 {
let data = serialize(&deprecated_rewards).unwrap();
blockstore.rewards_cf.put_bytes(slot, &data).unwrap();
}
for slot in 2..4 {
blockstore
.rewards_cf
.put_protobuf(slot, &protobuf_rewards)
.unwrap();
}
for slot in 0..4 {
assert_eq!(
blockstore
.rewards_cf
.get_protobuf_or_bincode::<StoredExtendedRewards>(slot)
.unwrap()
.unwrap(),
protobuf_rewards
);
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
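// Same backward-compatibility check as above, for TransactionStatusMeta.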
#[test]
fn test_transaction_status_protobuf_backward_compatibility() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let status = TransactionStatusMeta {
status: Ok(()),
fee: 42,
pre_balances: vec![1, 2, 3],
post_balances: vec![1, 2, 3],
inner_instructions: Some(vec![]),
log_messages: Some(vec![]),
pre_token_balances: Some(vec![TransactionTokenBalance {
account_index: 0,
mint: Pubkey::new_unique().to_string(),
ui_token_amount: UiTokenAmount {
ui_amount: Some(1.1),
decimals: 1,
amount: "11".to_string(),
ui_amount_string: "1.1".to_string(),
},
owner: Pubkey::new_unique().to_string(),
}]),
post_token_balances: Some(vec![TransactionTokenBalance {
account_index: 0,
mint: Pubkey::new_unique().to_string(),
ui_token_amount: UiTokenAmount {
ui_amount: None,
decimals: 1,
amount: "11".to_string(),
ui_amount_string: "1.1".to_string(),
},
owner: Pubkey::new_unique().to_string(),
}]),
rewards: Some(vec![Reward {
pubkey: "My11111111111111111111111111111111111111111".to_string(),
lamports: -42,
post_balance: 42,
reward_type: Some(RewardType::Rent),
commission: None,
}]),
};
let deprecated_status: StoredTransactionStatusMeta = status.clone().into();
let protobuf_status: generated::TransactionStatusMeta = status.into();
for slot in 0..2 {
let data = serialize(&deprecated_status).unwrap();
blockstore
.transaction_status_cf
.put_bytes((0, Signature::default(), slot), &data)
.unwrap();
}
for slot in 2..4 {
blockstore
.transaction_status_cf
.put_protobuf((0, Signature::default(), slot), &protobuf_status)
.unwrap();
}
for slot in 0..4 {
assert_eq!(
blockstore
.transaction_status_cf
.get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
0,
Signature::default(),
slot
))
.unwrap()
.unwrap(),
protobuf_status
);
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
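// Unsetting data_complete on the last shred leaves the trailing data set
// open; entries must still be recoverable via get_any_valid_slot_entries.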
#[test]
fn test_remove_shred_data_complete_flag() {
let (mut shreds, entries) = make_slot_entries(0, 0, 1);
let ledger_path = get_tmp_ledger_path!();
let ledger = Blockstore::open(&ledger_path).unwrap();
// Remove the data complete flag from the last shred
shreds[0].unset_data_complete();
ledger.insert_shreds(shreds, None, false).unwrap();
// Check that the `data_complete` flag was unset in the stored shred, but the
// `last_in_slot` flag is set.
let stored_shred = &ledger.get_data_shreds_for_slot(0, 0).unwrap()[0];
assert!(!stored_shred.data_complete());
assert!(stored_shred.last_in_slot());
assert_eq!(entries, ledger.get_any_valid_slot_entries(0, 0));
}
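// Builds one entry containing `num_txs` transfer transactions; with enough
// transactions the entry shreds into multiple data shreds.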
fn make_large_tx_entry(num_txs: usize) -> Entry {
let txs: Vec<_> = (0..num_txs)
.into_iter()
.map(|_| {
let keypair0 = Keypair::new();
let to = solana_sdk::pubkey::new_rand();
solana_sdk::system_transaction::transfer(&keypair0, &to, 1, Hash::default())
})
.collect();
Entry::new(&Hash::default(), 1, txs)
}
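// Coding shreds generated from the same data shreds under two different
// erasure configs conflict; inserting one shred from each set must flag the
// slot as duplicate.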
#[test]
fn erasure_multiple_config() {
solana_logger::setup();
let slot = 1;
let parent = 0;
let num_txs = 20;
let entry = make_large_tx_entry(num_txs);
let shreds = entries_to_test_shreds(vec![entry], slot, parent, true, 0);
assert!(shreds.len() > 1);
let ledger_path = get_tmp_ledger_path!();
let ledger = Blockstore::open(&ledger_path).unwrap();
let coding1 = Shredder::generate_coding_shreds(&shreds, false);
let coding2 = Shredder::generate_coding_shreds(&shreds, true);
for shred in &shreds {
info!("shred {:?}", shred);
}
for shred in &coding1 {
info!("coding1 {:?}", shred);
}
for shred in &coding2 {
info!("coding2 {:?}", shred);
}
ledger
.insert_shreds(shreds[..shreds.len() - 2].to_vec(), None, false)
.unwrap();
ledger
.insert_shreds(vec![coding1[0].clone(), coding2[1].clone()], None, false)
.unwrap();
assert!(ledger.has_duplicate_shreds_in_slot(slot));
}
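// A coding shred whose header claims u16::MAX coding shreds is invalid; the
// insert must be rejected, leaving the coding column empty.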
#[test]
fn test_large_num_coding() {
solana_logger::setup();
let slot = 1;
let (_data_shreds, mut coding_shreds, leader_schedule_cache) =
setup_erasure_shreds(slot, 0, 100);
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
coding_shreds[1].coding_header.num_coding_shreds = u16::MAX;
blockstore
.insert_shreds(
vec![coding_shreds[1].clone()],
Some(&leader_schedule_cache),
false,
)
.unwrap();
// Check no coding shreds are inserted
let res = blockstore.get_coding_shreds_for_slot(slot, 0).unwrap();
assert!(res.is_empty());
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
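// Repeatedly inserting a slot's shreds minus the first leaves the slot
// incomplete and consumes nothing; inserting the full set then completes
// the slot without marking it dead.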
#[test]
pub fn test_insert_data_shreds_same_slot_last_index() {
// Create RocksDb ledger
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Create enough entries to ensure there are at least two shreds created
let num_unique_entries = max_ticks_per_n_shreds(1, None) + 1;
let (mut original_shreds, original_entries) =
make_slot_entries(0, 0, num_unique_entries);
// Discard first shred, so that the slot is not full
assert!(original_shreds.len() > 1);
let last_index = original_shreds.last().unwrap().index() as u64;
original_shreds.remove(0);
// Insert the same shreds, including the last shred specifically, multiple
// times
for _ in 0..10 {
blockstore
.insert_shreds(original_shreds.clone(), None, false)
.unwrap();
let meta = blockstore.meta(0).unwrap().unwrap();
assert!(!blockstore.is_dead(0));
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), vec![]);
assert_eq!(meta.consumed, 0);
assert_eq!(meta.received, last_index + 1);
assert_eq!(meta.parent_slot, 0);
assert_eq!(meta.last_index, last_index);
assert!(!blockstore.is_full(0));
}
let duplicate_shreds = entries_to_test_shreds(original_entries.clone(), 0, 0, true, 0);
let num_shreds = duplicate_shreds.len() as u64;
blockstore
.insert_shreds(duplicate_shreds, None, false)
.unwrap();
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), original_entries);
let meta = blockstore.meta(0).unwrap().unwrap();
assert_eq!(meta.consumed, num_shreds);
assert_eq!(meta.received, num_shreds);
assert_eq!(meta.parent_slot, 0);
assert_eq!(meta.last_index, num_shreds - 1);
assert!(blockstore.is_full(0));
assert!(!blockstore.is_dead(0));
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
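// Marking two shreds in one slot as last-in-slot must record a duplicate
// proof for the slot.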
#[test]
fn test_duplicate_last_index() {
let num_shreds = 2;
let num_entries = max_ticks_per_n_shreds(num_shreds, None);
let slot = 1;
let (mut shreds, _) = make_slot_entries(slot, 0, num_entries);
// Mark both as last shred
shreds[0].set_last_in_slot();
shreds[1].set_last_in_slot();
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(blockstore.get_duplicate_slot(slot).is_some());
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_duplicate_last_index_mark_dead() {
let num_shreds = 10;
let smaller_last_shred_index = 5;
let larger_last_shred_index = 8;
let setup_test_shreds = |slot: Slot| -> Vec<Shred> {
let num_entries = max_ticks_per_n_shreds(num_shreds, None);
let (mut shreds, _) = make_slot_entries(slot, 0, num_entries);
shreds[smaller_last_shred_index].set_last_in_slot();
shreds[larger_last_shred_index].set_last_in_slot();
shreds
};
let get_expected_slot_meta_and_index_meta =
|blockstore: &Blockstore, shreds: Vec<Shred>| -> (SlotMeta, Index) {
let slot = shreds[0].slot();
blockstore
.insert_shreds(shreds.clone(), None, false)
.unwrap();
let meta = blockstore.meta(slot).unwrap().unwrap();
assert_eq!(meta.consumed, shreds.len() as u64);
let shreds_index = blockstore.get_index(slot).unwrap().unwrap();
for i in 0..shreds.len() as u64 {
assert!(shreds_index.data().is_present(i));
}
// Cleanup the slot
blockstore
.run_purge(slot, slot, PurgeType::PrimaryIndex)
.expect("Purge database operations failed");
assert!(blockstore.meta(slot).unwrap().is_none());
(meta, shreds_index)
};
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let mut slot = 0;
let shreds = setup_test_shreds(slot);
// Case 1: Insert in the same batch. Since we're inserting the shreds in order,
// any shreds > smaller_last_shred_index will not be inserted. Slot is not marked
            // as dead because no shreds > the first "last" index shred are inserted before
// the "last" index shred itself is inserted.
let (expected_slot_meta, expected_index) = get_expected_slot_meta_and_index_meta(
&blockstore,
shreds[..=smaller_last_shred_index].to_vec(),
);
blockstore
.insert_shreds(shreds.clone(), None, false)
.unwrap();
assert!(blockstore.get_duplicate_slot(slot).is_some());
assert!(!blockstore.is_dead(slot));
for i in 0..num_shreds {
if i <= smaller_last_shred_index as u64 {
assert_eq!(
blockstore.get_data_shred(slot, i).unwrap().unwrap(),
shreds[i as usize].payload
);
} else {
assert!(blockstore.get_data_shred(slot, i).unwrap().is_none());
}
}
let mut meta = blockstore.meta(slot).unwrap().unwrap();
meta.first_shred_timestamp = expected_slot_meta.first_shred_timestamp;
assert_eq!(meta, expected_slot_meta);
assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index);
// Case 2: Inserting a duplicate with an even smaller last shred index should not
            // mark the slot as dead since the SlotMeta is full.
let mut even_smaller_last_shred_duplicate =
shreds[smaller_last_shred_index - 1].clone();
even_smaller_last_shred_duplicate.set_last_in_slot();
// Flip a byte to create a duplicate shred
even_smaller_last_shred_duplicate.payload[0] =
std::u8::MAX - even_smaller_last_shred_duplicate.payload[0];
assert!(blockstore
.is_shred_duplicate(
slot,
even_smaller_last_shred_duplicate.index(),
even_smaller_last_shred_duplicate.payload.clone(),
ShredType::Data,
)
.is_some());
blockstore
.insert_shreds(vec![even_smaller_last_shred_duplicate], None, false)
.unwrap();
assert!(!blockstore.is_dead(slot));
for i in 0..num_shreds {
if i <= smaller_last_shred_index as u64 {
assert_eq!(
blockstore.get_data_shred(slot, i).unwrap().unwrap(),
shreds[i as usize].payload
);
} else {
assert!(blockstore.get_data_shred(slot, i).unwrap().is_none());
}
}
let mut meta = blockstore.meta(slot).unwrap().unwrap();
meta.first_shred_timestamp = expected_slot_meta.first_shred_timestamp;
assert_eq!(meta, expected_slot_meta);
assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index);
            // Case 3: Insert shreds in reverse so that consumed will not be updated. Now on
            // insert, the slot should be marked as dead
slot += 1;
let mut shreds = setup_test_shreds(slot);
shreds.reverse();
blockstore
.insert_shreds(shreds.clone(), None, false)
.unwrap();
assert!(blockstore.is_dead(slot));
            // All the shreds other than the two "last index" shreds are inserted; those two
            // are skipped because they are marked as last but their indexes are less than
            // the first received index == 10. The others are inserted even after the slot is
            // marked dead on the attempted insert of the first last_index shred, since dead
            // slots can still be inserted into.
for i in 0..num_shreds {
let shred_to_check = &shreds[i as usize];
let shred_index = shred_to_check.index() as u64;
if shred_index != smaller_last_shred_index as u64
&& shred_index != larger_last_shred_index as u64
{
assert_eq!(
blockstore
.get_data_shred(slot, shred_index)
.unwrap()
.unwrap(),
shred_to_check.payload
);
} else {
assert!(blockstore
.get_data_shred(slot, shred_index)
.unwrap()
.is_none());
}
}
// Case 4: Same as Case 3, but this time insert the shreds one at a time to test that the clearing
// of data shreds works even after they've been committed
slot += 1;
let mut shreds = setup_test_shreds(slot);
shreds.reverse();
for shred in shreds.clone() {
blockstore.insert_shreds(vec![shred], None, false).unwrap();
}
assert!(blockstore.is_dead(slot));
// All the shreds will be inserted since dead slots can still be inserted into.
for i in 0..num_shreds {
let shred_to_check = &shreds[i as usize];
let shred_index = shred_to_check.index() as u64;
if shred_index != smaller_last_shred_index as u64
&& shred_index != larger_last_shred_index as u64
{
assert_eq!(
blockstore
.get_data_shred(slot, shred_index)
.unwrap()
.unwrap(),
shred_to_check.payload
);
} else {
assert!(blockstore
.get_data_shred(slot, shred_index)
.unwrap()
.is_none());
}
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_get_slot_entries_dead_slot_race() {
let setup_test_shreds = move |slot: Slot| -> Vec<Shred> {
let num_shreds = 10;
let middle_shred_index = 5;
let num_entries = max_ticks_per_n_shreds(num_shreds, None);
let (shreds, _) = make_slot_entries(slot, 0, num_entries);
// Reverse shreds so that last shred gets inserted first and sets meta.received
let mut shreds: Vec<Shred> = shreds.into_iter().rev().collect();
// Push the real middle shred to the end of the shreds list
shreds.push(shreds[middle_shred_index].clone());
// Set the middle shred as a last shred to cause the slot to be marked dead
shreds[middle_shred_index].set_last_in_slot();
shreds
};
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
let (slot_sender, slot_receiver) = channel();
let (shred_sender, shred_receiver) = channel::<Vec<Shred>>();
let (signal_sender, signal_receiver) = channel();
let t_entry_getter = {
let blockstore = blockstore.clone();
let signal_sender = signal_sender.clone();
Builder::new()
.spawn(move || {
while let Ok(slot) = slot_receiver.recv() {
match blockstore.get_slot_entries_with_shred_info(slot, 0, false) {
Ok((_entries, _num_shreds, is_full)) => {
if is_full {
signal_sender
.send(Err(IoError::new(
ErrorKind::Other,
"got full slot entries for dead slot",
)))
.unwrap();
}
}
Err(err) => {
assert_matches!(err, BlockstoreError::DeadSlot);
}
}
signal_sender.send(Ok(())).unwrap();
}
})
.unwrap()
};
let t_shred_inserter = Builder::new()
.spawn(move || {
while let Ok(shreds) = shred_receiver.recv() {
let slot = shreds[0].slot();
// Grab this lock to block `get_slot_entries` before it fetches completed datasets
// and then mark the slot as dead, but full, by inserting carefully crafted shreds.
let _lowest_cleanup_slot = blockstore.lowest_cleanup_slot.write().unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(blockstore.get_duplicate_slot(slot).is_some());
assert!(blockstore.is_dead(slot));
assert!(blockstore.meta(slot).unwrap().unwrap().is_full());
signal_sender.send(Ok(())).unwrap();
}
})
.unwrap();
for slot in 0..100 {
let shreds = setup_test_shreds(slot);
// Start a task on each thread to trigger a race condition
slot_sender.send(slot).unwrap();
shred_sender.send(shreds).unwrap();
// Check that each thread processed their task before continuing
for _ in 1..=2 {
let res = signal_receiver.recv().unwrap();
assert!(res.is_ok(), "race condition: {:?}", res);
}
}
drop(slot_sender);
drop(shred_sender);
let handles = vec![t_entry_getter, t_shred_inserter];
for handle in handles {
assert!(handle.join().is_ok());
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_read_write_cost_table() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries: usize = 10;
let mut cost_table: HashMap<Pubkey, u64> = HashMap::new();
for x in 1..num_entries + 1 {
cost_table.insert(Pubkey::new_unique(), (x + 100) as u64);
}
// write to db
for (key, cost) in cost_table.iter() {
blockstore
.write_program_cost(key, cost)
.expect("write a program");
}
// read back from db
let read_back = blockstore.read_program_costs().expect("read programs");
// verify
assert_eq!(read_back.len(), cost_table.len());
for (read_key, read_cost) in read_back {
assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
}
// update value, write to db
for val in cost_table.values_mut() {
*val += 100;
}
for (key, cost) in cost_table.iter() {
blockstore
.write_program_cost(key, cost)
.expect("write a program");
}
// add a new record
let new_program_key = Pubkey::new_unique();
let new_program_cost = 999;
blockstore
.write_program_cost(&new_program_key, &new_program_cost)
.unwrap();
// confirm value updated
let read_back = blockstore.read_program_costs().expect("read programs");
// verify
assert_eq!(read_back.len(), cost_table.len() + 1);
for (key, cost) in cost_table.iter() {
assert_eq!(*cost, read_back.iter().find(|(k, _v)| k == key).unwrap().1);
}
assert_eq!(
new_program_cost,
read_back
.iter()
.find(|(k, _v)| *k == new_program_key)
.unwrap()
.1
);
// test delete
blockstore
.delete_program_cost(&new_program_key)
.expect("delete a progrma");
let read_back = blockstore.read_program_costs().expect("read programs");
// verify
assert_eq!(read_back.len(), cost_table.len());
for (read_key, read_cost) in read_back {
assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_delete_old_records_from_cost_table() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries: usize = 10;
let mut cost_table: HashMap<Pubkey, u64> = HashMap::new();
for x in 1..num_entries + 1 {
cost_table.insert(Pubkey::new_unique(), (x + 100) as u64);
}
// write to db
for (key, cost) in cost_table.iter() {
blockstore
.write_program_cost(key, cost)
.expect("write a program");
}
// remove a record
let mut removed_key = Pubkey::new_unique();
for (key, cost) in cost_table.iter() {
if *cost == 101_u64 {
removed_key = *key;
break;
}
}
cost_table.remove(&removed_key);
// delete records from blockstore if they are no longer in cost_table
let db_records = blockstore.read_program_costs().expect("read programs");
db_records.iter().for_each(|(pubkey, _)| {
if !cost_table.iter().any(|(key, _)| key == pubkey) {
assert_eq!(*pubkey, removed_key);
blockstore
.delete_program_cost(pubkey)
.expect("delete old program");
}
});
// read back from db
let read_back = blockstore.read_program_costs().expect("read programs");
// verify
assert_eq!(read_back.len(), cost_table.len());
for (read_key, read_cost) in read_back {
assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
}
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
}
| 38.688448 | 173 | 0.528123 |
7246f3fe99c06479474d0aa5a943323929118e2e | 1,402 | use reqwest::StatusCode;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum ShopifyError {
#[error("not found")]
NotFound,
#[error(
"request error: path = '{path}', status = '{status}', body = '{body}'"
)]
Request {
path: String,
status: StatusCode,
body: String,
},
#[error("invalid response")]
InvalidResponse,
#[error("http error: {0}")]
Http(#[from] reqwest::Error),
#[error("io error: {0}")]
Io(#[from] std::io::Error),
#[error("json error: {0}")]
Json(#[from] serde_json::Error),
#[error("url parse error: {0}")]
UrlParse(#[from] url::ParseError),
#[error("page_info parameter was not found in the link url")]
PageInfoNotPresent,
}
impl ShopifyError {
pub fn should_try_again(&self) -> bool {
match *self {
ShopifyError::Request { status, .. } => {
let code = status.as_u16();
                // Retry on 429 Too Many Requests, 500 Internal Server Error,
                // and 503 Service Unavailable
code == 429 || code == 500 || code == 503
}
ShopifyError::Io(_) => true,
_ => false,
}
}
}
pub type ShopifyResult<T> = Result<T, ShopifyError>;
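// Editor's sketch (not part of the original crate): one way `should_try_again`
// can drive a bounded retry loop. `retry_with` and `max_attempts` are
// illustrative assumptions, not crate API.
pub fn retry_with<T>(
    max_attempts: usize,
    mut op: impl FnMut() -> ShopifyResult<T>,
) -> ShopifyResult<T> {
    let mut last_err = None;
    for _ in 0..max_attempts {
        match op() {
            Ok(value) => return Ok(value),
            // Transient failures (429/500/503, I/O) are retried.
            Err(err) if err.should_try_again() => last_err = Some(err),
            // Everything else is surfaced immediately.
            Err(err) => return Err(err),
        }
    }
    Err(last_err.expect("max_attempts must be at least 1"))
}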
pub trait OptionalShopifyResult<T> {
fn optional(self) -> ShopifyResult<Option<T>>;
}
impl<T> OptionalShopifyResult<T> for ShopifyResult<T> {
fn optional(self) -> ShopifyResult<Option<T>> {
match self {
Ok(v) => Ok(Some(v)),
Err(ShopifyError::NotFound) => Ok(None),
Err(e) => Err(e),
}
}
}
| 21.242424 | 74 | 0.589158 |
e808d9819986f1c55cfd2e0cc7acdfd77d954974 | 7,440 | use crate::leb128::{self, read_signed_leb128, write_signed_leb128};
use crate::serialize;
use std::borrow::Cow;
// -----------------------------------------------------------------------------
// Encoder
// -----------------------------------------------------------------------------
pub type EncodeResult = Result<(), !>;
pub struct Encoder {
pub data: Vec<u8>,
}
impl Encoder {
pub fn new(data: Vec<u8>) -> Encoder {
Encoder { data }
}
pub fn into_inner(self) -> Vec<u8> {
self.data
}
#[inline]
pub fn emit_raw_bytes(&mut self, s: &[u8]) {
self.data.extend_from_slice(s);
}
}
macro_rules! write_uleb128 {
($enc:expr, $value:expr, $fun:ident) => {{
leb128::$fun(&mut $enc.data, $value);
Ok(())
}};
}
macro_rules! write_sleb128 {
($enc:expr, $value:expr) => {{
write_signed_leb128(&mut $enc.data, $value as i128);
Ok(())
}};
}
impl serialize::Encoder for Encoder {
type Error = !;
#[inline]
fn emit_unit(&mut self) -> EncodeResult {
Ok(())
}
#[inline]
fn emit_usize(&mut self, v: usize) -> EncodeResult {
write_uleb128!(self, v, write_usize_leb128)
}
#[inline]
fn emit_u128(&mut self, v: u128) -> EncodeResult {
write_uleb128!(self, v, write_u128_leb128)
}
#[inline]
fn emit_u64(&mut self, v: u64) -> EncodeResult {
write_uleb128!(self, v, write_u64_leb128)
}
#[inline]
fn emit_u32(&mut self, v: u32) -> EncodeResult {
write_uleb128!(self, v, write_u32_leb128)
}
#[inline]
fn emit_u16(&mut self, v: u16) -> EncodeResult {
write_uleb128!(self, v, write_u16_leb128)
}
#[inline]
fn emit_u8(&mut self, v: u8) -> EncodeResult {
self.data.push(v);
Ok(())
}
#[inline]
fn emit_isize(&mut self, v: isize) -> EncodeResult {
write_sleb128!(self, v)
}
#[inline]
fn emit_i128(&mut self, v: i128) -> EncodeResult {
write_sleb128!(self, v)
}
#[inline]
fn emit_i64(&mut self, v: i64) -> EncodeResult {
write_sleb128!(self, v)
}
#[inline]
fn emit_i32(&mut self, v: i32) -> EncodeResult {
write_sleb128!(self, v)
}
#[inline]
fn emit_i16(&mut self, v: i16) -> EncodeResult {
write_sleb128!(self, v)
}
#[inline]
fn emit_i8(&mut self, v: i8) -> EncodeResult {
        // An `as` cast preserves the two's-complement bit pattern; no unsafe needed.
        self.emit_u8(v as u8)
}
#[inline]
fn emit_bool(&mut self, v: bool) -> EncodeResult {
self.emit_u8(if v { 1 } else { 0 })
}
#[inline]
fn emit_f64(&mut self, v: f64) -> EncodeResult {
        self.emit_u64(v.to_bits())
}
#[inline]
fn emit_f32(&mut self, v: f32) -> EncodeResult {
        self.emit_u32(v.to_bits())
}
#[inline]
fn emit_char(&mut self, v: char) -> EncodeResult {
self.emit_u32(v as u32)
}
#[inline]
fn emit_str(&mut self, v: &str) -> EncodeResult {
self.emit_usize(v.len())?;
self.emit_raw_bytes(v.as_bytes());
Ok(())
}
}
impl Encoder {
#[inline]
pub fn position(&self) -> usize {
self.data.len()
}
}
// -----------------------------------------------------------------------------
// Decoder
// -----------------------------------------------------------------------------
pub struct Decoder<'a> {
pub data: &'a [u8],
position: usize,
}
impl<'a> Decoder<'a> {
#[inline]
pub fn new(data: &'a [u8], position: usize) -> Decoder<'a> {
Decoder { data, position }
}
#[inline]
pub fn position(&self) -> usize {
self.position
}
#[inline]
pub fn set_position(&mut self, pos: usize) {
self.position = pos
}
#[inline]
pub fn advance(&mut self, bytes: usize) {
self.position += bytes;
}
#[inline]
pub fn read_raw_bytes(&mut self, s: &mut [u8]) -> Result<(), String> {
let start = self.position;
let end = start + s.len();
s.copy_from_slice(&self.data[start..end]);
self.position = end;
Ok(())
}
}
macro_rules! read_uleb128 {
($dec:expr, $t:ty, $fun:ident) => {{
let (value, bytes_read) = leb128::$fun(&$dec.data[$dec.position..]);
$dec.position += bytes_read;
Ok(value)
}};
}
macro_rules! read_sleb128 {
($dec:expr, $t:ty) => {{
let (value, bytes_read) = read_signed_leb128($dec.data, $dec.position);
$dec.position += bytes_read;
Ok(value as $t)
}};
}
impl<'a> serialize::Decoder for Decoder<'a> {
type Error = String;
#[inline]
fn read_nil(&mut self) -> Result<(), Self::Error> {
Ok(())
}
#[inline]
fn read_u128(&mut self) -> Result<u128, Self::Error> {
read_uleb128!(self, u128, read_u128_leb128)
}
#[inline]
fn read_u64(&mut self) -> Result<u64, Self::Error> {
read_uleb128!(self, u64, read_u64_leb128)
}
#[inline]
fn read_u32(&mut self) -> Result<u32, Self::Error> {
read_uleb128!(self, u32, read_u32_leb128)
}
#[inline]
fn read_u16(&mut self) -> Result<u16, Self::Error> {
read_uleb128!(self, u16, read_u16_leb128)
}
#[inline]
fn read_u8(&mut self) -> Result<u8, Self::Error> {
let value = self.data[self.position];
self.position += 1;
Ok(value)
}
#[inline]
fn read_usize(&mut self) -> Result<usize, Self::Error> {
read_uleb128!(self, usize, read_usize_leb128)
}
#[inline]
fn read_i128(&mut self) -> Result<i128, Self::Error> {
read_sleb128!(self, i128)
}
#[inline]
fn read_i64(&mut self) -> Result<i64, Self::Error> {
read_sleb128!(self, i64)
}
#[inline]
fn read_i32(&mut self) -> Result<i32, Self::Error> {
read_sleb128!(self, i32)
}
#[inline]
fn read_i16(&mut self) -> Result<i16, Self::Error> {
read_sleb128!(self, i16)
}
#[inline]
fn read_i8(&mut self) -> Result<i8, Self::Error> {
let as_u8 = self.data[self.position];
self.position += 1;
        Ok(as_u8 as i8)
}
#[inline]
fn read_isize(&mut self) -> Result<isize, Self::Error> {
read_sleb128!(self, isize)
}
#[inline]
fn read_bool(&mut self) -> Result<bool, Self::Error> {
let value = self.read_u8()?;
Ok(value != 0)
}
#[inline]
fn read_f64(&mut self) -> Result<f64, Self::Error> {
let bits = self.read_u64()?;
Ok(f64::from_bits(bits))
}
#[inline]
fn read_f32(&mut self) -> Result<f32, Self::Error> {
let bits = self.read_u32()?;
Ok(f32::from_bits(bits))
}
#[inline]
fn read_char(&mut self) -> Result<char, Self::Error> {
let bits = self.read_u32()?;
Ok(::std::char::from_u32(bits).unwrap())
}
#[inline]
fn read_str(&mut self) -> Result<Cow<'_, str>, Self::Error> {
let len = self.read_usize()?;
let s = ::std::str::from_utf8(&self.data[self.position..self.position + len]).unwrap();
self.position += len;
Ok(Cow::Borrowed(s))
}
#[inline]
fn error(&mut self, err: &str) -> Self::Error {
err.to_string()
}
}
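// Editor's sketch (not part of the original module): a round-trip check for
// the LEB128-based Encoder/Decoder pair defined above.
#[cfg(test)]
mod opaque_roundtrip_tests {
    use super::*;
    // Import the traits anonymously so they don't shadow the structs.
    use crate::serialize::{Decoder as _, Encoder as _};

    #[test]
    fn u32_roundtrip() {
        let mut encoder = Encoder::new(Vec::new());
        encoder.emit_u32(0xDEAD_BEEF).unwrap();
        let data = encoder.into_inner();
        let mut decoder = Decoder::new(&data, 0);
        assert_eq!(decoder.read_u32().unwrap(), 0xDEAD_BEEF);
    }
}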
| 23.322884 | 95 | 0.522177 |
1ed231c0f3f48abfe7afd5a8af23fe98fd746196 | 6,142 | use super::project;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::io::Write;
#[derive(Clone, Serialize, Deserialize)]
pub struct Repo {
origin: String,
}
#[derive(Default, Clone, Serialize, Deserialize)]
pub struct Index {
pub repos: HashMap<String, Repo>,
}
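// Resolves the `repos` table of zz.toml into local search paths, cloning git
// repositories into target/repos as needed. `project.repos` maps a repo name
// to its origin: a git:// or git+ssh:// URL, or a local path that is
// normalized to a file:// URL below. (Editor's note, inferred from the
// parsing code rather than from zz documentation.)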
pub fn index(project: &project::Config) -> HashSet<PathBuf> {
    // shortcut so we don't try to write to read-only modules, e.g. when installed as a distro pkg
    if project.repos.is_empty() {
return HashSet::new();
}
let td = super::project::target_dir();
let cachepath = td.join("repos").join("index");
let index = if let Some(index) = cache("zz.toml", &cachepath) {
index
} else {
let mut index = Index::default();
std::fs::create_dir_all(td.join("repos"))
.expect("cannot create target/repos");
for (name, surl) in &project.repos {
let url = match url::Url::parse(surl) {
Ok(v) => v,
Err(url::ParseError::RelativeUrlWithoutBase) => {
url::Url::parse(&format!("file://{}", surl))
.expect(&format!("unable to parse repo url: {}", surl))
}
Err(e) => {
panic!(format!("unable to parse repo url: {}: {}", surl, e));
}
};
match url.scheme() {
"file" => {}
"https" => {
                    panic!(
                        "unsupported scheme in repo url: {}, did you mean git:// ?",
                        surl
                    );
}
"git" | "git+ssh" => {
let np = td.join("repos").join(name);
if np.exists() {
std::fs::remove_dir_all(&np).expect(&format!("cannot remove {:?}", np));
}
std::fs::create_dir_all(&np).expect(&format!("cannot create {:?}", np));
println!("downloading {}", surl);
Command::new("git")
.args(&["clone", "-q", surl, np.to_string_lossy().as_ref()])
.status()
.expect("failed to execute git");
let np_sub = np.join("modules");
if !np_sub.exists() {
if !np.join("zz.toml").exists() {
panic!(
"unsupported repo in url: {} : no zz.toml or modules subdir",
surl
);
}
let npx = td.join("repos").join("___").join(name.clone());
if npx.exists() {
std::fs::remove_dir_all(&npx)
.expect(&format!("cannot remove {:?}", npx));
}
std::fs::create_dir_all(&npx).expect(&format!("cannot create {:?}", npx));
std::fs::rename(&np, &npx).expect(&format!(
"cannot move {:?} to {:?}",
np,
npx
));
}
}
_ => {
panic!(format!("unsupported scheme in repo url: {}", surl));
}
}
index.repos.insert(
name.clone(),
Repo {
origin: surl.clone(),
},
);
}
let mut cachefile =
std::fs::File::create(&cachepath).expect(&format!("cannot create {:?}", cachepath));
        // write_all guarantees the whole buffer is written, unlike a bare write()
        cachefile.write_all(
            &rmp_serde::to_vec(&index).expect(&format!("cannot encode {:?}", cachepath))[..]
        ).expect(&format!("cannot write {:?}", cachepath));
index
};
let mut searchpaths = HashSet::new();
for (name, repo) in &index.repos {
let url = match url::Url::parse(&repo.origin) {
Ok(v) => v,
Err(url::ParseError::RelativeUrlWithoutBase) => {
url::Url::parse(&format!("file://{}", repo.origin))
.expect(&format!("unable to parse repo url: {}", repo.origin))
}
Err(e) => {
panic!(format!("unable to parse repo url: {}: {}", repo.origin, e));
}
};
match url.scheme() {
"file" => {
let path = Path::new(url.path()).to_path_buf();
searchpaths.insert(path.canonicalize().unwrap_or(path));
let path = Path::new(url.path()).join("modules");
searchpaths.insert(path.canonicalize().unwrap_or(path));
}
_ => {
let path = td.join("repos").join("___");
searchpaths.insert(path.canonicalize().unwrap_or(path));
let path = td.join("repos").join(name).join("modules");
searchpaths.insert(path.canonicalize().unwrap_or(path));
}
}
}
return searchpaths;
}
pub fn cache(source_file: &str, cache_file: &Path) -> Option<Index> {
let m1 = match std::fs::metadata(&source_file) {
Ok(v) => v,
Err(_) => return None,
};
let m1 = m1
.modified()
.expect(&format!("cannot stat {:?}", source_file));
let m2 = match std::fs::metadata(&cache_file) {
Ok(v) => v,
Err(_) => return None,
};
let m2 = m2
.modified()
.expect(&format!("cannot stat {:?}", cache_file));
if m1 > m2 {
return None;
}
match std::fs::File::open(&cache_file) {
Ok(f) => match rmp_serde::from_read(&f) {
Ok(cf) => {
return Some(cf);
}
Err(_) => {
std::fs::remove_file(&cache_file)
.expect(&format!("cannot remove {:?}", cache_file));
}
},
Err(_) => (),
};
return None;
}
| 34.505618 | 98 | 0.438131 |
7682d31c12206dd5f73b05d4af54f452951acafc | 6,764 | // Copyright 2018-2020 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::config::{ConfigError, ConfigSource, PartialConfig, PartialConfigBuilder};
const DEFAULT_CERT_DIR: &str = "/etc/splinter/certs/";
const DEFAULT_STATE_DIR: &str = "/var/lib/splinter/";
const CLIENT_CERT: &str = "client.crt";
const CLIENT_KEY: &str = "private/client.key";
const SERVER_CERT: &str = "server.crt";
const SERVER_KEY: &str = "private/server.key";
const CA_PEM: &str = "ca.pem";
const REGISTRY_AUTO_REFRESH_DEFAULT: u64 = 600; // 600 seconds = 10 minutes
const REGISTRY_FORCED_REFRESH_DEFAULT: u64 = 10; // 10 seconds
const HEARTBEAT_DEFAULT: u64 = 30;
const DEFAULT_ADMIN_SERVICE_COORDINATOR_TIMEOUT: u64 = 30; // 30 seconds
/// Holds the default configuration values.
pub struct DefaultPartialConfigBuilder;
impl DefaultPartialConfigBuilder {
pub fn new() -> Self {
DefaultPartialConfigBuilder {}
}
}
impl PartialConfigBuilder for DefaultPartialConfigBuilder {
fn build(self) -> Result<PartialConfig, ConfigError> {
let mut partial_config = PartialConfig::new(ConfigSource::Default);
partial_config = partial_config
.with_storage(Some(String::from("yaml")))
.with_tls_cert_dir(Some(String::from(DEFAULT_CERT_DIR)))
.with_tls_ca_file(Some(String::from(CA_PEM)))
.with_tls_client_cert(Some(String::from(CLIENT_CERT)))
.with_tls_client_key(Some(String::from(CLIENT_KEY)))
.with_tls_server_cert(Some(String::from(SERVER_CERT)))
.with_tls_server_key(Some(String::from(SERVER_KEY)))
.with_service_endpoint(Some(String::from("127.0.0.1:8043")))
.with_network_endpoints(Some(vec![String::from("127.0.0.1:8044")]))
.with_peers(Some(vec![]))
.with_bind(Some(String::from("127.0.0.1:8080")))
.with_registries(Some(vec![]))
.with_registry_auto_refresh_interval(Some(REGISTRY_AUTO_REFRESH_DEFAULT))
.with_registry_forced_refresh_interval(Some(REGISTRY_FORCED_REFRESH_DEFAULT))
.with_heartbeat_interval(Some(HEARTBEAT_DEFAULT))
.with_admin_service_coordinator_timeout(Some(DEFAULT_ADMIN_SERVICE_COORDINATOR_TIMEOUT))
.with_state_dir(Some(String::from(DEFAULT_STATE_DIR)))
.with_tls_insecure(Some(false))
.with_no_tls(Some(false));
#[cfg(feature = "biome")]
{
partial_config = partial_config.with_biome_enabled(Some(false));
}
#[cfg(feature = "database")]
{
partial_config = partial_config.with_database(Some(String::from("127.0.0.1:5432")));
}
Ok(partial_config)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::time::Duration;
/// Asserts config values based on the default values.
fn assert_default_values(config: PartialConfig) {
assert_eq!(config.storage(), Some(String::from("yaml")));
assert_eq!(config.tls_cert_dir(), Some(String::from(DEFAULT_CERT_DIR)));
assert_eq!(config.tls_ca_file(), Some(String::from(CA_PEM)));
assert_eq!(config.tls_client_cert(), Some(String::from(CLIENT_CERT)));
assert_eq!(config.tls_client_key(), Some(String::from(CLIENT_KEY)));
assert_eq!(config.tls_server_cert(), Some(String::from(SERVER_CERT)));
assert_eq!(config.tls_server_key(), Some(String::from(SERVER_KEY)));
assert_eq!(
config.service_endpoint(),
Some(String::from("127.0.0.1:8043"))
);
assert_eq!(
config.network_endpoints(),
Some(vec![String::from("127.0.0.1:8044")])
);
assert_eq!(config.peers(), Some(vec![]));
assert_eq!(config.node_id(), None);
assert_eq!(config.display_name(), None);
assert_eq!(config.bind(), Some(String::from("127.0.0.1:8080")));
#[cfg(feature = "database")]
assert_eq!(config.database(), Some(String::from("127.0.0.1:5432")));
assert_eq!(config.registries(), Some(vec![]));
assert_eq!(
config.registry_auto_refresh_interval(),
Some(REGISTRY_AUTO_REFRESH_DEFAULT)
);
assert_eq!(
config.registry_forced_refresh_interval(),
Some(REGISTRY_FORCED_REFRESH_DEFAULT)
);
assert_eq!(config.heartbeat_interval(), Some(HEARTBEAT_DEFAULT));
assert_eq!(
config.admin_service_coordinator_timeout(),
Some(Duration::from_secs(
DEFAULT_ADMIN_SERVICE_COORDINATOR_TIMEOUT
))
);
assert_eq!(config.state_dir(), Some(String::from(DEFAULT_STATE_DIR)));
assert_eq!(config.tls_insecure(), Some(false));
assert_eq!(config.no_tls(), Some(false));
#[cfg(feature = "biome")]
assert_eq!(config.biome_enabled(), Some(false));
// Assert the source is correctly identified for this PartialConfig object.
assert_eq!(config.source(), ConfigSource::Default);
}
#[test]
/// This test verifies that a PartialConfig object is accurately constructed by using the
/// `build` method implemented by the DefaultPartialConfigBuilder module. The following steps
/// are performed:
///
/// 1. An empty DefaultPartialConfigBuilder object is constructed, which implements the
/// PartialConfigBuilder trait.
/// 2. A PartialConfig object is created by calling the `build` method of the
/// DefaultPartialConfigBuilder object.
///
    /// This test then verifies the PartialConfig object built from the DefaultPartialConfigBuilder
    /// object has the correct values by asserting each expected value.
fn test_default_builder() {
// Create a new DefaultPartialConfigBuilder object, which implements the
// PartialConfigBuilder trait.
let default_config = DefaultPartialConfigBuilder::new();
// Create a PartialConfig object using the `build` method.
let partial_config = default_config
.build()
.expect("Unable to build DefaultPartialConfigBuilder");
// Compare the generated PartialConfig object against the expected values.
assert_default_values(partial_config);
}
}
| 43.358974 | 100 | 0.667209 |
d59f66bbb071b23a95da2d253ae640ff84992d09 | 6,397 | use super::generics::GenericArguments;
use proc_macro2::{Ident, Span};
use quote::{quote, quote_spanned};
use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
use std::convert::TryFrom;
use syn::parse::Result;
use syn::spanned::Spanned;
use syn::{Error, Expr, Field, Type, Visibility};
#[allow(clippy::large_enum_variant)]
#[derive(PartialEq, Eq)]
enum PropAttr {
Required { wrapped_name: Ident },
PropOr(Expr),
PropOrElse(Expr),
PropOrDefault,
}
#[derive(Eq)]
pub struct PropField {
ty: Type,
name: Ident,
attr: PropAttr,
}
impl PropField {
/// All required property fields are wrapped in an `Option`
    pub fn is_required(&self) -> bool {
        matches!(self.attr, PropAttr::Required { .. })
    }
/// This step name is descriptive to help a developer realize they missed a required prop
pub fn to_step_name(&self, props_name: &Ident) -> Ident {
Ident::new(
&format!("{}_missing_required_prop_{}", props_name, self.name),
Span::call_site(),
)
}
/// Used to transform the `PropWrapper` struct into `Properties`
pub fn to_field_setter(&self) -> proc_macro2::TokenStream {
let name = &self.name;
match &self.attr {
PropAttr::Required { wrapped_name } => {
quote! {
#name: self.wrapped.#wrapped_name.unwrap(),
}
}
_ => {
quote! {
#name: self.wrapped.#name,
}
}
}
}
/// Wrap all required props in `Option`
pub fn to_field_def(&self) -> proc_macro2::TokenStream {
let ty = &self.ty;
match &self.attr {
PropAttr::Required { wrapped_name } => {
quote! {
#wrapped_name: ::std::option::Option<#ty>,
}
}
_ => {
let name = &self.name;
quote! {
#name: #ty,
}
}
}
}
/// All optional props must implement the `Default` trait
pub fn to_default_setter(&self) -> proc_macro2::TokenStream {
match &self.attr {
PropAttr::Required { wrapped_name } => {
quote! {
#wrapped_name: ::std::option::Option::None,
}
}
PropAttr::PropOr(value) => {
let name = &self.name;
let span = value.span();
quote_spanned! {span=>
#name: #value,
}
}
PropAttr::PropOrElse(func) => {
let name = &self.name;
let span = func.span();
quote_spanned! {span=>
#name: (#func)(),
}
}
PropAttr::PropOrDefault => {
let name = &self.name;
quote! {
#name: ::std::default::Default::default(),
}
}
}
}
/// Each field is set using a builder method
pub fn to_build_step_fn(
&self,
builder_name: &Ident,
generic_arguments: &GenericArguments,
vis: &Visibility,
) -> proc_macro2::TokenStream {
let Self { name, ty, attr } = self;
match attr {
PropAttr::Required { wrapped_name } => {
quote! {
#[doc(hidden)]
#vis fn #name(mut self, #name: #ty) -> #builder_name<#generic_arguments> {
self.wrapped.#wrapped_name = ::std::option::Option::Some(#name);
#builder_name {
wrapped: self.wrapped,
_marker: ::std::marker::PhantomData,
}
}
}
}
_ => {
quote! {
#[doc(hidden)]
#vis fn #name(mut self, #name: #ty) -> #builder_name<#generic_arguments> {
self.wrapped.#name = #name;
self
}
}
}
}
}
// Detect Properties 2.0 attributes
fn attribute(named_field: &Field) -> Result<PropAttr> {
let attr = named_field.attrs.iter().find(|attr| {
attr.path.is_ident("prop_or")
|| attr.path.is_ident("prop_or_else")
|| attr.path.is_ident("prop_or_default")
});
if let Some(attr) = attr {
if attr.path.is_ident("prop_or") {
Ok(PropAttr::PropOr(attr.parse_args()?))
} else if attr.path.is_ident("prop_or_else") {
Ok(PropAttr::PropOrElse(attr.parse_args()?))
} else if attr.path.is_ident("prop_or_default") {
Ok(PropAttr::PropOrDefault)
} else {
unreachable!()
}
} else {
let ident = named_field.ident.as_ref().unwrap();
let wrapped_name = Ident::new(&format!("{}_wrapper", ident), Span::call_site());
Ok(PropAttr::Required { wrapped_name })
}
}
}
impl TryFrom<Field> for PropField {
type Error = Error;
fn try_from(field: Field) -> Result<Self> {
Ok(PropField {
attr: Self::attribute(&field)?,
ty: field.ty,
name: field.ident.unwrap(),
})
}
}
impl PartialOrd for PropField {
fn partial_cmp(&self, other: &PropField) -> Option<Ordering> {
if self.name == other.name {
Some(Ordering::Equal)
} else if self.name == "children" {
Some(Ordering::Greater)
} else if other.name == "children" {
Some(Ordering::Less)
} else {
self.name.partial_cmp(&other.name)
}
}
}
impl Ord for PropField {
fn cmp(&self, other: &PropField) -> Ordering {
if self.name == other.name {
Ordering::Equal
} else if self.name == "children" {
Ordering::Greater
} else if other.name == "children" {
Ordering::Less
} else {
self.name.cmp(&other.name)
}
}
}
impl PartialEq for PropField {
fn eq(&self, other: &Self) -> bool {
self.name == other.name
}
}
| 30.032864 | 94 | 0.477099 |
6162acb7446c2251b6216f092f7c75f2e02ca05e | 2,942 | use libc::{c_char, c_int, c_void, size_t};
use {TF_Buffer, TF_Graph, TF_Operation, TF_Output, TF_Status, TF_Tensor};
#[derive(Clone, Copy, Debug)]
pub enum TF_Session {}
#[derive(Clone, Copy, Debug)]
pub enum TF_SessionOptions {}
extern {
pub fn TF_NewSession(graph: *mut TF_Graph,
options: *const TF_SessionOptions,
status: *mut TF_Status)
-> *mut TF_Session;
pub fn TF_CloseSession(session: *mut TF_Session, status: *mut TF_Status);
pub fn TF_DeleteSession(session: *mut TF_Session, status: *mut TF_Status);
pub fn TF_SessionRun(session: *mut TF_Session,
run_options: *const TF_Buffer,
inputs: *const TF_Output,
input_values: *const *mut TF_Tensor,
num_inputs: c_int,
outputs: *const TF_Output,
output_values: *mut *mut TF_Tensor,
num_outputs: c_int,
targets: *const *const TF_Operation,
num_targets: c_int,
run_metadata: *mut TF_Buffer,
status: *mut TF_Status);
pub fn TF_SessionPRunSetup(session: *mut TF_Session,
inputs: *const TF_Output,
num_inputs: c_int,
outputs: *const TF_Output,
num_outputs: c_int,
targets: *const *const TF_Operation,
num_targets: c_int,
handle: *mut *const c_char,
status: *mut TF_Status);
pub fn TF_SessionPRun(session: *mut TF_Session,
handle: *const c_char,
inputs: *const TF_Output,
input_values: *const *mut TF_Tensor,
num_inputs: c_int,
outputs: *const TF_Output,
output_values: *mut *mut TF_Tensor,
num_outputs: c_int,
targets: *const *const TF_Operation,
num_targets: c_int,
status: *mut TF_Status);
pub fn TF_Reset(options: *const TF_SessionOptions,
containers: *mut *const c_char,
num_containers: c_int,
status: *mut TF_Status);
}
extern {
pub fn TF_NewSessionOptions() -> *mut TF_SessionOptions;
pub fn TF_DeleteSessionOptions(options: *mut TF_SessionOptions);
pub fn TF_SetTarget(options: *mut TF_SessionOptions, target: *const c_char);
pub fn TF_SetConfig(options: *mut TF_SessionOptions,
proto: *const c_void,
proto_length: size_t,
status: *mut TF_Status);
}
| 45.261538 | 80 | 0.502719 |
b9cb8628a8b3a8df58979ec407cdd286cadc0a39 | 794 | use digest::dev::{digest_test, one_million_a};
use digest::new_test;
new_test!(sha224_main, "sha224", sha2::Sha224, digest_test);
new_test!(sha256_main, "sha256", sha2::Sha256, digest_test);
new_test!(sha384_main, "sha384", sha2::Sha384, digest_test);
new_test!(sha512_main, "sha512", sha2::Sha512, digest_test);
new_test!(
sha512_224_main,
"sha512_224",
sha2::Sha512Trunc224,
digest_test
);
new_test!(
sha512_256_main,
"sha512_256",
sha2::Sha512Trunc256,
digest_test
);
#[test]
fn sha256_1million_a() {
let output = include_bytes!("data/sha256_one_million_a.bin");
one_million_a::<sha2::Sha256>(output);
}
#[test]
fn sha512_1million_a() {
let output = include_bytes!("data/sha512_one_million_a.bin");
one_million_a::<sha2::Sha512>(output);
}
| 24.8125 | 65 | 0.709068 |
9b1f27169d11af78c3fc966b6dad5c9b16e22156 | 23 | mod fixture;
mod fuzz;
| 7.666667 | 12 | 0.73913 |
f96c11f62e3d801720725b5c0da94718a6bcf03e | 685 | use std::sync::mpsc;
use std::thread;
use std::time::Duration;
use termion::event::Key;
use termion::input::TermRead;
pub enum Event {
Tick,
Input(Key),
}
pub fn receiver() -> mpsc::Receiver<Event> {
let (timer_tx, event) = mpsc::channel();
let input_tx = timer_tx.clone();
thread::spawn(move || loop {
thread::sleep(Duration::from_millis(500));
timer_tx.send(Event::Tick).expect("tick failed");
});
thread::spawn(move || {
let stdin = std::io::stdin();
for c in stdin.keys() {
let key = c.unwrap_or(Key::Null);
input_tx.send(Event::Input(key)).expect("input failed");
}
});
event
}
| 24.464286 | 68 | 0.579562 |
ac6377da101ebf1bf409a6f0d8e3d612d01be80b | 2,962 | use pest::iterators::Pair;
use crate::parser::Rule;
#[derive(Clone, Debug)]
#[allow(dead_code)]
pub enum Expression {
Assignment {
identifier: String,
kind: Option<String>,
value: Box<Expression>
},
FunctionCall {
identifier: String,
arguments: Option<Vec<FunctionArgument>>
},
Value { as_string: String },
Null
}
#[derive(Clone, Debug)]
pub struct FunctionArgument {
label: Option<String>,
value: Expression
}
fn parse_fn_arg(arg: Pair<Rule>) -> FunctionArgument {
let mut label = None;
let mut value = Expression::Null;
for node in arg.into_inner() {
match node.as_rule() {
Rule::label => label = Some(String::from(node.as_str())),
Rule::expression => value = parse_expression(node),
_ => println!("UNCHECKED RULE: {:?}", node.as_rule())
}
}
FunctionArgument { label, value }
}
fn parse_fn_args(arg_list: Pair<Rule>) -> Option<Vec<FunctionArgument>> {
let mut args: Vec<FunctionArgument> = Vec::new();
for node in arg_list.into_inner() {
match node.as_rule() {
Rule::function_arg => args.push(parse_fn_arg(node)),
_ => unreachable!()
}
}
Some(args)
}
fn parse_function_call(fn_call: Pair<Rule>) -> Expression {
let mut identifier = String::new();
let mut arguments = None;
for node in fn_call.into_inner() {
match node.as_rule() {
Rule::identifier => identifier = String::from(node.as_str()),
Rule::function_arg_list => arguments = parse_fn_args(node),
_ => println!("UNCHECKED RULE: {:?}", node.as_rule())
}
}
Expression::FunctionCall { identifier, arguments }
}
fn parse_assignment(expr: Pair<Rule>) -> Expression {
let mut identifier = String::new();
let mut value = Box::new(Expression::Null);
let mut kind = None;
for node in expr.into_inner() {
match node.as_rule() {
Rule::identifier => identifier = String::from(node.as_str()),
Rule::kind => kind = Some(String::from(node.as_str())),
Rule::expression => {
let parsed_value = parse_expression(node);
value = Box::new(parsed_value);
},
_ => unreachable!()
}
}
Expression::Assignment { identifier, value, kind }
}
pub fn parse_expression(expr: Pair<Rule>) -> Expression {
let mut new_expr = Expression::Null;
for node in expr.into_inner() {
match node.as_rule() {
Rule::function_call => new_expr = parse_function_call(node),
Rule::assignment => new_expr = parse_assignment(node),
Rule::value => {
new_expr = Expression::Value {
as_string: String::from(node.as_str())
}
},
_ => println!("UNCHECKED RULE: {:?}", node.as_rule())
}
}
new_expr
}
| 27.943396 | 73 | 0.574274 |
1a0a4f7806f09e581ee8c6c8125b6d31693f7e4f | 31,304 | #![allow(unused_assignments)]
use crate::ebml::{vid, vint, Error};
use nom::{
number::streaming::{be_i16, be_u8},
IResult,
};
#[derive(Debug, Clone, PartialEq)]
pub enum SegmentElement<'a> {
SeekHead(SeekHead),
Info(Info),
Tracks(Tracks),
Chapters(Chapters),
Cluster(Cluster<'a>),
Cues(Cues),
Attachments(Attachments),
Tags(Tags),
Void(u64),
Unknown(u64, Option<u64>),
}
// https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.3
named!(pub segment<&[u8], (u64, Option<u64>), Error>,
do_parse!(
id: verify!(vid, |val:&u64| *val == 0x18538067) >>
size: opt!(vint) >>
(id, size)
)
);
#[macro_export]
macro_rules! sub_element(
($i:expr, $parser:ident) => ({
do_parse!($i,
size: vint
>> crc: opt!(ebml_binary!(0xBF))
>> element: flat_map!(take!((size - if crc.is_some() { 6 } else { 0 }) as usize), $parser)
>> (element)
)
});
($i:expr, $submac:ident!( $($args:tt)* )) => ({
do_parse!($i,
size: vint
>> crc: opt!(ebml_binary!(0xBF))
>> element: flat_map!(take!((size - if crc.is_some() { 6 } else { 0 }) as usize), $submac!($($args)*))
>> (element)
)
});
);
// Segment, the root element, has id 0x18538067
named!(pub segment_element<&[u8], SegmentElement, Error>,
switch!(vid,
0x114D9B74 => sub_element!(seek_head)
| 0x1549A966 => sub_element!(info)
| 0x1F43B675 => sub_element!(cluster)
| 0x1043A770 => sub_element!(chapters)
| 0x1254C367 => sub_element!(call!(ret_tags))
| 0x1941A469 => sub_element!(call!(ret_attachments))
| 0x1654AE6B => sub_element!(tracks)
| 0x1C53BB6B => sub_element!(call!(ret_cues))
| 0xEC => do_parse!(size: vint >> take!(size as usize) >> (SegmentElement::Void(size)))
| unknown => do_parse!(
size: opt!(vint) >>
cond!(size.is_some(), take!( (size.unwrap() as usize) )) >>
(SegmentElement::Unknown(unknown, size))
)
)
);
// hack to fix type inference issues
pub fn ret_tags(input: &[u8]) -> IResult<&[u8], SegmentElement, Error> {
Ok((input, SegmentElement::Tags(Tags {})))
}
// hack to fix type inference issues
pub fn ret_attachments(input: &[u8]) -> IResult<&[u8], SegmentElement, Error> {
Ok((input, SegmentElement::Attachments(Attachments {})))
}
// hack to fix type inference issues
pub fn ret_cues(input: &[u8]) -> IResult<&[u8], SegmentElement, Error> {
Ok((input, SegmentElement::Cues(Cues {})))
}
#[derive(Debug, Clone, PartialEq)]
pub struct SeekHead {
pub positions: Vec<Seek>,
}
//https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.4
named!(pub seek_head<&[u8], SegmentElement, Error>,
do_parse!(
positions: many1!(complete!(seek)) >>
(SegmentElement::SeekHead(SeekHead {
positions: positions,
}))
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct Seek {
pub id: Vec<u8>,
pub position: u64,
}
//https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.4
named!(pub seek<&[u8], Seek, Error>,
ebml_master!(0x4DBB,
do_parse!(
t: permutation_opt!(
ebml_binary!(0x53AB), // SeekID
ebml_uint!(0x53AC) // SeekPosition
) >>
(Seek {
id: t.0,
position: t.1,
})
)
)
);
#[derive(Debug, Clone, PartialEq, Default)]
pub struct Info {
pub segment_uid: Option<Vec<u8>>,
pub segment_filename: Option<String>,
pub prev_uid: Option<Vec<u8>>,
pub prev_filename: Option<String>,
pub next_uid: Option<Vec<u8>>,
pub next_filename: Option<String>,
pub segment_family: Option<Vec<u8>>,
pub chapter_translate: Option<ChapterTranslate>,
pub timecode_scale: u64,
    pub duration: Option<f64>,
pub date_utc: Option<Vec<u8>>, //FIXME: should be date
pub title: Option<String>,
pub muxing_app: String,
pub writing_app: String,
}
//https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.8
named!(pub info<&[u8], SegmentElement, Error>,
do_parse!(
t: permutation_opt!(
ebml_binary!(0x73A4)?, // SegmentUID
      ebml_str!(0x7384)?,      // SegmentFilename FIXME SHOULD BE UTF-8 not str
ebml_binary!(0x3CB923)?, // PrevUID
ebml_str!(0x3C83AB)?, // PrevFilename FIXME SHOULD BE UTF-8 not str
ebml_binary!(0x3EB923)?, // NextUID
ebml_str!(0x3E83BB)?, // NextFilename FIXME SHOULD BE UTF-8 not str
ebml_binary!(0x4444)?, // SegmentFamily
chapter_translate?, //
ebml_uint!(0x2AD7B1), // TimecodeScale
      ebml_float!(0x4489)?,    // Duration
ebml_binary!(0x4461)?, // DateUTC FIXME: should be date
ebml_str!(0x7BA9)?, // Title FIXME SHOULD BE UTF-8 not str
ebml_str!(0x4D80), // MuxingApp FIXME SHOULD BE UTF-8 not str
ebml_str!(0x5741) // WritingApp FIXME SHOULD BE UTF-8 not str
) >> (SegmentElement::Info(Info {
segment_uid: t.0,
segment_filename: t.1,
prev_uid: t.2,
prev_filename: t.3,
next_uid: t.4,
next_filename: t.5,
segment_family: t.6,
chapter_translate: t.7,
timecode_scale: t.8,
duration: t.9,
date_utc: t.10,
title: t.11,
muxing_app: t.12,
writing_app: t.13
}))
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct ChapterTranslate {}
// hack to fix type inference issues
pub fn ret_chapter_translate(input: &[u8]) -> IResult<&[u8], ChapterTranslate, Error> {
Ok((input, ChapterTranslate {}))
}
//https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.16
named!(pub chapter_translate<&[u8], ChapterTranslate, Error>,
//ebml_master!(0x6924, value!(ChapterTranslate{}))
ebml_master!(0x6924, call!(ret_chapter_translate))
);
//https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.26
#[derive(Debug, Clone, PartialEq)]
pub struct Cluster<'a> {
pub timecode: u64,
pub silent_tracks: Option<SilentTracks>,
pub position: Option<u64>,
pub prev_size: Option<u64>,
pub simple_block: Vec<&'a [u8]>,
pub block_group: Vec<BlockGroup<'a>>,
pub encrypted_block: Option<&'a [u8]>,
}
named!(pub cluster<&[u8], SegmentElement, Error>,
do_parse!(
t: permutation_opt!(
ebml_uint!(0xE7),
silent_tracks?,
ebml_uint!(0xA7)?,
ebml_uint!(0xAB)?,
ebml_binary_ref!(0xA3)+,
block_group+,
ebml_binary_ref!(0xAF)?
) >> (SegmentElement::Cluster(Cluster {
timecode: t.0,
silent_tracks: t.1,
position: t.2,
prev_size: t.3,
simple_block: t.4,
block_group: t.5,
encrypted_block: t.6,
}))
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct SilentTracks {
pub numbers: Vec<u64>,
}
//https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.16
named!(pub silent_tracks<&[u8], SilentTracks, Error>,
ebml_master!(0x5854, map!(many0!(ebml_uint!(0x58D7)), |v| SilentTracks { numbers: v }))
);
#[derive(Debug, Clone, PartialEq)]
pub struct BlockGroup<'a> {
pub block: &'a [u8],
pub block_virtual: Option<Vec<u8>>,
pub block_additions: Option<BlockAdditions>,
pub block_duration: Option<u64>,
pub reference_priority: u64,
pub reference_block: Option<u64>,
pub reference_virtual: Option<i64>,
pub codec_state: Option<Vec<u8>>,
pub discard_padding: Option<i64>,
pub slices: Option<Slices>,
pub reference_frame: Option<ReferenceFrame>,
}
//https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.16
//TODO
named!(pub block_group<&[u8], BlockGroup, Error>,
ebml_master!(0x5854,
do_parse!(
t: permutation_opt!(
ebml_binary_ref!(0xA1),
ebml_binary!(0xA2)?,
block_additions?,
ebml_uint!(0x9B)?,
ebml_uint!(0xFA),
ebml_uint!(0xFB)?,
ebml_int!(0xFD)?,
ebml_binary!(0xA4)?,
ebml_int!(0x75A2)?,
slices?,
reference_frame?
) >> (BlockGroup {
block: t.0,
block_virtual: t.1,
block_additions: t.2,
block_duration: t.3,
reference_priority: t.4,
reference_block: t.5,
reference_virtual: t.6,
codec_state: t.7,
discard_padding: t.8,
slices: t.9,
reference_frame: t.10
})
)
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct BlockAdditions {}
// hack to fix type inference issues
pub fn ret_block_additions(input: &[u8]) -> IResult<&[u8], BlockAdditions, Error> {
Ok((input, BlockAdditions {}))
}
//https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.16
//TODO
named!(pub block_additions<&[u8], BlockAdditions, Error>,
ebml_master!(0x75A1, call!(ret_block_additions))
);
#[derive(Debug, Clone, PartialEq)]
pub struct Slices {}
// hack to fix type inference issues
pub fn ret_slices(input: &[u8]) -> IResult<&[u8], Slices, Error> {
Ok((input, Slices {}))
}
//https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.46
//TODO
named!(pub slices<&[u8], Slices, Error>,
ebml_master!(0x8E, call!(ret_slices))
);
#[derive(Debug, Clone, PartialEq)]
pub struct ReferenceFrame {}
// hack to fix type inference issues
pub fn ret_reference_frame(input: &[u8]) -> IResult<&[u8], ReferenceFrame, Error> {
Ok((input, ReferenceFrame {}))
}
//https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.53
//TODO
named!(pub reference_frame<&[u8], ReferenceFrame, Error>,
ebml_master!(0xC8, call!(ret_reference_frame))
);
#[derive(Debug, Clone, PartialEq)]
pub struct Block {
pub track_number: u64,
pub timecode: i16,
pub invisible: bool,
pub lacing: Lacing,
}
named!(pub block<&[u8], Block, Error>,
do_parse!(
track_number: vint
>> timecode: be_i16
>> flags: map_opt!(be_u8, block_flags)
>> (Block {
track_number: track_number,
timecode: timecode,
invisible: flags.invisible,
lacing: flags.lacing,
})
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct BlockFlags {
pub keyframe: bool,
pub invisible: bool,
pub lacing: Lacing,
pub discardable: bool,
}
#[derive(Debug, Clone, PartialEq)]
pub struct SimpleBlock {
pub track_number: u64,
pub timecode: i16,
pub keyframe: bool,
pub invisible: bool,
pub lacing: Lacing,
pub discardable: bool,
}
fn block_flags(data: u8) -> Option<BlockFlags> {
    // Lacing occupies bits 5-6 in the LSB-numbered flag layout used here.
    let lacing_data = (data >> 5) & 0x03;
let lacing = match lacing_data {
0 => Lacing::None,
1 => Lacing::Xiph,
2 => Lacing::FixedSize,
3 => Lacing::EBML,
_ => return None,
};
Some(BlockFlags {
keyframe: (data & 1) != 0,
invisible: (data & (1 << 4)) != 0,
lacing: lacing,
discardable: (data & (1 << 7)) != 0,
})
}
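// Editor's sketch (not in the original parser): a spot-check of `block_flags`
// under the bit layout it assumes (keyframe = bit 0, invisible = bit 4,
// lacing = bits 5-6, discardable = bit 7).
#[cfg(test)]
mod block_flags_tests {
    use super::*;

    #[test]
    fn decodes_flag_bits() {
        // 0b0100_0001: keyframe set, lacing bits = 0b10 -> FixedSize.
        let flags = block_flags(0b0100_0001).unwrap();
        assert!(flags.keyframe);
        assert!(!flags.invisible);
        assert!(!flags.discardable);
        assert_eq!(flags.lacing, Lacing::FixedSize);
    }
}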
named!(pub simple_block<&[u8], SimpleBlock, Error>,
do_parse!(
track_number: vint
>> timecode: be_i16
>> flags: map_opt!(be_u8, block_flags)
>> (SimpleBlock {
track_number: track_number,
timecode: timecode,
keyframe: flags.keyframe,
invisible: flags.invisible,
lacing: flags.lacing,
discardable: flags.discardable,
})
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct SimpleBlockFlags {
pub keyframe: bool,
pub invisible: bool,
pub lacing: Lacing,
pub discardable: bool,
}
#[derive(Debug, Clone, PartialEq)]
pub enum Lacing {
None,
Xiph,
EBML,
FixedSize,
}
#[derive(Debug, Clone, PartialEq)]
pub struct LacedData {
pub frame_count: u8,
}
#[derive(Debug, Clone, PartialEq)]
pub struct Tracks {
pub tracks: Vec<TrackEntry>,
}
impl Tracks {
pub fn lookup(&self, track_number: u64) -> Option<usize> {
self.tracks
.iter()
.find(|t| t.track_number == track_number)
.map(|t| t.stream_index)
}
}
//https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.16
named!(pub tracks<&[u8], SegmentElement, Error>,
map!(many1!(complete!(eat_void!(track_entry))), |v| SegmentElement::Tracks(Tracks { tracks: v }))
);
#[derive(Debug, Clone, PartialEq, Default)]
pub struct TrackEntry {
pub track_number: u64,
pub track_uid: u64,
pub track_type: u64,
pub flag_enabled: Option<u64>, //FIXME: this flag is mandatory but does not appear in some files?
pub flag_default: Option<u64>, //FIXME: this flag is mandatory but does not appear in some files?
pub flag_forced: Option<u64>, //FIXME: this flag is mandatory but does not appear in some files?
pub flag_lacing: Option<u64>, //FIXME: this flag is mandatory but does not appear in some files?
pub min_cache: Option<u64>, //FIXME: this flag is mandatory but does not appear in some files?
pub max_cache: Option<u64>,
pub default_duration: Option<u64>,
pub default_decoded_field_duration: Option<u64>,
pub track_timecode_scale: Option<f64>, //FIXME: this flag is mandatory but does not appear in some files?
pub track_offset: Option<i64>,
pub max_block_addition_id: Option<u64>, //FIXME: this flag is mandatory but does not appear in some files?
pub name: Option<String>,
pub language: Option<String>,
pub language_ietf: Option<String>,
pub codec_id: String,
pub codec_private: Option<Vec<u8>>,
pub codec_name: Option<String>,
pub attachment_link: Option<u64>,
pub codec_settings: Option<String>,
pub codec_info_url: Option<String>,
pub codec_download_url: Option<String>,
pub codec_decode_all: Option<u64>, //FIXME: this flag is mandatory but does not appear in some files?
pub track_overlay: Option<u64>,
pub codec_delay: Option<u64>,
pub seek_pre_roll: Option<u64>, //FIXME: this flag is mandatory but does not appear in some files?
pub trick_track_uid: Option<u64>,
pub trick_track_segment_uid: Option<Vec<u8>>,
pub trick_track_flag: Option<u64>,
pub trick_master_track_uid: Option<u64>,
pub trick_master_track_segment_uid: Option<Vec<u8>>,
pub video: Option<Video>,
pub audio: Option<Audio>,
pub track_translate: Vec<TrackTranslate>,
pub track_operation: Option<TrackOperation>,
pub content_encodings: Option<ContentEncodings>,
/// The demuxer Stream index matching the Track
pub stream_index: usize,
}
named!(pub track_entry<&[u8], TrackEntry, Error>,
ebml_master!(0xAE,
do_parse!(
t: permutation_opt!(
ebml_uint!(0xD7),
ebml_uint!(0x73C5),
ebml_uint!(0x83),
ebml_uint!(0xB9)?,
ebml_uint!(0x88)?,
ebml_uint!(0x55AA)?,
ebml_uint!(0x9C)?,
ebml_uint!(0x6DE7)?,
ebml_uint!(0x6DF8)?,
ebml_uint!(0x23E383)?,
ebml_uint!(0x234E7A)?,
ebml_float!(0x23314F)?,
ebml_int!(0x537F)?,
ebml_uint!(0x55EE)?,
ebml_str!(0x536E)?,
ebml_str!(0x22B59C)?,
ebml_str!(0x22B59D)?,
ebml_str!(0x86),
ebml_binary!(0x63A2)?,
ebml_str!(0x258688)?,
ebml_uint!(0x7446)?,
ebml_str!(0x3A9697)?,
ebml_str!(0x3B4040)?,
ebml_str!(0x26B240)?,
ebml_uint!(0xAA)?,
ebml_uint!(0x6FAB)?,
ebml_uint!(0x56AA)?,
ebml_uint!(0x56BB)?,
track_translate+,
video?,
audio?,
track_operation?,
ebml_uint!(0xC0)?,
ebml_binary!(0xC1)?,
ebml_uint!(0xC6)?,
ebml_uint!(0xC7)?,
ebml_binary!(0xC4)?,
content_encodings?
) >> (TrackEntry {
track_number: t.0,
track_uid: t.1,
track_type: t.2,
flag_enabled: t.3,
flag_default: t.4,
flag_forced: t.5,
flag_lacing: t.6,
min_cache: t.7,
max_cache: t.8,
default_duration: t.9,
default_decoded_field_duration: t.10,
track_timecode_scale: t.11,
track_offset: t.12,
max_block_addition_id: t.13,
name: t.14,
language: t.15,
language_ietf: t.16,
codec_id: t.17,
codec_private: t.18,
codec_name: t.19,
attachment_link: t.20,
codec_settings: t.21,
codec_info_url: t.22,
codec_download_url: t.23,
codec_decode_all: t.24,
track_overlay: t.25,
codec_delay: t.26,
seek_pre_roll: t.27,
track_translate: t.28,
video: t.29,
audio: t.30,
track_operation: t.31,
trick_track_uid: t.32,
trick_track_segment_uid: t.33,
trick_track_flag: t.34,
trick_master_track_uid: t.35,
trick_master_track_segment_uid: t.36,
content_encodings: t.37,
stream_index: 0,
})
)
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct TrackTranslate {
pub edition_uid: Vec<u64>,
pub codec: u64,
pub track_id: u64,
}
named!(pub track_translate<&[u8], TrackTranslate, Error>,
ebml_master!(0x6624,
do_parse!(
t: permutation_opt!(
ebml_uint!(0x66FC)+,
ebml_uint!(0x66BF),
ebml_uint!(0x66A5)
) >> (TrackTranslate {
edition_uid: t.0,
codec: t.1,
track_id: t.2,
})
)
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct TrackOperation {
pub combine_planes: Option<TrackCombinePlanes>,
pub join_blocks: Option<TrackJoinBlocks>,
}
named!(pub track_operation<&[u8], TrackOperation, Error>,
ebml_master!(0xE2,
do_parse!(
t: permutation_opt!(
track_combine_planes?,
track_join_blocks?
) >> (TrackOperation {
combine_planes: t.0,
join_blocks: t.1,
})
)
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct TrackCombinePlanes {
pub track_planes: Vec<TrackPlane>,
}
named!(pub track_combine_planes<&[u8], TrackCombinePlanes, Error>,
ebml_master!(0xE3, map!(many1!(complete!(track_plane)), |v| TrackCombinePlanes { track_planes: v }))
);
#[derive(Debug, Clone, PartialEq)]
pub struct TrackPlane {
pub uid: u64,
pub plane_type: u64,
}
named!(pub track_plane<&[u8], TrackPlane, Error>,
ebml_master!(0xE4,
do_parse!(
t: permutation_opt!(
ebml_uint!(0xE5),
ebml_uint!(0xE6)
) >> (TrackPlane {
uid: t.0,
plane_type: t.1,
})
)
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct TrackJoinBlocks {
pub uid: Vec<u64>,
}
named!(pub track_join_blocks<&[u8], TrackJoinBlocks, Error>,
ebml_master!(0xE9, map!(many1!(complete!(ebml_uint!(0xED))), |v| TrackJoinBlocks { uid: v }))
);
#[derive(Debug, Clone, PartialEq)]
pub struct ContentEncodings {
pub content_encoding: Vec<ContentEncoding>,
}
named!(pub content_encodings<&[u8], ContentEncodings, Error>,
ebml_master!(0x6D80, map!(many1!(complete!(content_encoding)), |v| ContentEncodings { content_encoding: v }))
);
#[derive(Debug, Clone, PartialEq)]
pub struct ContentEncoding {
order: u64,
scope: u64,
encoding_type: u64,
compression: Option<ContentCompression>,
encryption: Option<ContentEncryption>,
}
named!(pub content_encoding<&[u8], ContentEncoding, Error>,
ebml_master!(0x6240,
do_parse!(
t: permutation_opt!(
ebml_uint!(0x5031),
ebml_uint!(0x5032),
ebml_uint!(0x5033),
content_compression?,
content_encryption?
) >> (ContentEncoding {
order: t.0,
scope: t.1,
encoding_type: t.2,
compression: t.3,
encryption: t.4
})
)
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct ContentCompression {
algo: u64,
settings: Option<u64>,
}
named!(pub content_compression<&[u8], ContentCompression, Error>,
ebml_master!(0x5034,
do_parse!(
t: permutation_opt!(
ebml_uint!(0x4254),
ebml_uint!(0x4255)?
) >> (ContentCompression {
algo: t.0,
settings: t.1,
})
)
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct ContentEncryption {
enc_algo: Option<u64>,
enc_key_id: Option<Vec<u8>>,
signature: Option<Vec<u8>>,
sig_key_id: Option<Vec<u8>>,
sig_algo: Option<u64>,
sig_hash_algo: Option<u64>,
}
named!(pub content_encryption<&[u8], ContentEncryption, Error>,
ebml_master!(0x5035,
do_parse!(
t: permutation_opt!(
ebml_uint!(0x47E1)?,
ebml_binary!(0x47E2)?,
ebml_binary!(0x47E3)?,
ebml_binary!(0x47E4)?,
ebml_uint!(0x47E5)?,
ebml_uint!(0x47E6)?
) >> (ContentEncryption {
enc_algo: t.0,
enc_key_id: t.1,
signature: t.2,
sig_key_id: t.3,
sig_algo: t.4,
sig_hash_algo: t.5,
})
)
)
);
#[derive(Debug, Clone, PartialEq, Default)]
pub struct Audio {
pub sampling_frequency: f64,
pub output_sampling_frequency: Option<f64>,
pub channels: u64,
pub channel_positions: Option<Vec<u8>>,
pub bit_depth: Option<u64>,
}
named!(pub audio<&[u8], Audio, Error>,
ebml_master!(0xE1,
do_parse!(
t: permutation_opt!(
ebml_float!(0xB5),
ebml_float!(0x78B5)?,
ebml_uint!(0x9F),
ebml_binary!(0x7D7B)?,
ebml_uint!(0x6264)?
) >> (Audio {
sampling_frequency: t.0,
output_sampling_frequency: t.1,
channels: t.2,
channel_positions: t.3,
bit_depth: t.4,
})
)
)
);
#[derive(Debug, Clone, PartialEq, Default)]
pub struct Video {
pub flag_interlaced: Option<u64>,
pub field_order: Option<u64>,
pub stereo_mode: Option<u64>,
pub alpha_mode: Option<u64>,
pub old_stereo_mode: Option<u64>,
pub pixel_width: u64,
pub pixel_height: u64,
pub pixel_crop_bottom: Option<u64>,
pub pixel_crop_top: Option<u64>,
pub pixel_crop_left: Option<u64>,
pub pixel_crop_right: Option<u64>,
pub display_width: Option<u64>,
pub display_height: Option<u64>,
pub display_unit: Option<u64>,
pub aspect_ratio_type: Option<u64>,
pub colour_space: Option<Vec<u8>>,
pub gamma_value: Option<f64>,
pub frame_rate: Option<f64>,
pub colour: Option<Colour>,
pub projection: Option<Projection>,
}
named!(pub video<&[u8], Video, Error>,
ebml_master!(0xE0,
do_parse!(
t: permutation_opt!(
ebml_uint!(0x9A)?,
ebml_uint!(0x9D)?,
ebml_uint!(0x53B8)?,
ebml_uint!(0x53C0)?,
ebml_uint!(0x53B9)?,
ebml_uint!(0xB0),
ebml_uint!(0xBA),
ebml_uint!(0x54AA)?,
ebml_uint!(0x54BB)?,
ebml_uint!(0x54CC)?,
ebml_uint!(0x54DD)?,
ebml_uint!(0x54B0)?,
ebml_uint!(0x54BA)?,
ebml_uint!(0x54B2)?,
ebml_uint!(0x54B3)?,
ebml_binary!(0x2EB524)?,
ebml_float!(0x2FB523)?,
ebml_float!(0x2383E3)?,
colour?,
projection?
) >> (Video {
flag_interlaced: t.0,
field_order: t.1,
stereo_mode: t.2,
alpha_mode: t.3,
old_stereo_mode: t.4,
pixel_width: t.5,
pixel_height: t.6,
pixel_crop_bottom: t.7,
pixel_crop_top: t.8,
pixel_crop_left: t.9,
pixel_crop_right: t.10,
display_width: t.11,
display_height: t.12,
display_unit: t.13,
aspect_ratio_type: t.14,
colour_space: t.15,
gamma_value: t.16,
frame_rate: t.17,
colour: t.18,
projection: t.19,
})
)
)
);
#[derive(Debug, Clone, PartialEq, Default)]
pub struct Colour {
pub matrix_coefficients: Option<u64>,
pub bits_per_channel: Option<u64>,
pub chroma_subsampling_horz: Option<u64>,
pub chroma_subsampling_vert: Option<u64>,
pub cb_subsampling_horz: Option<u64>,
pub cb_subsampling_vert: Option<u64>,
pub chroma_siting_horz: Option<u64>,
pub chroma_siting_vert: Option<u64>,
pub range: Option<u64>,
pub transfer_characteristics: Option<u64>,
pub primaries: Option<u64>,
pub max_cll: Option<u64>,
pub max_fall: Option<u64>,
pub mastering_metadata: Option<MasteringMetadata>,
}
named!(pub colour<&[u8], Colour, Error>,
ebml_master!(0x55B0,
do_parse!(
t: permutation_opt!(
ebml_uint!(0x55B1)?,
ebml_uint!(0x55B2)?,
ebml_uint!(0x55B3)?,
ebml_uint!(0x55B4)?,
ebml_uint!(0x55B5)?,
ebml_uint!(0x55B6)?,
ebml_uint!(0x55B7)?,
ebml_uint!(0x55B8)?,
ebml_uint!(0x55B9)?,
ebml_uint!(0x55BA)?,
ebml_uint!(0x55BB)?,
ebml_uint!(0x55BC)?,
ebml_uint!(0x55BD)?,
mastering_metadata?
) >> (Colour {
matrix_coefficients: t.0,
bits_per_channel: t.1,
chroma_subsampling_horz: t.2,
chroma_subsampling_vert: t.3,
cb_subsampling_horz: t.4,
cb_subsampling_vert: t.5,
chroma_siting_horz: t.6,
chroma_siting_vert: t.7,
range: t.8,
transfer_characteristics: t.9,
primaries: t.10,
max_cll: t.11,
max_fall: t.12,
mastering_metadata: t.13,
})
)
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct MasteringMetadata {
pub primary_r_chromaticity_x: Option<f64>,
pub primary_r_chromaticity_y: Option<f64>,
pub primary_g_chromaticity_x: Option<f64>,
pub primary_g_chromaticity_y: Option<f64>,
pub primary_b_chromaticity_x: Option<f64>,
pub primary_b_chromaticity_y: Option<f64>,
pub white_point_chromaticity_x: Option<f64>,
pub white_point_chromaticity_y: Option<f64>,
pub luminance_max: Option<f64>,
pub luminance_min: Option<f64>,
}
named!(pub mastering_metadata<&[u8], MasteringMetadata, Error>,
ebml_master!(0x55D0,
do_parse!(
t: permutation_opt!(
ebml_float!(0x55D1)?,
ebml_float!(0x55D2)?,
ebml_float!(0x55D3)?,
ebml_float!(0x55D4)?,
ebml_float!(0x55D5)?,
ebml_float!(0x55D6)?,
ebml_float!(0x55D7)?,
ebml_float!(0x55D8)?,
ebml_float!(0x55D9)?,
ebml_float!(0x55DA)?
) >> (MasteringMetadata {
primary_r_chromaticity_x: t.0,
primary_r_chromaticity_y: t.1,
primary_g_chromaticity_x: t.2,
primary_g_chromaticity_y: t.3,
primary_b_chromaticity_x: t.4,
primary_b_chromaticity_y: t.5,
white_point_chromaticity_x: t.6,
white_point_chromaticity_y: t.7,
luminance_max: t.8,
luminance_min: t.9,
})
)
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct Projection {
pub projection_type: u64,
pub projection_private: Option<Vec<u8>>,
pub projection_pose_yaw: f64,
pub projection_pose_pitch: f64,
pub projection_pose_roll: f64,
}
named!(pub projection<&[u8], Projection, Error>,
ebml_master!(0x7670,
do_parse!(
t: permutation_opt!(
ebml_uint!(0x7671),
ebml_binary!(0x7672)?,
ebml_float!(0x7673),
ebml_float!(0x7674),
ebml_float!(0x7675)
) >> (Projection {
projection_type: t.0,
projection_private: t.1,
projection_pose_yaw: t.2,
projection_pose_pitch: t.3,
projection_pose_roll: t.4,
})
)
)
);
#[derive(Debug, Clone, PartialEq)]
pub struct Chapters {}
// hack to fix type inference issues
pub fn ret_chapters(input: &[u8]) -> IResult<&[u8], SegmentElement, Error> {
Ok((input, SegmentElement::Chapters(Chapters {})))
}
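// (Presumably the `SegmentElement` result type cannot be inferred when the value is
// produced inline inside `ebml_master!`, so a free function with an explicit signature
// pins it down - this explanation is an assumption based on the comment above.)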
// https://datatracker.ietf.org/doc/html/draft-lhomme-cellar-matroska-03#section-7.3.199
// TODO
named!(pub chapters<&[u8], SegmentElement, Error>,
//EditionEntry
ebml_master!(0x45B9, call!(ret_chapters))
);
#[derive(Debug, Clone, PartialEq)]
pub struct Cues {}
#[derive(Debug, Clone, PartialEq)]
pub struct Attachments {}
#[derive(Debug, Clone, PartialEq)]
pub struct Tags {}
#[cfg(test)]
#[allow(non_upper_case_globals)]
mod tests {
use super::*;
use log::debug;
use nom::{HexDisplay, Offset};
use std::cmp::min;
const mkv: &'static [u8] = include_bytes!("../assets/single_stream.mkv");
const webm: &'static [u8] = include_bytes!("../assets/big-buck-bunny_trailer.webm");
#[test]
fn mkv_segment_root() {
let res = segment(&mkv[47..100]);
debug!("{:?}", res);
if let Ok((i, _)) = res {
debug!("consumed {} bytes after header", (&mkv[47..]).offset(i));
} else {
panic!("res: {:?}", res);
}
}
#[test]
fn mkv_segment_elements() {
let mut index: usize = 59;
loop {
let res = segment_element(&mkv[index..]);
match res {
Ok((i, o)) => {
let new_index = mkv.offset(i);
match o {
SegmentElement::Unknown(id, size) => {
debug!(
"[{} -> {}] Unknown {{ id: 0x{:x}, size: {:?} }}",
index, new_index, id, size
);
}
o => {
debug!("[{} -> {}] {:#?}", index, new_index, o);
}
};
index = new_index as usize;
}
e => {
let max_index = min(mkv.len(), index + 200);
debug!(
"[{}] {:#?}:\n{}",
index,
e,
(&mkv[index..max_index]).to_hex(16)
);
break;
}
}
}
//panic!();
}
#[test]
fn webm_segment_root() {
let res = segment(&webm[40..100]);
debug!("{:?}", res);
if let Ok((i, _)) = res {
debug!("consumed {} bytes after header", (&webm[40..]).offset(i));
} else {
panic!("res: {:?}", res);
}
}
#[test]
fn webm_segment_elements() {
let mut index: usize = 48;
loop {
let res = segment_element(&webm[index..]);
match res {
Ok((i, o)) => {
let new_index = webm.offset(i);
match o {
SegmentElement::Unknown(id, size) => {
debug!(
"[{} -> {}] Unknown {{ id: 0x{:x}, size: {:?} }}",
index, new_index, id, size
);
}
o => {
debug!("[{} -> {}] {:#?}", index, new_index, o);
}
};
index = new_index as usize;
}
e => {
let max_index = min(webm.len(), index + 200);
debug!(
"[{}] {:#?}:\n{}",
index,
e,
(&webm[index..max_index]).to_hex(16)
);
break;
}
}
}
}
}
| 28.227232 | 111 | 0.588487 |
648453fe38b7158e82ffb71720c459c1eaaa26ee | 971 | #![allow(dead_code)]
/// A [`try`]-like macro to work around the [`Option`]/[`Result`] nested APIs.
/// These APIs require checking the type and then calling the appropriate getter function
/// (which returns `None` if you got it wrong). This macro turns the `None` into
/// an `IonError`, which currently cannot be done with `?`.
macro_rules! try_to {
($getter:expr) => {
match $getter {
Some(value) => value,
None => illegal_operation(format!("Missing a value: {}", stringify!($getter)))?,
}
};
}
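// A minimal usage sketch - `reader`, `field` and `as_str` below are hypothetical
// stand-ins for any API that yields nested `Option`s, and `IonResult` is assumed to
// be the crate's result alias:
//
//     fn name_of(reader: &Reader) -> IonResult<String> {
//         let field = try_to!(reader.field("name"));
//         let text = try_to!(field.as_str());
//         Ok(text.to_string())
//     }
//
// On a `None`, `try_to!` surfaces an `IonError` via `illegal_operation` instead of
// panicking or requiring a manual `ok_or_else` at every call site.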
pub mod result;
pub mod binary;
pub mod cursor;
pub mod data_source;
pub mod text;
pub mod types;
pub mod value;
pub mod constants;
mod reader;
mod symbol_table;
mod system_event_handler;
pub use binary::cursor::BinaryIonCursor;
pub use cursor::Cursor;
pub use data_source::IonDataSource;
pub use reader::Reader;
pub use symbol_table::SymbolTable;
pub use system_event_handler::SystemEventHandler;
pub use types::IonType;
| 26.243243 | 92 | 0.697219 |
6969f4c5ecfd179c5ef58ecd4a829558d3016b6c | 1,674 | /*
* Ory APIs
*
* Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers.
*
* The version of the OpenAPI document: v0.0.1-alpha.71
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
/// SubmitSelfServiceRegistrationFlowWithPasswordMethodBody : SubmitSelfServiceRegistrationFlowWithPasswordMethodBody is used to decode the registration form payload when using the password method.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubmitSelfServiceRegistrationFlowWithPasswordMethodBody {
/// The CSRF Token
#[serde(rename = "csrf_token", skip_serializing_if = "Option::is_none")]
pub csrf_token: Option<String>,
/// Method to use This field must be set to `password` when using the password method.
#[serde(rename = "method")]
pub method: String,
/// Password to sign the user up with
#[serde(rename = "password")]
pub password: String,
/// The identity's traits
#[serde(rename = "traits")]
pub traits: serde_json::Value,
}
impl SubmitSelfServiceRegistrationFlowWithPasswordMethodBody {
/// SubmitSelfServiceRegistrationFlowWithPasswordMethodBody is used to decode the registration form payload when using the password method.
pub fn new(method: String, password: String, traits: serde_json::Value) -> SubmitSelfServiceRegistrationFlowWithPasswordMethodBody {
SubmitSelfServiceRegistrationFlowWithPasswordMethodBody {
csrf_token: None,
method,
password,
traits,
}
}
}
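// Usage sketch (the values below are placeholders - the accepted shape of `traits`
// is defined by the instance's identity schema, not by this crate):
//
//     let body = SubmitSelfServiceRegistrationFlowWithPasswordMethodBody::new(
//         "password".to_string(),
//         "correct horse battery staple".to_string(),
//         serde_json::json!({ "email": "user@example.com" }),
//     );
//
// `csrf_token` defaults to `None` and is skipped during serialization when unset.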
| 38.045455 | 197 | 0.729988 |
75006ae869636416fa0beb22450254858d9a517b | 5,214 | #![allow(unused_imports)]
use super::*;
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
extern "C" {
# [wasm_bindgen (extends = :: js_sys :: Object , js_name = SpeechRecognitionEventInit)]
#[derive(Debug, Clone, PartialEq, Eq)]
#[doc = "The `SpeechRecognitionEventInit` dictionary."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechRecognitionEventInit`*"]
pub type SpeechRecognitionEventInit;
}
impl SpeechRecognitionEventInit {
#[doc = "Construct a new `SpeechRecognitionEventInit`."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechRecognitionEventInit`*"]
pub fn new() -> Self {
#[allow(unused_mut)]
let mut ret: Self = ::wasm_bindgen::JsCast::unchecked_into(::js_sys::Object::new());
ret
}
#[doc = "Change the `bubbles` field of this object."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechRecognitionEventInit`*"]
pub fn bubbles(&mut self, val: bool) -> &mut Self {
use wasm_bindgen::JsValue;
let r = ::js_sys::Reflect::set(
self.as_ref(),
&JsValue::from("bubbles"),
&JsValue::from(val),
);
debug_assert!(
r.is_ok(),
"setting properties should never fail on our dictionary objects"
);
let _ = r;
self
}
#[doc = "Change the `cancelable` field of this object."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechRecognitionEventInit`*"]
pub fn cancelable(&mut self, val: bool) -> &mut Self {
use wasm_bindgen::JsValue;
let r = ::js_sys::Reflect::set(
self.as_ref(),
&JsValue::from("cancelable"),
&JsValue::from(val),
);
debug_assert!(
r.is_ok(),
"setting properties should never fail on our dictionary objects"
);
let _ = r;
self
}
#[doc = "Change the `composed` field of this object."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechRecognitionEventInit`*"]
pub fn composed(&mut self, val: bool) -> &mut Self {
use wasm_bindgen::JsValue;
let r = ::js_sys::Reflect::set(
self.as_ref(),
&JsValue::from("composed"),
&JsValue::from(val),
);
debug_assert!(
r.is_ok(),
"setting properties should never fail on our dictionary objects"
);
let _ = r;
self
}
#[cfg(feature = "Document")]
#[doc = "Change the `emma` field of this object."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `Document`, `SpeechRecognitionEventInit`*"]
pub fn emma(&mut self, val: Option<&Document>) -> &mut Self {
use wasm_bindgen::JsValue;
let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("emma"), &JsValue::from(val));
debug_assert!(
r.is_ok(),
"setting properties should never fail on our dictionary objects"
);
let _ = r;
self
}
#[doc = "Change the `interpretation` field of this object."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechRecognitionEventInit`*"]
pub fn interpretation(&mut self, val: &::wasm_bindgen::JsValue) -> &mut Self {
use wasm_bindgen::JsValue;
let r = ::js_sys::Reflect::set(
self.as_ref(),
&JsValue::from("interpretation"),
&JsValue::from(val),
);
debug_assert!(
r.is_ok(),
"setting properties should never fail on our dictionary objects"
);
let _ = r;
self
}
#[doc = "Change the `resultIndex` field of this object."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechRecognitionEventInit`*"]
pub fn result_index(&mut self, val: u32) -> &mut Self {
use wasm_bindgen::JsValue;
let r = ::js_sys::Reflect::set(
self.as_ref(),
&JsValue::from("resultIndex"),
&JsValue::from(val),
);
debug_assert!(
r.is_ok(),
"setting properties should never fail on our dictionary objects"
);
let _ = r;
self
}
#[cfg(feature = "SpeechRecognitionResultList")]
#[doc = "Change the `results` field of this object."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `SpeechRecognitionEventInit`, `SpeechRecognitionResultList`*"]
pub fn results(&mut self, val: Option<&SpeechRecognitionResultList>) -> &mut Self {
use wasm_bindgen::JsValue;
let r = ::js_sys::Reflect::set(
self.as_ref(),
&JsValue::from("results"),
&JsValue::from(val),
);
debug_assert!(
r.is_ok(),
"setting properties should never fail on our dictionary objects"
);
let _ = r;
self
}
}
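// Usage sketch - every setter mutates in place and returns `&mut Self`, so calls chain:
//
//     let mut init = SpeechRecognitionEventInit::new();
//     init.bubbles(true).cancelable(false).result_index(0);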
| 37.242857 | 140 | 0.571346 |
f717c2145640113ccee34de66fff3d29f50bef72 | 6,350 | use std::io;
use std::path::{self, Path, PathBuf};
use std::process;
use fs_err as fs;
use structopt::clap::crate_name;
use structopt::StructOpt;
use crate::cli;
use crate::command::{FakeRunner, ProcessRunner, Runner};
use crate::config::{Config, ConfigMount, ConfigSnapshot};
use crate::error::Result;
use crate::lvm;
use crate::mount::{mount, unmount};
fn load_config<P>(config_path: P) -> Result<Config>
where
P: AsRef<Path>,
{
log::debug!(
"Loading configuration file {}",
config_path.as_ref().display()
);
let config_file = fs::File::open(config_path.as_ref())?;
Config::load(config_file)
}
fn create_snapshot<R>(runner: &R, config_snap: &ConfigSnapshot) -> Result<()>
where
R: Runner,
{
match config_snap {
ConfigSnapshot::Lvm { source, name, size } => {
let source_lv = lvm::LogicalVolume::from_path(source);
source_lv.snapshot(runner, name, size)?;
}
}
Ok(())
}
fn remove_snapshot<R>(runner: &R, config_snap: &ConfigSnapshot) -> Result<()>
where
R: Runner,
{
match config_snap {
ConfigSnapshot::Lvm { source, name, .. } => {
let target_lv = lvm::LogicalVolume::from_path(source).with_name(name);
target_lv.remove(runner)?;
}
}
Ok(())
}
fn get_mount_target<P>(toplevel: P, config_mount: &ConfigMount) -> PathBuf
where
P: AsRef<Path>,
{
let root_dir: &Path = path::Component::RootDir.as_ref();
let target = config_mount.target.as_ref().unwrap_or(&config_mount.source);
if target == root_dir {
toplevel.as_ref().to_path_buf()
} else {
toplevel
.as_ref()
.join(target.strip_prefix(root_dir).unwrap_or(target))
}
}
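// Worked example (paths are illustrative): with `toplevel = "/mnt/snapshot"` and a
// mount whose target is `/home`, this returns "/mnt/snapshot/home"; a target of `/`
// maps to the toplevel directory itself.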
fn create_mount<P, R>(runner: &R, toplevel: P, config_mount: &ConfigMount) -> Result<()>
where
P: AsRef<Path>,
R: Runner,
{
if config_mount.if_exists && !config_mount.source.exists() {
log::info!(
"Skipping mount: {} does not exist",
&config_mount.source.display()
);
return Ok(());
}
let target = get_mount_target(toplevel, config_mount);
mount(
runner,
&config_mount.source,
&target,
config_mount.type_.as_deref(),
&config_mount.options,
)
}
fn remove_mount<P, R>(runner: &R, toplevel: P, config_mount: &ConfigMount) -> Result<()>
where
P: AsRef<Path>,
R: Runner,
{
let target = get_mount_target(toplevel, config_mount);
unmount(runner, target)
}
fn mount_all<R>(runner: &R, toplevel: &Path, config: &Config) -> Result<()>
where
R: Runner,
{
for config_snap in config.snapshots.iter() {
create_snapshot(runner, config_snap)?;
}
for config_mount in config.mounts.iter() {
create_mount(runner, toplevel, config_mount)?;
}
Ok(())
}
fn unmount_all<R>(runner: &R, toplevel: &Path, config: &Config) -> Result<()>
where
R: Runner,
{
for config_mount in config.mounts.iter().rev() {
remove_mount(runner, toplevel, config_mount)?;
}
for config_snap in config.snapshots.iter().rev() {
remove_snapshot(runner, config_snap)?;
}
Ok(())
}
fn unmount_mount_all<R>(
runner: &R,
unmount_before: bool,
toplevel: &Path,
config: &Config,
) -> Result<()>
where
R: Runner,
{
if unmount_before {
unmount_all(runner, toplevel, config)?;
}
mount_all(runner, toplevel, config)?;
Ok(())
}
#[inline]
fn log_done() {
log::info!("All done");
}
trait CliCommand {
fn run<R>(&self, runner: &R, config_path: &Path) -> Result<()>
where
R: Runner;
}
impl CliCommand for cli::ArgsCommandMount {
fn run<R>(&self, runner: &R, config_path: &Path) -> Result<()>
where
R: Runner,
{
let config = load_config(config_path)?;
unmount_mount_all(runner, self.unmount_before, &self.target, &config)?;
log_done();
Ok(())
}
}
impl CliCommand for cli::ArgsCommandUnmount {
fn run<R>(&self, runner: &R, config_path: &Path) -> Result<()>
where
R: Runner,
{
let config = load_config(config_path)?;
unmount_all(runner, &self.target, &config)?;
log_done();
Ok(())
}
}
impl CliCommand for cli::ArgsCommandRun {
fn run<R>(&self, runner: &R, config_path: &Path) -> Result<()>
where
R: Runner,
{
let config = load_config(config_path)?;
unmount_mount_all(runner, self.unmount_before, &self.target, &config)?;
let mut command = process::Command::new(&self.program);
command.args(&self.args);
let status = runner.run(&mut command)?;
unmount_all(runner, &self.target, &config)?;
log_done();
process::exit(status.code().unwrap_or_default())
}
}
impl CliCommand for cli::ArgsCommandConfig {
fn run<R>(&self, _runner: &R, config_path: &Path) -> Result<()>
where
R: Runner,
{
let config = load_config(config_path)?;
let stdout = io::stdout();
config.dump(stdout)?;
Ok(())
}
}
impl CliCommand for cli::ArgsCommandCompletion {
fn run<R>(&self, _runner: &R, _config_path: &Path) -> Result<()>
where
R: Runner,
{
cli::Args::clap().gen_completions_to(crate_name!(), self.shell, &mut io::stdout());
Ok(())
}
}
impl CliCommand for cli::ArgsCommand {
fn run<R>(&self, runner: &R, config_path: &Path) -> Result<()>
where
R: Runner,
{
match self {
cli::ArgsCommand::Mount(cmd) => cmd.run(runner, config_path),
cli::ArgsCommand::Unmount(cmd) => cmd.run(runner, config_path),
cli::ArgsCommand::Run(cmd) => cmd.run(runner, config_path),
cli::ArgsCommand::Config(cmd) => cmd.run(runner, config_path),
cli::ArgsCommand::Completion(cmd) => cmd.run(runner, config_path),
}
}
}
pub fn main(args: &cli::Args) {
env_logger::Builder::new()
.format_module_path(false)
.format_timestamp(None)
.filter_level(args.log_level)
.init();
if let Err(e) = if args.dry_run {
args.command.run(&FakeRunner, &args.config_path)
} else {
args.command.run(&ProcessRunner, &args.config_path)
} {
log::error!("{}: {:?}", e, e);
process::exit(1);
}
}
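// Design note: every command receives its `Runner` here, so `--dry-run` can swap
// `FakeRunner` in for `ProcessRunner` without the mount/snapshot/unmount code paths
// having to know about the flag at all.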
| 25.918367 | 91 | 0.595591 |
4a3979a89895eb961866401216e81a09e55b6bcb | 10,098 | //! zkSync network block definition.
use super::PriorityOp;
use super::ZkSyncOp;
use super::{AccountId, BlockNumber, Fr};
use crate::SignedZkSyncTx;
use chrono::DateTime;
use chrono::Utc;
use serde::{Deserialize, Serialize};
use zksync_basic_types::{H256, U256};
use zksync_crypto::franklin_crypto::bellman::pairing::ff::{PrimeField, PrimeFieldRepr};
use zksync_crypto::params::CHUNK_BIT_WIDTH;
use zksync_crypto::serialization::FrSerde;
/// An intermediate state of the block in the zkSync network.
/// Contains the information about (so far) executed transactions and
/// meta-information related to the block creating process.
#[derive(Clone, Debug)]
pub struct PendingBlock {
/// Block ID.
pub number: BlockNumber,
/// Amount of chunks left in the block.
pub chunks_left: usize,
/// ID of the first unprocessed priority operation at the moment
/// of the block initialization.
pub unprocessed_priority_op_before: u64,
/// Amount of processing iterations applied to the pending block.
/// If this amount exceeds the limit configured in the server, block will be
/// sealed even if it's not full.
pub pending_block_iteration: usize,
/// List of successfully executed operations.
pub success_operations: Vec<ExecutedOperations>,
    /// List of failed operations.
pub failed_txs: Vec<ExecutedTx>,
}
/// Executed L2 transaction.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ExecutedTx {
pub signed_tx: SignedZkSyncTx,
pub success: bool,
pub op: Option<ZkSyncOp>,
pub fail_reason: Option<String>,
pub block_index: Option<u32>,
pub created_at: DateTime<Utc>,
pub batch_id: Option<i64>,
}
/// Executed L1 priority operation.
/// Unlike L2 transactions, L1 priority operations cannot fail in L2.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ExecutedPriorityOp {
pub priority_op: PriorityOp,
pub op: ZkSyncOp,
pub block_index: u32,
pub created_at: DateTime<Utc>,
}
/// Representation of executed operation, which can be either L1 or L2.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum ExecutedOperations {
Tx(Box<ExecutedTx>),
PriorityOp(Box<ExecutedPriorityOp>),
}
impl ExecutedOperations {
/// Returns the `ZkSyncOp` object associated with the operation, if any.
pub fn get_executed_op(&self) -> Option<&ZkSyncOp> {
match self {
ExecutedOperations::Tx(exec_tx) => exec_tx.op.as_ref(),
ExecutedOperations::PriorityOp(exec_op) => Some(&exec_op.op),
}
}
    /// Attempts to get the executed L2 transaction.
pub fn get_executed_tx(&self) -> Option<&ExecutedTx> {
match self {
ExecutedOperations::Tx(exec_tx) => Some(exec_tx),
ExecutedOperations::PriorityOp(_) => None,
}
}
/// Returns the public data required for the Ethereum smart contract to commit the operation.
pub fn get_eth_public_data(&self) -> Vec<u8> {
self.get_executed_op()
.map(ZkSyncOp::public_data)
.unwrap_or_default()
}
/// Gets the witness required for the Ethereum smart contract.
/// Unlike public data, some operations may not have a witness.
pub fn get_eth_witness_bytes(&self) -> Option<Vec<u8>> {
self.get_executed_op()
.map(|op| op.eth_witness().unwrap_or_else(Vec::new))
}
/// Returns the list of accounts affected by the operation.
pub fn get_updated_account_ids(&self) -> Vec<AccountId> {
self.get_executed_op()
.map(|op| op.get_updated_account_ids())
.unwrap_or_else(Vec::new)
}
}
/// zkSync network block.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Block {
/// Block ID.
pub block_number: BlockNumber,
/// Chain root hash obtained after executing this block.
#[serde(with = "FrSerde")]
pub new_root_hash: Fr,
/// ID of the zkSync account to which fees are collected.
pub fee_account: AccountId,
/// List of operations executed in the block. Includes both L1 and L2 operations.
pub block_transactions: Vec<ExecutedOperations>,
    /// A tuple of the IDs of the first unprocessed priority operation before and after this block.
pub processed_priority_ops: (u64, u64),
    /// Actual block chunk amount that will be used on the contract, such that `block_chunks_size >= block.chunks_used()`.
/// Server and provers may support blocks of several different sizes, and this value must be equal to one of the
/// supported size values.
pub block_chunks_size: usize,
/// Gas limit to be set for the Commit Ethereum transaction.
pub commit_gas_limit: U256,
/// Gas limit to be set for the Verify Ethereum transaction.
pub verify_gas_limit: U256,
}
impl Block {
/// Creates a new `Block` object.
#[allow(clippy::too_many_arguments)]
pub fn new(
block_number: BlockNumber,
new_root_hash: Fr,
fee_account: AccountId,
block_transactions: Vec<ExecutedOperations>,
processed_priority_ops: (u64, u64),
block_chunks_size: usize,
commit_gas_limit: U256,
verify_gas_limit: U256,
) -> Self {
Self {
block_number,
new_root_hash,
fee_account,
block_transactions,
processed_priority_ops,
block_chunks_size,
commit_gas_limit,
verify_gas_limit,
}
}
/// Creates a new block, choosing the smallest supported block size which will fit
/// all the executed transactions.
///
/// # Panics
///
/// Panics if there is no supported block size to fit all the transactions.
#[allow(clippy::too_many_arguments)]
pub fn new_from_available_block_sizes(
block_number: BlockNumber,
new_root_hash: Fr,
fee_account: AccountId,
block_transactions: Vec<ExecutedOperations>,
processed_priority_ops: (u64, u64),
available_block_chunks_sizes: &[usize],
commit_gas_limit: U256,
verify_gas_limit: U256,
) -> Self {
let mut block = Self {
block_number,
new_root_hash,
fee_account,
block_transactions,
processed_priority_ops,
block_chunks_size: 0,
commit_gas_limit,
verify_gas_limit,
};
block.block_chunks_size = block.smallest_block_size(available_block_chunks_sizes);
block
}
/// Returns the new state root hash encoded for the Ethereum smart contract.
pub fn get_eth_encoded_root(&self) -> H256 {
let mut be_bytes = [0u8; 32];
self.new_root_hash
.into_repr()
.write_be(be_bytes.as_mut())
.expect("Write commit bytes");
H256::from(be_bytes)
}
/// Returns the public data for the Ethereum Commit operation.
pub fn get_eth_public_data(&self) -> Vec<u8> {
let mut executed_tx_pub_data = self
.block_transactions
.iter()
.filter_map(ExecutedOperations::get_executed_op)
.flat_map(ZkSyncOp::public_data)
.collect::<Vec<_>>();
// Pad block with noops.
executed_tx_pub_data.resize(self.block_chunks_size * CHUNK_BIT_WIDTH / 8, 0x00);
executed_tx_pub_data
}
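    // Worked example (assuming the usual zkSync chunk width of 72 bits, i.e. 9 bytes
    // per chunk): a block with `block_chunks_size == 6` is always committed as
    // 6 * 72 / 8 = 54 bytes of public data, with 0x00 noop bytes padding whatever the
    // executed operations left unused.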
/// Returns eth_witness data and data_size for each operation that has it.
pub fn get_eth_witness_data(&self) -> (Vec<u8>, Vec<u64>) {
let mut eth_witness = Vec::new();
let mut used_bytes = Vec::new();
for block_tx in &self.block_transactions {
if let Some(franklin_op) = block_tx.get_executed_op() {
if let Some(witness_bytes) = franklin_op.eth_witness() {
used_bytes.push(witness_bytes.len() as u64);
eth_witness.extend(witness_bytes.into_iter());
}
}
}
(eth_witness, used_bytes)
}
/// Returns the number of priority operations processed in this block.
pub fn number_of_processed_prior_ops(&self) -> u64 {
self.processed_priority_ops.1 - self.processed_priority_ops.0
}
fn chunks_used(&self) -> usize {
self.block_transactions
.iter()
.filter_map(ExecutedOperations::get_executed_op)
.map(ZkSyncOp::chunks)
.sum()
}
fn smallest_block_size(&self, available_block_sizes: &[usize]) -> usize {
let chunks_used = self.chunks_used();
smallest_block_size_for_chunks(chunks_used, available_block_sizes)
}
    /// Returns the number of Withdrawal and ForcedExit operations in the block.
pub fn get_withdrawals_count(&self) -> usize {
let mut withdrawals_count = 0;
for block_tx in &self.block_transactions {
if let Some(sync_op) = block_tx.get_executed_op() {
if sync_op.withdrawal_data().is_some() {
withdrawals_count += 1;
}
}
}
withdrawals_count
}
/// Returns the data about withdrawals required for the Ethereum smart contract.
pub fn get_withdrawals_data(&self) -> Vec<u8> {
let mut withdrawals_data = Vec::new();
for block_tx in &self.block_transactions {
if let Some(franklin_op) = block_tx.get_executed_op() {
if let Some(withdrawal_data) = franklin_op.withdrawal_data() {
withdrawals_data.extend(&withdrawal_data);
}
}
}
withdrawals_data
}
}
/// Gets the smallest supported block size that fits the given amount of chunks.
pub fn smallest_block_size_for_chunks(
chunks_used: usize,
available_block_sizes: &[usize],
) -> usize {
for &block_size in available_block_sizes {
if block_size >= chunks_used {
return block_size;
}
}
panic!(
"Provided chunks amount ({}) cannot fit in one block, maximum available size is {}",
chunks_used,
available_block_sizes.last().unwrap()
);
}
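// Worked example (sizes are illustrative, not a real server configuration):
// `smallest_block_size_for_chunks(42, &[6, 30, 74, 150])` returns 74 - the smallest
// supported size with `block_size >= chunks_used` - and panics only when even the
// largest supported size is exceeded.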
| 34.346939 | 120 | 0.645771 |
b9bd6549aa99c626fffa053b5bb78538e26e9b25 | 42,555 | #![allow(unused_variables, dead_code)]
use azul_css::{
StyleTextAlignmentHorz, StyleTextAlignmentVert, ScrollbarInfo,
};
pub use webrender::api::{
GlyphInstance, LayoutSize, LayoutRect, LayoutPoint,
};
pub use harfbuzz_sys::{hb_glyph_info_t as GlyphInfo, hb_glyph_position_t as GlyphPosition};
pub type WordIndex = usize;
pub type GlyphIndex = usize;
pub type LineLength = f32;
pub type IndexOfLineBreak = usize;
pub type RemainingSpaceToRight = f32;
pub type LineBreaks = Vec<(GlyphIndex, RemainingSpaceToRight)>;
const DEFAULT_LINE_HEIGHT: f32 = 1.0;
const DEFAULT_WORD_SPACING: f32 = 1.0;
const DEFAULT_LETTER_SPACING: f32 = 0.0;
const DEFAULT_TAB_WIDTH: f32 = 4.0;
/// Text broken up into `Tab`, `Word()`, `Return` characters
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Words {
pub items: Vec<Word>,
    // NOTE: Can't be a string, because then it wouldn't be possible to take substrings
    // by character index (in UTF-8, a single character can be encoded as multiple bytes).
internal_str: String,
internal_chars: Vec<char>,
}
impl Words {
pub fn get_substr(&self, word: &Word) -> String {
self.internal_chars[word.start..word.end].iter().collect()
}
pub fn get_str(&self) -> &str {
&self.internal_str
}
pub fn get_char(&self, idx: usize) -> Option<char> {
self.internal_chars.get(idx).cloned()
}
}
/// Section of a certain type
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Word {
pub start: usize,
pub end: usize,
pub word_type: WordType,
}
/// Either a white-space delimited word, tab or return character
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum WordType {
/// Encountered a word (delimited by spaces)
Word,
    /// `\t` or `\x09`
Tab,
/// `\r`, `\n` or `\r\n`, escaped: `\x0D`, `\x0A` or `\x0D\x0A`
Return,
/// Space character
Space,
}
/// A paragraph of words that are shaped and scaled (* but not yet layouted / positioned*!)
/// according to their final size in pixels.
#[derive(Debug, Clone)]
pub struct ScaledWords {
/// Font size (in pixels) that was used to scale these words
pub font_size_px: f32,
/// Words scaled to their appropriate font size, but not yet positioned on the screen
pub items: Vec<ScaledWord>,
/// Longest word in the `self.scaled_words`, necessary for
/// calculating overflow rectangles.
pub longest_word_width: f32,
/// Horizontal advance of the space glyph
pub space_advance_px: f32,
/// Glyph index of the space character
pub space_codepoint: u32,
}
/// Word that is scaled (to a font / font instance), but not yet positioned
#[derive(Debug, Clone)]
pub struct ScaledWord {
/// Glyphs, positions are relative to the first character of the word
pub glyph_infos: Vec<GlyphInfo>,
/// Horizontal advances of each glyph, necessary for
/// hit-testing characters later on (for text selection).
pub glyph_positions: Vec<GlyphPosition>,
/// The sum of the width of all the characters in this word
pub word_width: f32,
}
/// Stores the positions of the vertically laid out texts
#[derive(Debug, Clone, PartialEq)]
pub struct WordPositions {
/// Font size that was used to layout this text (value in pixels)
pub font_size_px: f32,
/// Options like word spacing, character spacing, etc. that were
/// used to layout these glyphs
pub text_layout_options: TextLayoutOptions,
/// Stores the positions of words.
pub word_positions: Vec<LayoutPoint>,
/// Index of the word at which the line breaks + length of line
/// (useful for text selection + horizontal centering)
pub line_breaks: Vec<(WordIndex, LineLength)>,
/// Horizontal width of the last line (in pixels), necessary for inline layout later on,
    /// so that the next text run can continue where the last text run left off.
///
/// Usually, the "trailing" of the current text block is the "leading" of the
/// next text block, to make it seem like two text runs push into each other.
pub trailing: f32,
/// How many words are in the text?
pub number_of_words: usize,
/// How many lines (NOTE: virtual lines, meaning line breaks in the layouted text) are there?
pub number_of_lines: usize,
/// Horizontal and vertical boundaries of the layouted words.
///
/// Note that the vertical extent can be larger than the last words' position,
/// because of trailing negative glyph advances.
pub content_size: LayoutSize,
}
/// Width and height of the scrollbars at the side of the text field.
///
/// This information is necessary in order to reserve space at
/// the side of the text field so that the text doesn't overlap the scrollbar.
/// In some cases (when the scrollbar is set to "auto"), the scrollbar space
/// is only taken up when the text overflows the rectangle itself.
#[derive(Debug, Default, Clone, PartialEq, PartialOrd)]
pub struct ScrollbarStyle {
    /// Horizontal scrollbar style, if any
    pub horizontal: Option<ScrollbarInfo>,
    /// Vertical scrollbar style, if any
    pub vertical: Option<ScrollbarInfo>,
}
/// Layout options that can impact the flow of word positions
#[derive(Debug, Clone, PartialEq, Default)]
pub struct TextLayoutOptions {
/// Multiplier for the line height, default to 1.0
pub line_height: Option<f32>,
/// Additional spacing between glyphs (in pixels)
pub letter_spacing: Option<f32>,
/// Additional spacing between words (in pixels)
pub word_spacing: Option<f32>,
/// How many spaces should a tab character emulate
/// (multiplying value, i.e. `4.0` = one tab = 4 spaces)?
pub tab_width: Option<f32>,
/// Maximum width of the text (in pixels) - if the text is set to `overflow:visible`, set this to None.
pub max_horizontal_width: Option<f32>,
    /// How many pixels of leading does the first line have? Note that this is added
    /// on top of the holes, so for effects like `:first-letter`, use a hole instead of a leading.
pub leading: Option<f32>,
/// This is more important for inline text layout where items can punch "holes"
/// into the text flow, for example an image that floats to the right.
///
/// TODO: Currently unused!
pub holes: Vec<LayoutRect>,
}
/// Given the scale of words + the word positions, lays out the words in a
#[derive(Debug, Clone, PartialEq)]
pub struct LeftAlignedGlyphs<'a> {
/// Width that was used to layout these glyphs (or None if the text has overflow:visible)
pub max_horizontal_width: Option<f32>,
/// Actual glyph instances, copied
pub glyphs: Vec<&'a GlyphInstance>,
/// Rectangles of the different lines, necessary for text selection
/// and hovering over text, etc.
pub line_rects: &'a Vec<LayoutRect>,
/// Horizontal and vertical extent of the text
pub text_bbox: LayoutSize,
}
/// Returns the layouted glyph instances
#[derive(Debug, Clone, PartialEq)]
pub struct LayoutedGlyphs {
pub glyphs: Vec<GlyphInstance>,
}
/// Whether the text overflows the parent rectangle, and if yes, by how many pixels,
/// necessary for determining if / how to show a scrollbar + aligning / centering text.
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd)]
pub enum TextOverflow {
/// Text is overflowing, by how much (in pixels)?
IsOverflowing(f32),
/// Text is in bounds, how much space is available until the edge of the rectangle (in pixels)?
InBounds(f32),
}
/// Iterator over glyphs that returns information about the cluster that this glyph belongs to.
/// Returned by the `ScaledWord::cluster_iter()` function.
///
/// For each glyph, returns information about what cluster this glyph belongs to. Useful for
/// doing operations per-cluster instead of per-glyph.
/// *Note*: The iterator returns once-per-glyph, not once-per-cluster, however
/// you can merge the clusters into groups by using the `ClusterInfo.cluster_idx`.
#[derive(Debug, Clone)]
pub struct ClusterIterator<'a> {
/// What codepoint does the current glyph have - set to `None` if the first character isn't yet processed.
cur_codepoint: Option<u32>,
/// What cluster *index* are we currently at - default: 0
cluster_count: usize,
word: &'a ScaledWord,
/// Store what glyph we are currently processing in this word
cur_glyph_idx: usize,
}
/// Info about what cluster a certain glyph belongs to.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ClusterInfo {
/// Cluster index in this word
pub cluster_idx: usize,
/// Codepoint of this cluster
pub codepoint: u32,
/// What the glyph index of this cluster is
pub glyph_idx: usize,
}
impl<'a> Iterator for ClusterIterator<'a> {
type Item = ClusterInfo;
    /// Returns cluster information for the next glyph in this word.
    ///
    /// Note: This will return one `ClusterInfo` per glyph, so you can't just
    /// use `.cluster_iter().count()` to count the clusters: Instead, use `.cluster_iter().last().cluster_idx`.
fn next(&mut self) -> Option<ClusterInfo> {
let next_glyph = self.word.glyph_infos.get(self.cur_glyph_idx)?;
let glyph_idx = self.cur_glyph_idx;
if self.cur_codepoint != Some(next_glyph.cluster) {
self.cur_codepoint = Some(next_glyph.cluster);
self.cluster_count += 1;
}
self.cur_glyph_idx += 1;
Some(ClusterInfo {
cluster_idx: self.cluster_count,
codepoint: self.cur_codepoint.unwrap_or(0),
glyph_idx,
})
}
}
impl ScaledWord {
/// Creates an iterator over clusters instead of glyphs
pub fn cluster_iter<'a>(&'a self) -> ClusterIterator<'a> {
ClusterIterator {
cur_codepoint: None,
cluster_count: 0,
word: &self,
cur_glyph_idx: 0,
}
}
pub fn number_of_clusters(&self) -> usize {
self.cluster_iter().last().map(|l| l.cluster_idx).unwrap_or(0)
}
}
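// Why clusters matter downstream: shaping can merge several codepoints into one glyph
// (ligatures) or split one codepoint into several glyphs, so letter spacing is applied
// per cluster (via `ClusterInfo.cluster_idx`) rather than per glyph - see the
// `letter_spacing` handling in `get_layouted_glyphs_unpositioned` and the
// `reserved_letter_spacing_px` computation in `position_words` below.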
/// Splits the text by whitespace into logical units (word, tab, return, whitespace).
pub fn split_text_into_words(text: &str) -> Words {
use unicode_normalization::UnicodeNormalization;
// Necessary because we need to handle both \n and \r\n characters
// If we just look at the characters one-by-one, this wouldn't be possible.
let normalized_string = text.nfc().collect::<String>();
let normalized_chars = normalized_string.chars().collect::<Vec<char>>();
let mut words = Vec::new();
// Instead of storing the actual word, the word is only stored as an index instead,
// which reduces allocations and is important for later on introducing RTL text
// (where the position of the character data does not correspond to the actual glyph order).
let mut current_word_start = 0;
let mut last_char_idx = 0;
let mut last_char_was_whitespace = false;
let char_len = normalized_chars.len();
for (ch_idx, ch) in normalized_chars.iter().enumerate() {
let ch = *ch;
let current_char_is_whitespace = ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n';
let should_push_delimiter = match ch {
' ' => {
Some(Word {
start: last_char_idx + 1,
end: ch_idx + 1,
word_type: WordType::Space
})
},
'\t' => {
Some(Word {
start: last_char_idx + 1,
end: ch_idx + 1,
word_type: WordType::Tab
})
},
'\n' => {
Some(if normalized_chars[last_char_idx] == '\r' {
// "\r\n" return
Word {
start: last_char_idx,
end: ch_idx + 1,
word_type: WordType::Return,
}
} else {
// "\n" return
Word {
start: last_char_idx + 1,
end: ch_idx + 1,
word_type: WordType::Return,
}
})
},
_ => None,
};
        // The current word just ended: push it on the first whitespace after it
        // (the final word, terminated by the end of the text, is pushed after the loop)
let should_push_word = if current_char_is_whitespace && !last_char_was_whitespace {
Some(Word {
start: current_word_start,
end: ch_idx,
word_type: WordType::Word
})
} else {
None
};
if current_char_is_whitespace {
current_word_start = ch_idx + 1;
}
let mut push_words = |arr: [Option<Word>;2]| {
words.extend(arr.into_iter().filter_map(|e| *e));
};
push_words([should_push_word, should_push_delimiter]);
last_char_was_whitespace = current_char_is_whitespace;
last_char_idx = ch_idx;
}
// Push the last word
if current_word_start != last_char_idx + 1 {
words.push(Word {
start: current_word_start,
end: normalized_chars.len(),
word_type: WordType::Word
});
}
// If the last item is a `Return`, remove it
if let Some(Word { word_type: WordType::Return, .. }) = words.last() {
words.pop();
}
Words {
items: words,
internal_str: normalized_string,
internal_chars: normalized_chars,
}
}
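// A quick sketch of the splitting behaviour (the `test_split_words` unit test at the
// bottom of this file exercises it in full): "abc def\nghi" yields
// Word("abc"), Space, Word("def"), Return, Word("ghi") - whitespace runs become items
// of their own so the layout stage can reproduce the original spacing.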
/// Takes a text broken into semantic items and a font instance and
/// scales the font accordingly.
pub fn words_to_scaled_words(
words: &Words,
font_bytes: &[u8],
font_index: u32,
font_size_px: f32,
) -> ScaledWords {
use text_shaping::{self, HbBuffer, HbFont, HbScaledFont};
let hb_font = HbFont::from_bytes(font_bytes, font_index);
let hb_scaled_font = HbScaledFont::from_font(&hb_font, font_size_px);
// Get the dimensions of the space glyph
let hb_space_buffer = HbBuffer::from_str(" ");
let hb_shaped_space = text_shaping::shape_word_hb(&hb_space_buffer, &hb_scaled_font);
let space_advance_px = hb_shaped_space.glyph_positions[0].x_advance as f32 / 128.0; // TODO: Half width for spaces?
let space_codepoint = hb_shaped_space.glyph_infos[0].codepoint;
let hb_buffer_entire_paragraph = HbBuffer::from_str(&words.internal_str);
let hb_shaped_entire_paragraph = text_shaping::shape_word_hb(&hb_buffer_entire_paragraph, &hb_scaled_font);
let mut shaped_word_positions = Vec::new();
let mut shaped_word_infos = Vec::new();
let mut current_word_positions = Vec::new();
let mut current_word_infos = Vec::new();
for i in 0..hb_shaped_entire_paragraph.glyph_positions.len() {
let glyph_info = hb_shaped_entire_paragraph.glyph_infos[i];
let glyph_position = hb_shaped_entire_paragraph.glyph_positions[i];
let is_space = glyph_info.codepoint == space_codepoint;
if is_space {
shaped_word_positions.push(current_word_positions.clone());
shaped_word_infos.push(current_word_infos.clone());
current_word_positions.clear();
current_word_infos.clear();
} else {
current_word_positions.push(glyph_position);
current_word_infos.push(glyph_info);
}
}
if !current_word_positions.is_empty() {
shaped_word_positions.push(current_word_positions);
shaped_word_infos.push(current_word_infos);
}
let mut longest_word_width = 0.0_f32;
let scaled_words = words.items.iter()
.filter(|w| w.word_type == WordType::Word)
.enumerate()
.filter_map(|(word_idx, word)| {
let hb_glyph_positions = shaped_word_positions.get(word_idx)?;
let hb_glyph_infos = shaped_word_infos.get(word_idx)?;
let hb_word_width = text_shaping::get_word_visual_width_hb(&hb_glyph_positions);
let hb_glyph_positions = text_shaping::get_glyph_positions_hb(&hb_glyph_positions);
let hb_glyph_infos = text_shaping::get_glyph_infos_hb(&hb_glyph_infos);
longest_word_width = longest_word_width.max(hb_word_width.abs());
Some(ScaledWord {
glyph_infos: hb_glyph_infos,
glyph_positions: hb_glyph_positions,
word_width: hb_word_width,
})
}).collect();
ScaledWords {
items: scaled_words,
longest_word_width: longest_word_width,
space_advance_px,
space_codepoint,
font_size_px,
}
}
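// Usage sketch, assuming `font_bytes` holds the raw contents of a TTF/OTF file (the
// path below is purely illustrative):
//
//     let font_bytes = std::fs::read("assets/SomeFont.ttf").unwrap();
//     let words = split_text_into_words("Hello World");
//     let scaled = words_to_scaled_words(&words, &font_bytes, 0, 16.0);
//     assert_eq!(scaled.items.len(), 2); // one ScaledWord per WordType::Word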
/// Positions the words on the screen (does not layout any glyph positions!), necessary for estimating
/// the intrinsic width + height of the text content.
pub fn position_words(
words: &Words,
scaled_words: &ScaledWords,
text_layout_options: &TextLayoutOptions,
font_size_px: f32,
) -> WordPositions {
use self::WordType::*;
use std::f32;
let space_advance = scaled_words.space_advance_px;
let word_spacing_px = space_advance * text_layout_options.word_spacing.unwrap_or(DEFAULT_WORD_SPACING);
let line_height_px = space_advance * text_layout_options.line_height.unwrap_or(DEFAULT_LINE_HEIGHT);
let tab_width_px = space_advance * text_layout_options.tab_width.unwrap_or(DEFAULT_TAB_WIDTH);
let letter_spacing_px = text_layout_options.letter_spacing.unwrap_or(DEFAULT_LETTER_SPACING);
let mut line_breaks = Vec::new();
let mut word_positions = Vec::new();
let mut line_number = 0;
let mut line_caret_x = 0.0;
let mut current_word_idx = 0;
macro_rules! advance_caret {($line_caret_x:expr) => ({
let caret_intersection = caret_intersects_with_holes(
$line_caret_x,
line_number,
font_size_px,
line_height_px,
&text_layout_options.holes,
text_layout_options.max_horizontal_width,
);
if let LineCaretIntersection::PushCaretOntoNextLine(_, _) = caret_intersection {
line_breaks.push((current_word_idx, line_caret_x));
}
// Correct and advance the line caret position
advance_caret(
&mut $line_caret_x,
&mut line_number,
caret_intersection,
);
})}
advance_caret!(line_caret_x);
if let Some(leading) = text_layout_options.leading {
line_caret_x += leading;
advance_caret!(line_caret_x);
}
// NOTE: word_idx increases only on words, not on other symbols!
let mut word_idx = 0;
macro_rules! handle_word {() => ({
let scaled_word = match scaled_words.items.get(word_idx) {
Some(s) => s,
None => continue,
};
let reserved_letter_spacing_px = match text_layout_options.letter_spacing {
None => 0.0,
Some(spacing_multiplier) => spacing_multiplier * scaled_word.number_of_clusters().saturating_sub(1) as f32,
};
// Calculate where the caret would be for the next word
let word_advance_x = scaled_word.word_width + reserved_letter_spacing_px;
let mut new_caret_x = line_caret_x + word_advance_x;
// NOTE: Slightly modified "advance_caret!(new_caret_x);" - due to line breaking behaviour
let caret_intersection = caret_intersects_with_holes(
new_caret_x,
line_number,
font_size_px,
line_height_px,
&text_layout_options.holes,
text_layout_options.max_horizontal_width,
);
let mut is_line_break = false;
if let LineCaretIntersection::PushCaretOntoNextLine(_, _) = caret_intersection {
line_breaks.push((current_word_idx, line_caret_x));
is_line_break = true;
}
if !is_line_break {
let line_caret_y = get_line_y_position(line_number, font_size_px, line_height_px);
word_positions.push(LayoutPoint::new(line_caret_x, line_caret_y));
}
// Correct and advance the line caret position
advance_caret(
&mut new_caret_x,
&mut line_number,
caret_intersection,
);
line_caret_x = new_caret_x;
// If there was a line break, the position needs to be determined after the line break happened
if is_line_break {
let line_caret_y = get_line_y_position(line_number, font_size_px, line_height_px);
word_positions.push(LayoutPoint::new(line_caret_x, line_caret_y));
// important! - if the word is pushed onto the next line, the caret has to be
// advanced by that words width!
line_caret_x += word_advance_x;
}
// NOTE: Word index is increased before pushing, since word indices are 1-indexed
// (so that paragraphs can be selected via "(0..word_index)").
word_idx += 1;
current_word_idx = word_idx;
})}
// The last word is a bit special: Any text must have at least one line break!
for word in words.items.iter().take(words.items.len().saturating_sub(1)) {
match word.word_type {
Word => {
handle_word!();
},
Return => {
line_breaks.push((current_word_idx, line_caret_x));
line_number += 1;
let mut new_caret_x = 0.0;
advance_caret!(new_caret_x);
line_caret_x = new_caret_x;
},
Space => {
let mut new_caret_x = line_caret_x + word_spacing_px;
advance_caret!(new_caret_x);
line_caret_x = new_caret_x;
},
Tab => {
let mut new_caret_x = line_caret_x + word_spacing_px + tab_width_px;
advance_caret!(new_caret_x);
line_caret_x = new_caret_x;
},
}
}
// Handle the last word, but ignore any last Return, Space or Tab characters
for word in &words.items[words.items.len().saturating_sub(1)..] {
if word.word_type == Word {
handle_word!();
}
line_breaks.push((current_word_idx, line_caret_x));
}
let trailing = line_caret_x;
let number_of_lines = line_number + 1;
let number_of_words = current_word_idx + 1;
let longest_line_width = line_breaks.iter().map(|(_word_idx, line_length)| *line_length).fold(0.0_f32, f32::max);
let content_size_y = get_line_y_position(line_number, font_size_px, line_height_px);
let content_size_x = text_layout_options.max_horizontal_width.unwrap_or(longest_line_width);
let content_size = LayoutSize::new(content_size_x, content_size_y);
WordPositions {
font_size_px,
text_layout_options: text_layout_options.clone(),
trailing,
number_of_words,
number_of_lines,
content_size,
word_positions,
line_breaks,
}
}
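// Usage sketch, continuing the example above `words_to_scaled_words` - wrap at 200px
// and read back the line structure:
//
//     let options = TextLayoutOptions {
//         max_horizontal_width: Some(200.0),
//         ..Default::default()
//     };
//     let positions = position_words(&words, &scaled, &options, 16.0);
//     // positions.word_positions[i] is where word i starts,
//     // positions.line_breaks records (word index, line length) per line,
//     // positions.content_size bounds the whole layouted text.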
pub fn get_layouted_glyphs_unpositioned(
word_positions: &WordPositions,
scaled_words: &ScaledWords,
) -> LayoutedGlyphs {
use text_shaping;
let mut glyphs = Vec::with_capacity(scaled_words.items.len());
let letter_spacing_px = word_positions.text_layout_options.letter_spacing.unwrap_or(0.0);
for (scaled_word, word_position) in scaled_words.items.iter()
.zip(word_positions.word_positions.iter()) {
glyphs.extend(
text_shaping::get_glyph_instances_hb(&scaled_word.glyph_infos, &scaled_word.glyph_positions)
.into_iter()
.zip(scaled_word.cluster_iter())
.map(|(mut glyph, cluster_info)| {
glyph.point.x += word_position.x;
glyph.point.y += word_position.y;
glyph.point.x += letter_spacing_px * cluster_info.cluster_idx as f32;
glyph
})
)
}
LayoutedGlyphs { glyphs }
}
pub fn get_layouted_glyphs_with_horizonal_alignment(
word_positions: &WordPositions,
scaled_words: &ScaledWords,
alignment_horz: StyleTextAlignmentHorz,
) -> (LayoutedGlyphs, LineBreaks) {
let mut glyphs = get_layouted_glyphs_unpositioned(word_positions, scaled_words);
// Align glyphs horizontal
let line_breaks = get_char_indices(&word_positions, &scaled_words);
align_text_horz(&mut glyphs.glyphs, alignment_horz, &line_breaks);
(glyphs, line_breaks)
}
/// Returns the final glyphs and positions them relative to the `rect_offset`,
/// ready for webrender to display
pub fn get_layouted_glyphs(
word_positions: &WordPositions,
scaled_words: &ScaledWords,
alignment_horz: StyleTextAlignmentHorz,
alignment_vert: StyleTextAlignmentVert,
rect_offset: LayoutPoint,
bounding_size_height_px: f32,
) -> LayoutedGlyphs {
let (mut glyphs, line_breaks) = get_layouted_glyphs_with_horizonal_alignment(word_positions, scaled_words, alignment_horz);
// Align glyphs vertically
let vertical_overflow = get_vertical_overflow(&word_positions, bounding_size_height_px);
align_text_vert(&mut glyphs.glyphs, alignment_vert, &line_breaks, vertical_overflow);
add_origin(&mut glyphs.glyphs, rect_offset.x, rect_offset.y);
glyphs
}
/// Returns the horizontal width and vertical height of the positioned text
pub fn get_positioned_word_bounding_box(word_positions: &WordPositions) -> LayoutSize {
word_positions.content_size
}
pub fn get_vertical_overflow(word_positions: &WordPositions, bounding_size_height_px: f32) -> TextOverflow {
let content_size = word_positions.content_size;
if bounding_size_height_px > content_size.height {
TextOverflow::InBounds(bounding_size_height_px - content_size.height)
} else {
TextOverflow::IsOverflowing(content_size.height - bounding_size_height_px)
}
}
pub fn word_item_is_return(item: &Word) -> bool {
item.word_type == WordType::Return
}
pub fn text_overflow_is_overflowing(overflow: &TextOverflow) -> bool {
use self::TextOverflow::*;
match overflow {
IsOverflowing(_) => true,
InBounds(_) => false,
}
}
pub fn get_char_indices(word_positions: &WordPositions, scaled_words: &ScaledWords) -> LineBreaks {
let width = word_positions.content_size.width;
if scaled_words.items.is_empty() {
return Vec::new();
}
let mut current_glyph_count = 0;
let mut last_word_idx = 0;
word_positions.line_breaks.iter().map(|(current_word_idx, line_length)| {
let remaining_space_px = width - line_length;
let words = &scaled_words.items[last_word_idx..*current_word_idx];
let glyphs_in_this_line: usize = words.iter().map(|w| w.glyph_infos.len()).sum::<usize>();
current_glyph_count += glyphs_in_this_line;
last_word_idx = *current_word_idx;
(current_glyph_count, remaining_space_px)
}).collect()
}
/// For a given line number (**NOTE: 0-indexed!**), calculates the Y
/// position of the bottom left corner
pub fn get_line_y_position(line_number: usize, font_size_px: f32, line_height_px: f32) -> f32 {
((font_size_px + line_height_px) * line_number as f32) + font_size_px
}
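// Worked example (illustrative values): with font_size_px = 10.0 and
// line_height_px = 2.0, line 0 sits at y = 10.0, line 1 at y = 22.0 and line 2 at
// y = 34.0 - each line advances by (font size + line height), offset by one font size
// so that y refers to the bottom of the line rather than its top.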
#[derive(Debug, Copy, Clone, PartialOrd, PartialEq)]
enum LineCaretIntersection {
    /// OK: Caret does not intersect any elements
NoIntersection,
/// In order to not intersect with any holes, the caret needs to
/// be advanced to the position x, but can stay on the same line.
AdvanceCaretTo(f32),
/// Caret needs to advance X number of lines and be positioned
/// with a leading of x
PushCaretOntoNextLine(usize, f32),
}
/// Check if the caret intersects with any holes and if yes, if the cursor should move to a new line.
///
/// # Inputs
///
/// - `line_caret_x`: The current horizontal caret position
/// - `line_number`: The current line number
/// - `holes`: Whether the text should respect any rectangular regions
/// where the text can't flow (preparation for inline / float layout).
/// - `max_width`: Does the text have a restriction on how wide it can be (in pixels)
fn caret_intersects_with_holes(
line_caret_x: f32,
line_number: usize,
font_size_px: f32,
line_height_px: f32,
holes: &[LayoutRect],
max_width: Option<f32>,
) -> LineCaretIntersection {
let mut new_line_caret_x = None;
let mut line_advance = 0;
// If the caret is outside of the max_width, move it to the start of a new line
if let Some(max_width) = max_width {
if line_caret_x > max_width {
new_line_caret_x = Some(0.0);
line_advance += 1;
}
}
for hole in holes {
let mut should_move_caret = false;
let mut current_line_advance = 0;
let mut new_line_number = line_number + current_line_advance;
let mut current_caret = LayoutPoint::new(
new_line_caret_x.unwrap_or(line_caret_x),
get_line_y_position(new_line_number, font_size_px, line_height_px)
);
// NOTE: holes need to be sorted by Y origin (from smallest to largest Y),
// and be sorted from left to right
        while hole.contains(&current_caret) {
should_move_caret = true;
if let Some(max_width) = max_width {
if hole.origin.x + hole.size.width >= max_width {
// Need to break the line here
current_line_advance += 1;
new_line_number = line_number + current_line_advance;
current_caret = LayoutPoint::new(
new_line_caret_x.unwrap_or(line_caret_x),
get_line_y_position(new_line_number, font_size_px, line_height_px)
);
} else {
new_line_number = line_number + current_line_advance;
current_caret = LayoutPoint::new(
hole.origin.x + hole.size.width,
get_line_y_position(new_line_number, font_size_px, line_height_px)
);
}
} else {
// No max width, so no need to break the line, move the caret to the right side of the hole
new_line_number = line_number + current_line_advance;
current_caret = LayoutPoint::new(
hole.origin.x + hole.size.width,
get_line_y_position(new_line_number, font_size_px, line_height_px)
);
}
}
if should_move_caret {
new_line_caret_x = Some(current_caret.x);
line_advance += current_line_advance;
}
}
if let Some(new_line_caret_x) = new_line_caret_x {
if line_advance == 0 {
LineCaretIntersection::AdvanceCaretTo(new_line_caret_x)
} else {
LineCaretIntersection::PushCaretOntoNextLine(line_advance, new_line_caret_x)
}
} else {
LineCaretIntersection::NoIntersection
}
}
fn advance_caret(caret: &mut f32, line_number: &mut usize, intersection: LineCaretIntersection) {
use self::LineCaretIntersection::*;
match intersection {
NoIntersection => { },
AdvanceCaretTo(x) => { *caret = x; },
PushCaretOntoNextLine(num_lines, x) => { *line_number += num_lines; *caret = x; },
}
}
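// Minimal sketch of the caret/hole interplay (values are illustrative):
//
//     let mut caret = 95.0;
//     let mut line = 0;
//     let hit = caret_intersects_with_holes(caret, line, 10.0, 2.0, &[], Some(80.0));
//     // caret > max_width and there are no holes, so `hit` is
//     // PushCaretOntoNextLine(1, 0.0)
//     advance_caret(&mut caret, &mut line, hit);
//     assert_eq!((caret, line), (0.0, 1));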
pub fn align_text_horz(
glyphs: &mut [GlyphInstance],
alignment: StyleTextAlignmentHorz,
line_breaks: &[(usize, f32)]
) {
use azul_css::StyleTextAlignmentHorz::*;
// Text alignment is theoretically very simple:
//
// If we have a bunch of text, such as this (the `glyphs`):
// ^^^^^^^^^^^^
// ^^^^^^^^
// ^^^^^^^^^^^^^^^^
// ^^^^^^^^^^
// and we have information about how much space each line has to the right:
// (the "---" is the space)
// ^^^^^^^^^^^^----
// ^^^^^^^^--------
// ^^^^^^^^^^^^^^^^
// ^^^^^^^^^^------
// Then we can center-align the text, by just taking the "-----", dividing
// it by 2 and moving all characters to the right:
// --^^^^^^^^^^^^--
// ----^^^^^^^^----
// ^^^^^^^^^^^^^^^^
// ---^^^^^^^^^^---
// Same for right-aligned text, but without the "divide by 2 step"
if line_breaks.is_empty() || glyphs.is_empty() {
        return; // nothing to align - possibly a 0-height rectangle
}
// // assert that the last info in the line_breaks vec has the same glyph index
// // i.e. the last line has to end with the last glyph
// assert!(glyphs.len() - 1 == line_breaks[line_breaks.len() - 1].0);
let multiply_factor = match alignment {
Left => return,
Center => 0.5, // move the line by the half width
Right => 1.0, // move the line by the full width
};
    // If we have the characters "ABC\n\nDEF", this will result in:
    //
    // [ Glyph(A), Glyph(B), Glyph(C), Glyph(D), Glyph(E), Glyph(F)]
    //
    // [LineBreak(3), LineBreak(3), LineBreak(6)]
    //
    // where each line break stores the index one past the last glyph of its line.
    //
    // If we'd just shift every character after the line break, we'd get into
    // the problem of shifting the 3rd character twice, because of the \n\n.
    //
    // To avoid the double-line-break problem, we can use half-open ranges:
    //
    // - from 0..3, shift the characters at i by X amount
    // - from 3..3 (i.e. 0 characters), shift the characters at i by X amount
    // - from 3..6, shift the characters by X amount
    //
    // Because the middle range selects 0 characters, the shift is effectively
    // ignored, which is what we want - because there are no characters to shift.
let mut start_range_char = 0;
for (line_break_char, line_break_amount) in line_breaks {
        // NOTE: Half-open range - `line_break_char` is the index one past the
        // last glyph of this line, so the glyph at that index is not shifted here.
        for glyph in &mut glyphs[start_range_char..*line_break_char] {
            glyph.point.x += line_break_amount * multiply_factor;
        }
        start_range_char = *line_break_char;
}
}
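// An added sketch (not in the original source) of the half-open-range
// shifting scheme described above, using plain `f32` X positions instead of
// `GlyphInstance`s so it stays self-contained. Line breaks store the index
// one past the last glyph of their line: "ABC\n\nDEF" -> (3, 3, 6).
#[test]
fn test_align_text_horz_range_scheme() {
    let mut xs = [0.0_f32, 1.0, 2.0, 0.0, 1.0, 2.0];
    let line_breaks = [(3_usize, 10.0_f32), (3, 20.0), (6, 30.0)];
    let multiply_factor = 0.5; // center alignment

    let mut start_range_char = 0;
    for (line_break_char, line_break_amount) in &line_breaks {
        for x in &mut xs[start_range_char..*line_break_char] {
            *x += line_break_amount * multiply_factor;
        }
        start_range_char = *line_break_char;
    }
    // Line 1 is shifted by 5.0, the empty middle line selects no glyphs
    // (its 20.0 is ignored), and line 3 is shifted by 15.0.
    assert_eq!(xs, [5.0, 6.0, 7.0, 15.0, 16.0, 17.0]);
}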
pub fn align_text_vert(
glyphs: &mut [GlyphInstance],
alignment: StyleTextAlignmentVert,
line_breaks: &[(usize, f32)],
vertical_overflow: TextOverflow,
) {
use self::TextOverflow::*;
use self::StyleTextAlignmentVert::*;
if line_breaks.is_empty() || glyphs.is_empty() {
return;
}
// // Die if we have a line break at a position bigger than the position of the last glyph,
// // because something went horribly wrong!
// //
// // The next unwrap is always safe as line_breaks will have a minimum of one entry!
// assert!(glyphs.len() - 1 == line_breaks.last().unwrap().0);
let multiply_factor = match alignment {
Top => return,
Center => 0.5,
Bottom => 1.0,
};
let space_to_add = match vertical_overflow {
IsOverflowing(_) => return,
InBounds(remaining_space_px) => {
// Total text height (including last leading!)
// All metrics in pixels
(remaining_space_px * multiply_factor)
},
};
glyphs.iter_mut().for_each(|g| g.point.y += space_to_add);
}
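// An added sketch (not in the original source): the vertical offset applied
// by `align_text_vert` is simply `remaining_space * multiply_factor`, with
// the factor being 0.0 / 0.5 / 1.0 for Top / Center / Bottom. Plain floats
// stand in for glyph Y positions to keep the test self-contained.
#[test]
fn test_align_text_vert_offset_scheme() {
    let remaining_space_px = 30.0_f32;
    for &(factor, expected_offset) in &[(0.5_f32, 15.0_f32), (1.0, 30.0)] {
        let mut ys = [20.0_f32, 40.0];
        let space_to_add = remaining_space_px * factor;
        ys.iter_mut().for_each(|y| *y += space_to_add);
        assert_eq!(ys, [20.0 + expected_offset, 40.0 + expected_offset]);
    }
}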
/// Adds the X and Y offset to each glyph in `positioned_glyphs`
pub fn add_origin(positioned_glyphs: &mut [GlyphInstance], x: f32, y: f32) {
for c in positioned_glyphs {
c.point.x += x;
c.point.y += y;
}
}
#[test]
fn test_split_words() {
fn print_words(w: &Words) {
println!("-- string: {:?}", w.get_str());
for item in &w.items {
println!("{:?} - ({}..{}) = {:?}", w.get_substr(item), item.start, item.end, item.word_type);
}
}
fn string_to_vec(s: String) -> Vec<char> {
s.chars().collect()
}
fn assert_words(expected: &Words, got_words: &Words) {
for (idx, expected_word) in expected.items.iter().enumerate() {
let got = got_words.items.get(idx);
if got != Some(expected_word) {
println!("expected: ");
print_words(expected);
println!("got: ");
print_words(got_words);
panic!("Expected word idx {} - expected: {:#?}, got: {:#?}", idx, Some(expected_word), got);
}
}
}
let ascii_str = String::from("abc\tdef \nghi\r\njkl");
let words_ascii = split_text_into_words(&ascii_str);
let words_ascii_expected = Words {
internal_str: ascii_str.clone(),
internal_chars: string_to_vec(ascii_str),
items: vec![
Word { start: 0, end: 3, word_type: WordType::Word }, // "abc" - (0..3) = Word
Word { start: 3, end: 4, word_type: WordType::Tab }, // "\t" - (3..4) = Tab
Word { start: 4, end: 7, word_type: WordType::Word }, // "def" - (4..7) = Word
Word { start: 7, end: 8, word_type: WordType::Space }, // " " - (7..8) = Space
Word { start: 8, end: 9, word_type: WordType::Space }, // " " - (8..9) = Space
Word { start: 9, end: 10, word_type: WordType::Return }, // "\n" - (9..10) = Return
Word { start: 10, end: 13, word_type: WordType::Word }, // "ghi" - (10..13) = Word
Word { start: 13, end: 15, word_type: WordType::Return }, // "\r\n" - (13..15) = Return
Word { start: 15, end: 18, word_type: WordType::Word }, // "jkl" - (15..18) = Word
],
};
assert_words(&words_ascii_expected, &words_ascii);
let unicode_str = String::from("㌊㌋㌌㌍㌎㌏㌐㌑ ㌒㌓㌔㌕㌖㌗");
let words_unicode = split_text_into_words(&unicode_str);
let words_unicode_expected = Words {
internal_str: unicode_str.clone(),
internal_chars: string_to_vec(unicode_str),
items: vec![
Word { start: 0, end: 8, word_type: WordType::Word }, // "㌊㌋㌌㌍㌎㌏㌐㌑"
Word { start: 8, end: 9, word_type: WordType::Space }, // " "
Word { start: 9, end: 15, word_type: WordType::Word }, // "㌒㌓㌔㌕㌖㌗"
],
};
assert_words(&words_unicode_expected, &words_unicode);
let single_str = String::from("A");
let words_single_str = split_text_into_words(&single_str);
let words_single_str_expected = Words {
internal_str: single_str.clone(),
internal_chars: string_to_vec(single_str),
items: vec![
Word { start: 0, end: 1, word_type: WordType::Word }, // "A"
],
};
assert_words(&words_single_str_expected, &words_single_str);
}
#[test]
fn test_get_line_y_position() {
assert_eq!(get_line_y_position(0, 20.0, 0.0), 20.0);
assert_eq!(get_line_y_position(1, 20.0, 0.0), 40.0);
assert_eq!(get_line_y_position(2, 20.0, 0.0), 60.0);
// lines:
// 0 - height 20, padding 5 = 20.0 (padding is for the next line)
// 1 - height 20, padding 5 = 45.0 ( = 20 + 20 + 5)
// 2 - height 20, padding 5 = 70.0 ( = 20 + 20 + 5 + 20 + 5)
assert_eq!(get_line_y_position(0, 20.0, 5.0), 20.0);
assert_eq!(get_line_y_position(1, 20.0, 5.0), 45.0);
assert_eq!(get_line_y_position(2, 20.0, 5.0), 70.0);
}
// Scenario 1:
//
// +---------+
// |+ ------>|+
// | |
// +---------+
// hole rectangle: 200x100 (width x height)
// max-width: none, line-height 1.0, font-size: 20
// cursor is at: 0x, 20y
// expect cursor to advance to 200x, 20y
//
#[test]
fn test_caret_intersects_with_holes_1() {
let line_caret_x = 0.0;
let line_number = 0;
let font_size_px = 20.0;
let line_height_px = 0.0;
let max_width = None;
let holes = vec![LayoutRect::new(LayoutPoint::new(0.0, 0.0), LayoutSize::new(200.0, 100.0))];
let result = caret_intersects_with_holes(
line_caret_x,
line_number,
font_size_px,
line_height_px,
&holes,
max_width,
);
assert_eq!(result, LineCaretIntersection::AdvanceCaretTo(200.0));
}
// Scenario 2:
//
// +---------+
// |+ -----> |
// |-------> |
// |---------|
// |+ |
// | |
// +---------+
// hole rectangle: 200x100 (width x height)
// max-width: 200px, line-height 1.0, font-size: 20
// cursor is at: 0x, 20y
// expect cursor to advance to 0x, 100y (+= 4 lines)
//
#[test]
fn test_caret_intersects_with_holes_2() {
let line_caret_x = 0.0;
let line_number = 0;
let font_size_px = 20.0;
let line_height_px = 0.0;
let max_width = Some(200.0);
let holes = vec![LayoutRect::new(LayoutPoint::new(0.0, 0.0), LayoutSize::new(200.0, 100.0))];
let result = caret_intersects_with_holes(
line_caret_x,
line_number,
font_size_px,
line_height_px,
&holes,
max_width,
);
assert_eq!(result, LineCaretIntersection::PushCaretOntoNextLine(4, 0.0));
}
// Scenario 3:
//
// +----------------+
// | | | +----->
// |------->+ |
// |------+ |
// | |
// | |
// +----------------+
// hole rectangle: 200x100 (width x height)
// max-width: 400px, line-height 1.0, font-size: 20
// cursor is at: 450x, 20y
// expect cursor to advance to 200x, 40y (+= 1 line; the caret is pushed to
// the hole's right edge at 200px)
//
#[test]
fn test_caret_intersects_with_holes_3() {
let line_caret_x = 450.0;
let line_number = 0;
let font_size_px = 20.0;
let line_height_px = 0.0;
let max_width = Some(400.0);
let holes = vec![LayoutRect::new(LayoutPoint::new(0.0, 0.0), LayoutSize::new(200.0, 100.0))];
let result = caret_intersects_with_holes(
line_caret_x,
line_number,
font_size_px,
line_height_px,
&holes,
max_width,
);
assert_eq!(result, LineCaretIntersection::PushCaretOntoNextLine(1, 200.0));
}
// Scenario 4:
//
// +----------------+
// | + +------+ |
// | | | |
// | | | |
// | +------+ |
// | |
// +----------------+
// hole rectangle: 200x100 (width x height) @ 80.0x, 20.0y
// max-width: 400px, line-height 1.0, font-size: 20
// cursor is at: 40x, 20y
// expect cursor to not advance at all
//
#[test]
fn test_caret_intersects_with_holes_4() {
let line_caret_x = 40.0;
let line_number = 0;
let font_size_px = 20.0;
let line_height_px = 0.0;
let max_width = Some(400.0);
let holes = vec![LayoutRect::new(LayoutPoint::new(80.0, 20.0), LayoutSize::new(200.0, 100.0))];
let result = caret_intersects_with_holes(
line_caret_x,
line_number,
font_size_px,
line_height_px,
&holes,
max_width,
);
assert_eq!(result, LineCaretIntersection::NoIntersection);
}
| 35.14038 | 127 | 0.624016 |
1cd5a0818227d6fc698981fbabb4791e78a85d22 | 422 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{block_metadata::BlockMetadata, test_helpers::assert_canonical_encode_decode};
use proptest::prelude::*;
proptest! {
#![proptest_config(ProptestConfig::with_cases(20))]
#[test]
fn test_block_metadata_canonical_serialization(data in any::<BlockMetadata>()) {
assert_canonical_encode_decode(data);
}
}
| 28.133333 | 89 | 0.739336 |
f4515c687ac50ee52fbe391220c383fc26c980a6 | 7,321 | /// Define a type that supports parsing and printing a given identifier as if it
/// were a keyword.
///
/// # Usage
///
/// As a convention, it is recommended that this macro be invoked within a
/// module called `kw` or `keyword` and that the resulting parser be invoked
/// with a `kw::` or `keyword::` prefix.
///
/// ```edition2018
/// mod kw {
/// syn::custom_keyword!(whatever);
/// }
/// ```
///
/// The generated syntax tree node supports the following operations just like
/// any built-in keyword token.
///
/// - [Peeking] — `input.peek(kw::whatever)`
///
/// - [Parsing] — `input.parse::<kw::whatever>()?`
///
/// - [Printing] — `quote!( ... #whatever_token ... )`
///
/// - Construction from a [`Span`] — `let whatever_token = kw::whatever(sp)`
///
/// - Field access to its span — `let sp = whatever_token.span`
///
/// [Peeking]: parse::ParseBuffer::peek
/// [Parsing]: parse::ParseBuffer::parse
/// [Printing]: quote::ToTokens
/// [`Span`]: proc_macro2::Span
///
/// # Example
///
/// This example parses input that looks like `bool = true` or `str = "value"`.
/// The key must be either the identifier `bool` or the identifier `str`. If
/// `bool`, the value may be either `true` or `false`. If `str`, the value may
/// be any string literal.
///
/// The symbols `bool` and `str` are not reserved keywords in Rust so these are
/// not considered keywords in the `syn::token` module. Like any other
/// identifier that is not a keyword, these can be declared as custom keywords
/// by crates that need to use them as such.
///
/// ```edition2018
/// use syn::{LitBool, LitStr, Result, Token};
/// use syn::parse::{Parse, ParseStream};
///
/// mod kw {
/// syn::custom_keyword!(bool);
/// syn::custom_keyword!(str);
/// }
///
/// enum Argument {
/// Bool {
/// bool_token: kw::bool,
/// eq_token: Token![=],
/// value: LitBool,
/// },
/// Str {
/// str_token: kw::str,
/// eq_token: Token![=],
/// value: LitStr,
/// },
/// }
///
/// impl Parse for Argument {
/// fn parse(input: ParseStream) -> Result<Self> {
/// let lookahead = input.lookahead1();
/// if lookahead.peek(kw::bool) {
/// Ok(Argument::Bool {
/// bool_token: input.parse::<kw::bool>()?,
/// eq_token: input.parse()?,
/// value: input.parse()?,
/// })
/// } else if lookahead.peek(kw::str) {
/// Ok(Argument::Str {
/// str_token: input.parse::<kw::str>()?,
/// eq_token: input.parse()?,
/// value: input.parse()?,
/// })
/// } else {
/// Err(lookahead.error())
/// }
/// }
/// }
/// ```
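///
/// # Construction and peeking
///
/// A short additional sketch (marked `ignore` because it assumes the `kw`
/// module from the example above and an in-scope `ParseStream` named `input`):
///
/// ```edition2018,ignore
/// use proc_macro2::Span;
///
/// // Construct the keyword token from an explicit span ...
/// let whatever_token = kw::whatever(Span::call_site());
/// let sp = whatever_token.span;
///
/// // ... or peek for it without consuming any tokens.
/// if input.peek(kw::whatever) {
///     let parsed: kw::whatever = input.parse()?;
/// }
/// ```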
#[macro_export(local_inner_macros)]
macro_rules! custom_keyword {
($ident:ident) => {
#[allow(non_camel_case_types)]
pub struct $ident {
pub span: $crate::export::Span,
}
#[doc(hidden)]
#[allow(non_snake_case)]
pub fn $ident<__S: $crate::export::IntoSpans<[$crate::export::Span; 1]>>(
span: __S,
) -> $ident {
$ident {
span: $crate::export::IntoSpans::into_spans(span)[0],
}
}
impl $crate::export::Default for $ident {
fn default() -> Self {
$ident {
span: $crate::export::Span::call_site(),
}
}
}
impl_parse_for_custom_keyword!($ident);
impl_to_tokens_for_custom_keyword!($ident);
impl_clone_for_custom_keyword!($ident);
impl_extra_traits_for_custom_keyword!($ident);
};
}
// Not public API.
#[cfg(feature = "parsing")]
#[doc(hidden)]
#[macro_export]
macro_rules! impl_parse_for_custom_keyword {
($ident:ident) => {
// For peek.
impl $crate::token::CustomToken for $ident {
fn peek(cursor: $crate::buffer::Cursor) -> $crate::export::bool {
if let Some((ident, _rest)) = cursor.ident() {
ident == stringify!($ident)
} else {
false
}
}
fn display() -> &'static $crate::export::str {
concat!("`", stringify!($ident), "`")
}
}
impl $crate::parse::Parse for $ident {
fn parse(input: $crate::parse::ParseStream) -> $crate::parse::Result<$ident> {
input.step(|cursor| {
if let $crate::export::Some((ident, rest)) = cursor.ident() {
if ident == stringify!($ident) {
return $crate::export::Ok(($ident { span: ident.span() }, rest));
}
}
$crate::export::Err(cursor.error(concat!(
"expected `",
stringify!($ident),
"`"
)))
})
}
}
};
}
// Not public API.
#[cfg(not(feature = "parsing"))]
#[doc(hidden)]
#[macro_export]
macro_rules! impl_parse_for_custom_keyword {
($ident:ident) => {};
}
// Not public API.
#[cfg(feature = "printing")]
#[doc(hidden)]
#[macro_export]
macro_rules! impl_to_tokens_for_custom_keyword {
($ident:ident) => {
impl $crate::export::ToTokens for $ident {
fn to_tokens(&self, tokens: &mut $crate::export::TokenStream2) {
let ident = $crate::Ident::new(stringify!($ident), self.span);
$crate::export::TokenStreamExt::append(tokens, ident);
}
}
};
}
// Not public API.
#[cfg(not(feature = "printing"))]
#[doc(hidden)]
#[macro_export]
macro_rules! impl_to_tokens_for_custom_keyword {
($ident:ident) => {};
}
// Not public API.
#[cfg(feature = "clone-impls")]
#[doc(hidden)]
#[macro_export]
macro_rules! impl_clone_for_custom_keyword {
($ident:ident) => {
impl $crate::export::Copy for $ident {}
impl $crate::export::Clone for $ident {
fn clone(&self) -> Self {
*self
}
}
};
}
// Not public API.
#[cfg(not(feature = "clone-impls"))]
#[doc(hidden)]
#[macro_export]
macro_rules! impl_clone_for_custom_keyword {
($ident:ident) => {};
}
// Not public API.
#[cfg(feature = "extra-traits")]
#[doc(hidden)]
#[macro_export]
macro_rules! impl_extra_traits_for_custom_keyword {
($ident:ident) => {
impl $crate::export::Debug for $ident {
fn fmt(&self, f: &mut $crate::export::Formatter) -> $crate::export::fmt::Result {
$crate::export::Formatter::write_str(
f,
concat!("Keyword [", stringify!($ident), "]"),
)
}
}
impl $crate::export::Eq for $ident {}
impl $crate::export::PartialEq for $ident {
fn eq(&self, _other: &Self) -> $crate::export::bool {
true
}
}
impl $crate::export::Hash for $ident {
fn hash<__H: $crate::export::Hasher>(&self, _state: &mut __H) {}
}
};
}
// Not public API.
#[cfg(not(feature = "extra-traits"))]
#[doc(hidden)]
#[macro_export]
macro_rules! impl_extra_traits_for_custom_keyword {
($ident:ident) => {};
}
| 28.936759 | 93 | 0.520284 |
6146534ab3a0b9b48fbe639b0bbae6b91a26222e | 2,493 | use std::{thread, time};
use penrose::{core::hooks::Hook, draw::*, Config, Result, WindowManager, XcbConnection};
const HEIGHT: usize = 18;
const PROFONT: &str = "ProFont For Powerline";
const FIRA: &str = "Fira Code";
const SERIF: &str = "Serif";
const BLACK: u32 = 0x282828ff;
const GREY: u32 = 0x3c3836ff;
const WHITE: u32 = 0xebdbb2ff;
const PURPLE: u32 = 0xb16286ff;
const BLUE: u32 = 0x458588ff;
const RED: u32 = 0xcc241dff;
fn main() -> Result<()> {
bar_draw()?;
simple_draw()?;
Ok(())
}
fn bar_draw() -> Result<()> {
let workspaces = &["1", "2", "3", "4", "5", "6"];
let style = TextStyle {
font: PROFONT.to_string(),
point_size: 11,
fg: WHITE.into(),
bg: Some(BLACK.into()),
padding: (2.0, 2.0),
};
let highlight = BLUE;
let empty_ws = GREY;
let mut bar = dwm_bar(
Box::new(XCBDraw::new()?),
HEIGHT,
&style,
highlight,
empty_ws,
workspaces,
)?;
let config = Config::default();
let conn = XcbConnection::new()?;
let mut wm = WindowManager::init(config, &conn);
bar.startup(&mut wm); // ensure widgets are initialised correctly
thread::sleep(time::Duration::from_millis(1000));
for focused in 1..6 {
bar.workspace_change(&mut wm, focused - 1, focused);
bar.event_handled(&mut wm);
thread::sleep(time::Duration::from_millis(1000));
}
thread::sleep(time::Duration::from_millis(10000));
Ok(())
}
fn simple_draw() -> Result<()> {
let mut drw = XCBDraw::new()?;
let (_, _, w, _) = drw.screen_sizes()?[0].values();
let id = drw.new_window(&WindowType::Dock, 0, 0, w as usize, HEIGHT)?;
drw.register_font(PROFONT);
drw.register_font(SERIF);
drw.register_font(FIRA);
let mut ctx = drw.context_for(id)?;
ctx.color(&WHITE.into());
ctx.rectangle(0.0, 0.0, w as f64, HEIGHT as f64);
ctx.translate(1.0, 1.0);
ctx.color(&BLACK.into());
ctx.font(PROFONT, 12)?;
let (offset, _) = ctx.text("this is a simple test", 0.0, (0.0, 8.0))?;
ctx.color(&RED.into());
ctx.font(SERIF, 10)?;
ctx.translate((offset + 5.0) as f64, 0.0);
let (offset, _) = ctx.text("BORK BORK!", 0.0, (0.0, 0.0))?;
ctx.color(&PURPLE.into());
ctx.font(FIRA, 10)?;
ctx.translate((offset + 5.0) as f64, 0.0);
ctx.text("Look at all the colors!", 0.0, (0.0, 0.0))?;
drw.flush(id);
thread::sleep(time::Duration::from_millis(5000));
Ok(())
}
| 27.097826 | 88 | 0.582431 |
14a1a1a02b0087e46cce5558a0b214760413b4ba | 762 | // Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
// http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
// https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
// modified, or distributed except according to those terms. Please review the Licences for the
// specific language governing permissions and limitations relating to use of the SAFE Network
// Software.
extern crate system_uri;
use std::env;
use system_uri::open;
fn main() {
if let Some(url) = env::args().nth(1) {
println!("Trying to open {}", url);
let _ = open(url);
} else {
println!("Please specify your URL")
}
}
| 33.130435 | 95 | 0.698163 |
757933661505d244c13084c66a4b084646e7f215 | 59,989 | // DO NOT EDIT !
// This file was generated automatically from 'src/mako/api/lib.rs.mako'
// DO NOT EDIT !
//! This documentation was generated from *Drive Activity* crate version *1.0.11+20190702*, where *20190702* is the exact revision of the *driveactivity:v2* schema built by the [mako](http://www.makotemplates.org/) code generator *v1.0.11*.
//!
//! Everything else about the *Drive Activity* *v2* API can be found at the
//! [official documentation site](https://developers.google.com/drive/activity/).
//! The original source code is [on github](https://github.com/Byron/google-apis-rs/tree/master/gen/driveactivity2).
//! # Features
//!
//! Handle the following *Resources* with ease from the central [hub](struct.DriveActivityHub.html) ...
//!
//! * activity
//! * [*query*](struct.ActivityQueryCall.html)
//!
//!
//!
//!
//! Not what you are looking for ? Find all other Google APIs in their Rust [documentation index](http://byron.github.io/google-apis-rs).
//!
//! # Structure of this Library
//!
//! The API is structured into the following primary items:
//!
//! * **[Hub](struct.DriveActivityHub.html)**
//! * a central object to maintain state and allow accessing all *Activities*
//! * creates [*Method Builders*](trait.MethodsBuilder.html) which in turn
//! allow access to individual [*Call Builders*](trait.CallBuilder.html)
//! * **[Resources](trait.Resource.html)**
//! * primary types that you can apply *Activities* to
//! * a collection of properties and *Parts*
//! * **[Parts](trait.Part.html)**
//! * a collection of properties
//! * never directly used in *Activities*
//! * **[Activities](trait.CallBuilder.html)**
//! * operations to apply to *Resources*
//!
//! All *structures* are marked with applicable traits to further categorize them and ease browsing.
//!
//! Generally speaking, you can invoke *Activities* like this:
//!
//! ```rust,ignore
//! let r = hub.resource().activity(...).doit()
//! ```
//!
//! Or specifically ...
//!
//! ```ignore
//! let r = hub.activity().query(...).doit()
//! ```
//!
//! The `resource()` and `activity(...)` calls create [builders][builder-pattern]. The second one dealing with `Activities`
//! supports various methods to configure the impending operation (not shown here). It is made such that all required arguments have to be
//! specified right away (i.e. `(...)`), whereas all optional ones can be [build up][builder-pattern] as desired.
//! The `doit()` method performs the actual communication with the server and returns the respective result.
//!
//! # Usage
//!
//! ## Setting up your Project
//!
//! To use this library, you would put the following lines into your `Cargo.toml` file:
//!
//! ```toml
//! [dependencies]
//! google-driveactivity2 = "*"
//! # This project intentionally uses an old version of Hyper. See
//! # https://github.com/Byron/google-apis-rs/issues/173 for more
//! # information.
//! hyper = "^0.10"
//! hyper-rustls = "^0.6"
//! serde = "^1.0"
//! serde_json = "^1.0"
//! yup-oauth2 = "^1.0"
//! ```
//!
//! ## A complete example
//!
//! ```test_harness,no_run
//! extern crate hyper;
//! extern crate hyper_rustls;
//! extern crate yup_oauth2 as oauth2;
//! extern crate google_driveactivity2 as driveactivity2;
//! use driveactivity2::QueryDriveActivityRequest;
//! use driveactivity2::{Result, Error};
//! # #[test] fn egal() {
//! use std::default::Default;
//! use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
//! use driveactivity2::DriveActivityHub;
//!
//! // Get an ApplicationSecret instance by some means. It contains the `client_id` and
//! // `client_secret`, among other things.
//! let secret: ApplicationSecret = Default::default();
//! // Instantiate the authenticator. It will choose a suitable authentication flow for you,
//! // unless you replace `None` with the desired Flow.
//! // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
//! // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
//! // retrieve them from storage.
//! let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
//! hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())),
//! <MemoryStorage as Default>::default(), None);
//! let mut hub = DriveActivityHub::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
//! // As the method needs a request, you would usually fill it with the desired information
//! // into the respective structure. Some of the parts shown here might not be applicable !
//! // Values shown here are possibly random and not representative !
//! let mut req = QueryDriveActivityRequest::default();
//!
//! // You can configure optional parameters by calling the respective setters at will, and
//! // execute the final call using `doit()`.
//! // Values shown here are possibly random and not representative !
//! let result = hub.activity().query(req)
//! .doit();
//!
//! match result {
//! Err(e) => match e {
//! // The Error enum provides details about what exactly happened.
//! // You can also just use its `Debug`, `Display` or `Error` traits
//! Error::HttpError(_)
//! |Error::MissingAPIKey
//! |Error::MissingToken(_)
//! |Error::Cancelled
//! |Error::UploadSizeLimitExceeded(_, _)
//! |Error::Failure(_)
//! |Error::BadRequest(_)
//! |Error::FieldClash(_)
//! |Error::JsonDecodeError(_, _) => println!("{}", e),
//! },
//! Ok(res) => println!("Success: {:?}", res),
//! }
//! # }
//! ```
//! ## Handling Errors
//!
//! All errors produced by the system are provided either as the [Result](enum.Result.html) enumeration returned by
//! the doit() methods, or handed as possibly intermediate results to either the
//! [Hub Delegate](trait.Delegate.html), or the [Authenticator Delegate](https://docs.rs/yup-oauth2/*/yup_oauth2/trait.AuthenticatorDelegate.html).
//!
//! When delegates handle errors or intermediate values, they may have a chance to instruct the system to retry. This
//! makes the system potentially resilient to all kinds of errors.
//!
//! ## Uploads and Downloads
//! If a method supports downloads, the response body, which is part of the [Result](enum.Result.html), should be
//! read by you to obtain the media.
//! If such a method also supports a [Response Result](trait.ResponseResult.html), it will return that by default.
//! You can see it as meta-data for the actual media. To trigger a media download, you will have to set up the builder by making
//! this call: `.param("alt", "media")`.
//!
//! Methods supporting uploads can do so using up to 2 different protocols:
//! *simple* and *resumable*. The distinctiveness of each is represented by customized
//! `doit(...)` methods, which are then named `upload(...)` and `upload_resumable(...)` respectively.
//!
//! ## Customization and Callbacks
//!
//! You may alter the way an `doit()` method is called by providing a [delegate](trait.Delegate.html) to the
//! [Method Builder](trait.CallBuilder.html) before making the final `doit()` call.
//! Respective methods will be called to provide progress information, as well as determine whether the system should
//! retry on failure.
//!
//! The [delegate trait](trait.Delegate.html) is default-implemented, allowing you to customize it with minimal effort.
//!
//! ## Optional Parts in Server-Requests
//!
//! All structures provided by this library are made to be [encodable](trait.RequestValue.html) and
//! [decodable](trait.ResponseResult.html) via *json*. Optionals are used to indicate that partial requests and responses
//! are valid.
//! Most optionals are considered [Parts](trait.Part.html) which are identifiable by name, and which will be sent to
//! the server to indicate either the set parts of the request or the desired parts in the response.
//!
//! ## Builder Arguments
//!
//! Using [method builders](trait.CallBuilder.html), you are able to prepare an action call by repeatedly calling its methods.
//! These will always take a single argument, for which the following statements are true.
//!
//! * [PODs][wiki-pod] are passed by copy
//! * strings are passed as `&str`
//! * [request values](trait.RequestValue.html) are moved
//!
//! Arguments will always be copied or cloned into the builder, to make them independent of their original life times.
//!
//! [wiki-pod]: http://en.wikipedia.org/wiki/Plain_old_data_structure
//! [builder-pattern]: http://en.wikipedia.org/wiki/Builder_pattern
//! [google-go-api]: https://github.com/google/google-api-go-client
//!
//!
// Unused attributes happen thanks to defined, but unused structures
// We don't warn about this, as depending on the API, some data structures or facilities are never used.
// Instead of pre-determining this, we just disable the lint. It's manually tuned to not have any
// unused imports in fully featured APIs. Same with unused_mut ... .
#![allow(unused_imports, unused_mut, dead_code)]
// DO NOT EDIT !
// This file was generated automatically from 'src/mako/api/lib.rs.mako'
// DO NOT EDIT !
#[macro_use]
extern crate serde_derive;
extern crate hyper;
extern crate serde;
extern crate serde_json;
extern crate yup_oauth2 as oauth2;
extern crate mime;
extern crate url;
mod cmn;
use std::collections::HashMap;
use std::cell::RefCell;
use std::borrow::BorrowMut;
use std::default::Default;
use std::collections::BTreeMap;
use serde_json as json;
use std::io;
use std::fs;
use std::mem;
use std::thread::sleep;
use std::time::Duration;
pub use cmn::*;
// ##############
// UTILITIES ###
// ############
/// Identifies an OAuth2 authorization scope.
/// A scope is needed when requesting an
/// [authorization token](https://developers.google.com/youtube/v3/guides/authentication).
#[derive(PartialEq, Eq, Hash)]
pub enum Scope {
/// View and add to the activity record of files in your Google Drive
DriveActivity,
/// View the activity record of files in your Google Drive
DriveActivityReadonly,
}
impl AsRef<str> for Scope {
fn as_ref(&self) -> &str {
match *self {
Scope::DriveActivity => "https://www.googleapis.com/auth/drive.activity",
Scope::DriveActivityReadonly => "https://www.googleapis.com/auth/drive.activity.readonly",
}
}
}
impl Default for Scope {
fn default() -> Scope {
Scope::DriveActivityReadonly
}
}
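// A small added sketch (not part of the generated code): scopes resolve to
// their OAuth2 URL string form via `AsRef<str>`, and the read-only scope is
// the default.
#[test]
fn test_scope_as_ref_urls() {
    assert_eq!(Scope::DriveActivity.as_ref(), "https://www.googleapis.com/auth/drive.activity");
    assert_eq!(Scope::default().as_ref(), "https://www.googleapis.com/auth/drive.activity.readonly");
}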
// ########
// HUB ###
// ######
/// Central instance to access all DriveActivityHub related resource activities
///
/// # Examples
///
/// Instantiate a new hub
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate yup_oauth2 as oauth2;
/// extern crate google_driveactivity2 as driveactivity2;
/// use driveactivity2::QueryDriveActivityRequest;
/// use driveactivity2::{Result, Error};
/// # #[test] fn egal() {
/// use std::default::Default;
/// use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
/// use driveactivity2::DriveActivityHub;
///
/// // Get an ApplicationSecret instance by some means. It contains the `client_id` and
/// // `client_secret`, among other things.
/// let secret: ApplicationSecret = Default::default();
/// // Instantiate the authenticator. It will choose a suitable authentication flow for you,
/// // unless you replace `None` with the desired Flow.
/// // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
/// // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
/// // retrieve them from storage.
/// let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
/// hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())),
/// <MemoryStorage as Default>::default(), None);
/// let mut hub = DriveActivityHub::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = QueryDriveActivityRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.activity().query(req)
/// .doit();
///
/// match result {
/// Err(e) => match e {
/// // The Error enum provides details about what exactly happened.
/// // You can also just use its `Debug`, `Display` or `Error` traits
/// Error::HttpError(_)
/// |Error::MissingAPIKey
/// |Error::MissingToken(_)
/// |Error::Cancelled
/// |Error::UploadSizeLimitExceeded(_, _)
/// |Error::Failure(_)
/// |Error::BadRequest(_)
/// |Error::FieldClash(_)
/// |Error::JsonDecodeError(_, _) => println!("{}", e),
/// },
/// Ok(res) => println!("Success: {:?}", res),
/// }
/// # }
/// ```
pub struct DriveActivityHub<C, A> {
client: RefCell<C>,
auth: RefCell<A>,
_user_agent: String,
_base_url: String,
_root_url: String,
}
impl<'a, C, A> Hub for DriveActivityHub<C, A> {}
impl<'a, C, A> DriveActivityHub<C, A>
where C: BorrowMut<hyper::Client>, A: oauth2::GetToken {
pub fn new(client: C, authenticator: A) -> DriveActivityHub<C, A> {
DriveActivityHub {
client: RefCell::new(client),
auth: RefCell::new(authenticator),
_user_agent: "google-api-rust-client/1.0.11".to_string(),
_base_url: "https://driveactivity.googleapis.com/".to_string(),
_root_url: "https://driveactivity.googleapis.com/".to_string(),
}
}
pub fn activity(&'a self) -> ActivityMethods<'a, C, A> {
ActivityMethods { hub: &self }
}
/// Set the user-agent header field to use in all requests to the server.
/// It defaults to `google-api-rust-client/1.0.11`.
///
/// Returns the previously set user-agent.
pub fn user_agent(&mut self, agent_name: String) -> String {
mem::replace(&mut self._user_agent, agent_name)
}
/// Set the base url to use in all requests to the server.
/// It defaults to `https://driveactivity.googleapis.com/`.
///
/// Returns the previously set base url.
pub fn base_url(&mut self, new_base_url: String) -> String {
mem::replace(&mut self._base_url, new_base_url)
}
/// Set the root url to use in all requests to the server.
/// It defaults to `https://driveactivity.googleapis.com/`.
///
/// Returns the previously set root url.
pub fn root_url(&mut self, new_root_url: String) -> String {
mem::replace(&mut self._root_url, new_root_url)
}
}
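// An illustrative sketch (not part of the generated code): pointing the hub
// at a different endpoint, e.g. a local mock server in tests. The localhost
// URL is a placeholder; the generic bounds mirror the `impl` block above.
fn point_hub_at_mock_server<C, A>(hub: &mut DriveActivityHub<C, A>)
    where C: BorrowMut<hyper::Client>, A: oauth2::GetToken {
    let _previous_base = hub.base_url("http://localhost:8080/".to_string());
    let _previous_root = hub.root_url("http://localhost:8080/".to_string());
}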
// ############
// SCHEMAS ###
// ##########
/// Information about an impersonation, where an admin acts on behalf of an end
/// user. Information about the acting admin is not currently available.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Impersonation {
/// The impersonated user.
#[serde(rename="impersonatedUser")]
pub impersonated_user: Option<User>,
}
impl Part for Impersonation {}
/// Information about a group.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Group {
/// The email address of the group.
pub email: Option<String>,
/// The title of the group.
pub title: Option<String>,
}
impl Part for Group {}
/// A change of the permission setting on an item.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PermissionChange {
/// The set of permissions removed by this change.
#[serde(rename="removedPermissions")]
pub removed_permissions: Option<Vec<Permission>>,
/// The set of permissions added by this change.
#[serde(rename="addedPermissions")]
pub added_permissions: Option<Vec<Permission>>,
}
impl Part for PermissionChange {}
/// An object was created.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Create {
/// If present, indicates the object was newly created (e.g. as a blank
/// document), not derived from a Drive object or external object.
pub new: Option<New>,
/// If present, indicates the object was created by copying an existing Drive
/// object.
pub copy: Option<Copy>,
/// If present, indicates the object originated externally and was uploaded
/// to Drive.
pub upload: Option<Upload>,
}
impl Part for Create {}
/// Information about time ranges.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TimeRange {
/// The end of the time range.
#[serde(rename="endTime")]
pub end_time: Option<String>,
/// The start of the time range.
#[serde(rename="startTime")]
pub start_time: Option<String>,
}
impl Part for TimeRange {}
/// Activity in applications other than Drive.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ApplicationReference {
/// The reference type corresponding to this event.
#[serde(rename="type")]
pub type_: Option<String>,
}
impl Part for ApplicationReference {}
/// The actor of a Drive activity.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Actor {
/// An account acting on behalf of another.
pub impersonation: Option<Impersonation>,
/// An administrator.
pub administrator: Option<Administrator>,
/// A non-user actor (i.e. system triggered).
pub system: Option<SystemEvent>,
/// An anonymous user.
pub anonymous: Option<AnonymousUser>,
/// An end user.
pub user: Option<User>,
}
impl Part for Actor {}
/// Information about restriction policy changes to a feature.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RestrictionChange {
/// The feature which had a change in restriction policy.
pub feature: Option<String>,
/// The restriction in place after the change.
#[serde(rename="newRestriction")]
pub new_restriction: Option<String>,
}
impl Part for RestrictionChange {}
/// Represents any user (including a logged out user).
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Anyone { _never_set: Option<bool> }
impl Part for Anyone {}
/// A user about whom nothing is currently known.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct UnknownUser { _never_set: Option<bool> }
impl Part for UnknownUser {}
/// An object was created from scratch.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct New { _never_set: Option<bool> }
impl Part for New {}
/// A lightweight reference to a shared drive.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DriveReference {
/// The resource name of the shared drive. The format is
/// "COLLECTION_ID/DRIVE_ID". Clients should not assume a specific collection
/// ID for this resource name.
pub name: Option<String>,
/// The title of the shared drive.
pub title: Option<String>,
}
impl Part for DriveReference {}
/// This item is deprecated; please see `DriveFolder` instead.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Folder {
/// This field is deprecated; please see `DriveFolder.type` instead.
#[serde(rename="type")]
pub type_: Option<String>,
}
impl Part for Folder {}
/// An object was uploaded into Drive.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Upload { _never_set: Option<bool> }
impl Part for Upload {}
/// A user whose account has since been deleted.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DeletedUser { _never_set: Option<bool> }
impl Part for DeletedUser {}
/// This item is deprecated; please see `Drive` instead.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TeamDrive {
/// This field is deprecated; please see `Drive.root` instead.
pub root: Option<DriveItem>,
/// This field is deprecated; please see `Drive.name` instead.
pub name: Option<String>,
/// This field is deprecated; please see `Drive.title` instead.
pub title: Option<String>,
}
impl Part for TeamDrive {}
/// Empty message representing an administrator.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Administrator { _never_set: Option<bool> }
impl Part for Administrator {}
/// Empty message representing an anonymous user or indicating the authenticated
/// user should be anonymized.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct AnonymousUser { _never_set: Option<bool> }
impl Part for AnonymousUser {}
/// The permission setting of an object.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Permission {
/// If set, this permission applies to anyone, even logged out users.
pub anyone: Option<Anyone>,
/// The domain to whom this permission applies.
pub domain: Option<Domain>,
/// The group to whom this permission applies.
pub group: Option<Group>,
/// If true, the item can be discovered (e.g. in the user's "Shared with me"
/// collection) without needing a link to the item.
#[serde(rename="allowDiscovery")]
pub allow_discovery: Option<bool>,
/// Indicates the
/// <a href="/drive/web/manage-sharing#roles">Google Drive permissions
/// role</a>. The role determines a user's ability to read, write, and
/// comment on items.
pub role: Option<String>,
/// The user to whom this permission applies.
pub user: Option<User>,
}
impl Part for Permission {}
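// An illustrative sketch (not part of the generated code): a permission
// granting a user a role on an item. The "commenter" role string is a
// placeholder; see the `role` field documentation above.
fn example_user_permission(user: User) -> Permission {
    Permission {
        user: Some(user),
        role: Some("commenter".to_string()),
        ..Default::default()
    }
}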
/// A comment with an assignment.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Assignment {
/// The sub-type of this event.
pub subtype: Option<String>,
}
impl Part for Assignment {}
/// Event triggered by system operations instead of end users.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct SystemEvent {
    /// The type of the system event that may have triggered this activity.
#[serde(rename="type")]
pub type_: Option<String>,
}
impl Part for SystemEvent {}
/// A Drive item which is a file.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DriveFile { _never_set: Option<bool> }
impl Part for DriveFile {}
/// Data describing the type and additional information of an action.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ActionDetail {
/// A change about comments was made.
pub comment: Option<Comment>,
/// A change happened in data leak prevention status.
#[serde(rename="dlpChange")]
pub dlp_change: Option<DataLeakPreventionChange>,
/// An object was referenced in an application outside of Drive/Docs.
pub reference: Option<ApplicationReference>,
/// The permission on an object was changed.
#[serde(rename="permissionChange")]
pub permission_change: Option<PermissionChange>,
/// An object was edited.
pub edit: Option<Edit>,
/// Settings were changed.
#[serde(rename="settingsChange")]
pub settings_change: Option<SettingsChange>,
/// An object was created.
pub create: Option<Create>,
/// An object was moved.
#[serde(rename="move")]
pub move_: Option<Move>,
/// An object was renamed.
pub rename: Option<Rename>,
/// A deleted object was restored.
pub restore: Option<Restore>,
/// An object was deleted.
pub delete: Option<Delete>,
}
impl Part for ActionDetail {}
/// A Drive item, such as a file or folder.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DriveItem {
/// The MIME type of the Drive item. See
/// https://developers.google.com/drive/v3/web/mime-types.
#[serde(rename="mimeType")]
pub mime_type: Option<String>,
/// The target Drive item. The format is "items/ITEM_ID".
pub name: Option<String>,
/// The title of the Drive item.
pub title: Option<String>,
/// The Drive item is a folder.
#[serde(rename="driveFolder")]
pub drive_folder: Option<DriveFolder>,
/// The Drive item is a file.
#[serde(rename="driveFile")]
pub drive_file: Option<DriveFile>,
/// This field is deprecated; please use the `driveFile` field instead.
pub file: Option<File>,
/// Information about the owner of this Drive item.
pub owner: Option<Owner>,
/// This field is deprecated; please use the `driveFolder` field instead.
pub folder: Option<Folder>,
}
impl Part for DriveItem {}
/// A regular posted comment.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Post {
/// The sub-type of this event.
pub subtype: Option<String>,
}
impl Part for Post {}
/// An object was created by copying an existing object.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Copy {
    /// The original object.
#[serde(rename="originalObject")]
pub original_object: Option<TargetReference>,
}
impl Part for Copy {}
/// A lightweight reference to a Drive item, such as a file or folder.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DriveItemReference {
/// The Drive item is a file.
#[serde(rename="driveFile")]
pub drive_file: Option<DriveFile>,
/// The target Drive item. The format is "items/ITEM_ID".
pub name: Option<String>,
/// This field is deprecated; please use the `driveFile` field instead.
pub file: Option<File>,
/// The title of the Drive item.
pub title: Option<String>,
/// This field is deprecated; please use the `driveFolder` field instead.
pub folder: Option<Folder>,
/// The Drive item is a folder.
#[serde(rename="driveFolder")]
pub drive_folder: Option<DriveFolder>,
}
impl Part for DriveItemReference {}
/// A change in the object's data leak prevention status.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DataLeakPreventionChange {
/// The type of Data Leak Prevention (DLP) change.
#[serde(rename="type")]
pub type_: Option<String>,
}
impl Part for DataLeakPreventionChange {}
/// A change about comments on an object.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Comment {
/// A change on an assignment.
pub assignment: Option<Assignment>,
/// A change on a regular posted comment.
pub post: Option<Post>,
/// Users who are mentioned in this comment.
#[serde(rename="mentionedUsers")]
pub mentioned_users: Option<Vec<User>>,
/// A change on a suggestion.
pub suggestion: Option<Suggestion>,
}
impl Part for Comment {}
/// Information about the target of activity.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Target {
/// This field is deprecated; please use the `drive` field instead.
#[serde(rename="teamDrive")]
pub team_drive: Option<TeamDrive>,
/// The target is a comment on a Drive file.
#[serde(rename="fileComment")]
pub file_comment: Option<FileComment>,
/// The target is a Drive item.
#[serde(rename="driveItem")]
pub drive_item: Option<DriveItem>,
/// The target is a shared drive.
pub drive: Option<Drive>,
}
impl Part for Target {}
/// An empty message indicating an object was edited.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Edit { _never_set: Option<bool> }
impl Part for Edit {}
/// A strategy which does no consolidation of individual activities.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct NoConsolidation { _never_set: Option<bool> }
impl Part for NoConsolidation {}
/// A lightweight reference to the target of activity.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TargetReference {
/// This field is deprecated; please use the `drive` field instead.
#[serde(rename="teamDrive")]
pub team_drive: Option<TeamDriveReference>,
/// The target is a Drive item.
#[serde(rename="driveItem")]
pub drive_item: Option<DriveItemReference>,
/// The target is a shared drive.
pub drive: Option<DriveReference>,
}
impl Part for TargetReference {}
/// Information about a shared drive.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Drive {
/// The root of this shared drive.
pub root: Option<DriveItem>,
/// The resource name of the shared drive. The format is
/// "COLLECTION_ID/DRIVE_ID". Clients should not assume a specific collection
/// ID for this resource name.
pub name: Option<String>,
/// The title of the shared drive.
pub title: Option<String>,
}
impl Part for Drive {}
/// A comment on a file.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct FileComment {
/// The comment in the discussion thread. This identifier is an opaque string
/// compatible with the Drive API; see
/// https://developers.google.com/drive/v3/reference/comments/get
#[serde(rename="legacyCommentId")]
pub legacy_comment_id: Option<String>,
/// The discussion thread to which the comment was added. This identifier is an
/// opaque string compatible with the Drive API and references the first
/// comment in a discussion; see
/// https://developers.google.com/drive/v3/reference/comments/get
#[serde(rename="legacyDiscussionId")]
pub legacy_discussion_id: Option<String>,
/// The Drive item containing this comment.
pub parent: Option<DriveItem>,
/// The link to the discussion thread containing this comment, for example,
/// "https://docs.google.com/DOCUMENT_ID/edit?disco=THREAD_ID".
#[serde(rename="linkToDiscussion")]
pub link_to_discussion: Option<String>,
}
impl Part for FileComment {}
/// A suggestion.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Suggestion {
/// The sub-type of this event.
pub subtype: Option<String>,
}
impl Part for Suggestion {}
/// Information about the action.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Action {
/// The action occurred at this specific time.
pub timestamp: Option<String>,
/// The action occurred over this time range.
#[serde(rename="timeRange")]
pub time_range: Option<TimeRange>,
/// The type and detailed information about the action.
pub detail: Option<ActionDetail>,
/// The actor responsible for this action (or empty if all actors are
/// responsible).
pub actor: Option<Actor>,
/// The target this action affects (or empty if affecting all targets). This
/// represents the state of the target immediately after this action occurred.
pub target: Option<Target>,
}
impl Part for Action {}
/// A single Drive activity comprising one or more Actions by one or more
/// Actors on one or more Targets. Some Action groupings occur spontaneously,
/// such as moving an item into a shared folder triggering a permission change.
/// Other groupings of related Actions, such as multiple Actors editing one item
/// or moving multiple files into a new folder, are controlled by the selection
/// of a ConsolidationStrategy in the QueryDriveActivityRequest.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DriveActivity {
/// Key information about the primary action for this activity. This is either
/// representative, or the most important, of all actions in the activity,
/// according to the ConsolidationStrategy in the request.
#[serde(rename="primaryActionDetail")]
pub primary_action_detail: Option<ActionDetail>,
/// The activity occurred at this specific time.
pub timestamp: Option<String>,
/// The activity occurred over this time range.
#[serde(rename="timeRange")]
pub time_range: Option<TimeRange>,
/// All actor(s) responsible for the activity.
pub actors: Option<Vec<Actor>>,
/// Details on all actions in this activity.
pub actions: Option<Vec<Action>>,
/// All Google Drive objects this activity is about (e.g. file, folder, drive).
/// This represents the state of the target immediately after the actions
/// occurred.
pub targets: Option<Vec<Target>>,
}
impl Part for DriveActivity {}
/// A Drive item which is a folder.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DriveFolder {
/// The type of Drive folder.
#[serde(rename="type")]
pub type_: Option<String>,
}
impl Part for DriveFolder {}
/// An object was deleted.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Delete {
/// The type of delete action taken.
#[serde(rename="type")]
pub type_: Option<String>,
}
impl Part for Delete {}
/// An object was renamed.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Rename {
/// The new title of the drive object.
#[serde(rename="newTitle")]
pub new_title: Option<String>,
/// The previous title of the drive object.
#[serde(rename="oldTitle")]
pub old_title: Option<String>,
}
impl Part for Rename {}
/// A deleted object was restored.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Restore {
/// The type of restore action taken.
#[serde(rename="type")]
pub type_: Option<String>,
}
impl Part for Restore {}
/// Information about a domain.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Domain {
/// The name of the domain, e.g. "google.com".
pub name: Option<String>,
/// An opaque string used to identify this domain.
#[serde(rename="legacyId")]
pub legacy_id: Option<String>,
}
impl Part for Domain {}
/// The request message for querying Drive activity.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [query activity](struct.ActivityQueryCall.html) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct QueryDriveActivityRequest {
/// The filtering for items returned from this query request. The format of the
/// filter string is a sequence of expressions, joined by an optional "AND",
/// where each expression is of the form "field operator value".
///
/// Supported fields:
///
/// * <tt>time</tt>: Uses numerical operators on date values either in
/// terms of milliseconds since Jan 1, 1970 or in RFC 3339 format.
/// Examples:
///
/// * <tt>time > 1452409200000 AND time <= 1492812924310</tt>
/// * <tt>time >= "2016-01-10T01:02:03-05:00"</tt>
/// * <tt>detail.action_detail_case</tt>: Uses the "has" operator (:) and
/// either a singular value or a list of allowed action types enclosed in
/// parentheses.
/// Examples:
///
/// * <tt>detail.action_detail_case: RENAME</tt>
/// * <tt>detail.action_detail_case:(CREATE UPLOAD)</tt>
/// * <tt>-detail.action_detail_case:MOVE</tt>
pub filter: Option<String>,
/// The next_page_token value returned from a previous QueryDriveActivity
/// request, if any.
#[serde(rename="pageToken")]
pub page_token: Option<String>,
/// Details on how to consolidate related actions that make up the activity. If
/// not set, then related actions will not be consolidated.
#[serde(rename="consolidationStrategy")]
pub consolidation_strategy: Option<ConsolidationStrategy>,
    /// The requested number of activities to return. If not set, a default value
/// will be used.
#[serde(rename="pageSize")]
pub page_size: Option<i32>,
/// Return activities for this Drive folder and all children and descendants.
/// The format is "items/ITEM_ID".
#[serde(rename="ancestorName")]
pub ancestor_name: Option<String>,
/// Return activities for this Drive item. The format is
/// "items/ITEM_ID".
#[serde(rename="itemName")]
pub item_name: Option<String>,
}
impl RequestValue for QueryDriveActivityRequest {}
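// An illustrative sketch (not part of the generated code): assembling a
// typical query for rename events on a single Drive item. "items/ITEM_ID" is
// the placeholder format described in the field documentation above.
fn example_query_request() -> QueryDriveActivityRequest {
    QueryDriveActivityRequest {
        item_name: Some("items/ITEM_ID".to_string()),
        filter: Some("detail.action_detail_case:RENAME".to_string()),
        page_size: Some(10),
        ..Default::default()
    }
}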
/// A known user.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct KnownUser {
/// The identifier for this user that can be used with the People API to get
/// more information. The format is "people/ACCOUNT_ID". See
/// https://developers.google.com/people/.
#[serde(rename="personName")]
pub person_name: Option<String>,
/// True if this is the user making the request.
#[serde(rename="isCurrentUser")]
pub is_current_user: Option<bool>,
}
impl Part for KnownUser {}
/// An object was moved.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Move {
/// The removed parent object(s).
#[serde(rename="removedParents")]
pub removed_parents: Option<Vec<TargetReference>>,
/// The added parent object(s).
#[serde(rename="addedParents")]
pub added_parents: Option<Vec<TargetReference>>,
}
impl Part for Move {}
/// This item is deprecated; please see `DriveReference` instead.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TeamDriveReference {
/// This field is deprecated; please see `DriveReference.name` instead.
pub name: Option<String>,
/// This field is deprecated; please see `DriveReference.title` instead.
pub title: Option<String>,
}
impl Part for TeamDriveReference {}
/// How the individual activities are consolidated. A set of activities may be
/// consolidated into one combined activity if they are related in some way, such
/// as one actor performing the same action on multiple targets, or multiple
/// actors performing the same action on a single target. The strategy defines
/// the rules for which activities are related.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ConsolidationStrategy {
/// The individual activities are not consolidated.
pub none: Option<NoConsolidation>,
/// The individual activities are consolidated using the legacy strategy.
pub legacy: Option<Legacy>,
}
impl Part for ConsolidationStrategy {}
/// A strategy which consolidates activities using the grouping rules from the
/// legacy V1 Activity API. Similar actions occurring within a window of time
/// can be grouped across multiple targets (such as moving a set of files at
/// once) or multiple actors (such as several users editing the same item).
/// Grouping rules for this strategy are specific to each type of action.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Legacy { _never_set: Option<bool> }
impl Part for Legacy {}
/// Response message for querying Drive activity.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or in which this type is involved.
/// The list links each activity name with information about where it is used (one of *request* and *response*).
///
/// * [query activity](struct.ActivityQueryCall.html) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct QueryDriveActivityResponse {
/// Token to retrieve the next page of results, or
/// empty if there are no more results in the list.
#[serde(rename="nextPageToken")]
pub next_page_token: Option<String>,
    /// List of activities requested.
pub activities: Option<Vec<DriveActivity>>,
}
impl ResponseResult for QueryDriveActivityResponse {}
/// Information about an end user.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct User {
/// A user about whom nothing is currently known.
#[serde(rename="unknownUser")]
pub unknown_user: Option<UnknownUser>,
/// A user whose account has since been deleted.
#[serde(rename="deletedUser")]
pub deleted_user: Option<DeletedUser>,
/// A known user.
#[serde(rename="knownUser")]
pub known_user: Option<KnownUser>,
}
impl Part for User {}
/// This item is deprecated; please see `DriveFile` instead.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct File { _never_set: Option<bool> }
impl Part for File {}
/// Information about settings changes.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct SettingsChange {
/// The set of changes made to restrictions.
#[serde(rename="restrictionChanges")]
pub restriction_changes: Option<Vec<RestrictionChange>>,
}
impl Part for SettingsChange {}
/// Information about the owner of a Drive item.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Owner {
/// This field is deprecated; please use the `drive` field instead.
#[serde(rename="teamDrive")]
pub team_drive: Option<TeamDriveReference>,
/// The domain of the Drive item owner.
pub domain: Option<Domain>,
/// The drive that owns the item.
pub drive: Option<DriveReference>,
/// The user that owns the Drive item.
pub user: Option<User>,
}
impl Part for Owner {}
// ###################
// MethodBuilders ###
// #################
/// A builder providing access to all methods supported on *activity* resources.
/// It is not used directly, but through the `DriveActivityHub` hub.
///
/// # Example
///
/// Instantiate a resource builder
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate yup_oauth2 as oauth2;
/// extern crate google_driveactivity2 as driveactivity2;
///
/// # #[test] fn egal() {
/// use std::default::Default;
/// use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
/// use driveactivity2::DriveActivityHub;
///
/// let secret: ApplicationSecret = Default::default();
/// let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
/// hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())),
/// <MemoryStorage as Default>::default(), None);
/// let mut hub = DriveActivityHub::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders*
/// // like `query(...)`
/// // to build up your call.
/// let rb = hub.activity();
/// # }
/// ```
pub struct ActivityMethods<'a, C, A>
where C: 'a, A: 'a {
hub: &'a DriveActivityHub<C, A>,
}
impl<'a, C, A> MethodsBuilder for ActivityMethods<'a, C, A> {}
impl<'a, C, A> ActivityMethods<'a, C, A> {
/// Create a builder to help you perform the following task:
///
/// Query past activity in Google Drive.
///
/// # Arguments
///
/// * `request` - No description provided.
pub fn query(&self, request: QueryDriveActivityRequest) -> ActivityQueryCall<'a, C, A> {
ActivityQueryCall {
hub: self.hub,
_request: request,
_delegate: Default::default(),
_scopes: Default::default(),
_additional_params: Default::default(),
}
}
}
// ###################
// CallBuilders ###
// #################
/// Query past activity in Google Drive.
///
/// A builder for the *query* method supported by an *activity* resource.
/// It is not used directly, but through a `ActivityMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_driveactivity2 as driveactivity2;
/// use driveactivity2::QueryDriveActivityRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
/// # use driveactivity2::DriveActivityHub;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
/// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())),
/// # <MemoryStorage as Default>::default(), None);
/// # let mut hub = DriveActivityHub::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill the respective
/// // structure with the desired information. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = QueryDriveActivityRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.activity().query(req)
/// .doit();
/// # }
/// ```
pub struct ActivityQueryCall<'a, C, A>
where C: 'a, A: 'a {
hub: &'a DriveActivityHub<C, A>,
_request: QueryDriveActivityRequest,
_delegate: Option<&'a mut Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a, C, A> CallBuilder for ActivityQueryCall<'a, C, A> {}
impl<'a, C, A> ActivityQueryCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken {
    /// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, QueryDriveActivityResponse)> {
use std::io::{Read, Seek};
use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(MethodInfo { id: "driveactivity.activity.query",
http_method: hyper::method::Method::Post });
let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
for &field in ["alt"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v2/activity:query";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::DriveActivity.as_ref().to_string(), ());
}
let url = hyper::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default());
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) {
Ok(token) => token,
Err(err) => {
match dlg.token(&*err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(Error::MissingToken(err))
}
}
}
};
let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Post, url.clone())
.header(UserAgent(self.hub._user_agent.clone()))
.header(auth_header.clone())
.header(ContentType(json_mime_type.clone()))
.header(ContentLength(request_size as u64))
.body(&mut request_value_reader);
dlg.pre_request();
req.send()
};
match req_result {
Err(err) => {
if let oauth2::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(Error::HttpError(err))
}
Ok(mut res) => {
if !res.status.is_success() {
let mut json_err = String::new();
res.read_to_string(&mut json_err).unwrap();
if let oauth2::Retry::After(d) = dlg.http_failure(&res,
json::from_str(&json_err).ok(),
json::from_str(&json_err).ok()) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<ErrorResponse>(&json_err){
Err(_) => Err(Error::Failure(res)),
Ok(serr) => Err(Error::BadRequest(serr))
}
}
let result_value = {
let mut json_response = String::new();
res.read_to_string(&mut json_response).unwrap();
match json::from_str(&json_response) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&json_response, &err);
return Err(Error::JsonDecodeError(json_response, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: QueryDriveActivityRequest) -> ActivityQueryCall<'a, C, A> {
self._request = new_value;
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut Delegate) -> ActivityQueryCall<'a, C, A> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *access_token* (query-string) - OAuth access token.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *callback* (query-string) - JSONP
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *alt* (query-string) - Data format for response.
/// * *$.xgafv* (query-string) - V1 error format.
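    ///
    /// For instance, an illustrative call (the `fields` selector shown is a
    /// hypothetical example, not taken from the API reference) could look like:
    ///
    /// ```text
    /// hub.activity().query(req)
    ///    .param("fields", "activities(primaryActionDetail)")
    ///    .doit();
    /// ```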
pub fn param<T>(mut self, name: T, value: T) -> ActivityQueryCall<'a, C, A>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::DriveActivity`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, while a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ActivityQueryCall<'a, C, A>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
| 36.556368 | 240 | 0.65572 |
9cb5e3ae4a153881301ae77faad0c8abf9ac4b8a | 5,813 | use core::fmt::{Debug, Display, Formatter};
use core::marker::PhantomData;
use core::result::Result;
use std::cell::RefCell;
use std::rc::Rc;
extern crate alloc;
use alloc::vec::Vec;
use crate::event_bus;
use crate::timer;
#[derive(Debug)]
pub enum Error<E, T>
where
E: Display + Debug,
T: Display + Debug,
{
EventBusError(E),
TimerError(T),
}
impl<E, T> Display for Error<E, T>
where
E: Display + Debug,
T: Display + Debug,
{
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Error::EventBusError(s) => write!(f, "Event Bus Error {}", s),
Error::TimerError(w) => write!(f, "Timer Error {}", w),
}
}
}
#[cfg(feature = "std")]
impl<E, T> std::error::Error for Error<E, T>
where
E: Display + Debug,
T: Display + Debug,
// TODO
// where
// S: std::error::Error + 'static,
// W: std::error::Error + 'static,
{
// TODO
// fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
// match self {
// EventBusTimerError::EventBusError(s) => Some(s),
// EventBusTimerError::TimerError(w) => Some(w),
// }
// }
}
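/// Identifier handed out to each registered timer callback; it is what gets
/// posted on the event bus and later used to route the event back to the
/// right closure.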
type TimerId = u32;
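/// Registry of live timer callbacks. It sits behind an `Rc<RefCell<..>>` so
/// that both the service and the event-bus subscription closure can reach it
/// from the same (pinned) thread.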
struct State {
timers_callbacks: Vec<(TimerId, Rc<RefCell<dyn FnMut() + 'static>>)>,
next_id: TimerId,
}
impl State {
fn new() -> Self {
Self {
timers_callbacks: Vec::new(),
next_id: 0,
}
}
fn call(&self, timer_id: TimerId) {
let callback = self
.timers_callbacks
.iter()
.find(|(id, _)| *id == timer_id)
.map(|(_, callback)| callback.clone());
if let Some(callback) = callback {
(callback.borrow_mut())();
}
}
fn add(&mut self, callback: Rc<RefCell<dyn FnMut() + 'static>>) -> TimerId {
if self.next_id == TimerId::max_value() {
panic!("Timer IDs exhausted");
}
let timer_id = self.next_id;
self.timers_callbacks.push((timer_id, callback));
self.next_id += 1;
timer_id
}
fn remove(&mut self, timer_id: TimerId) {
let index = self
.timers_callbacks
.iter()
.enumerate()
.find(|(_, (id, _))| *id == timer_id)
.map(|(index, _)| index)
.unwrap_or_else(|| panic!("Unknown timer ID: {}", timer_id));
self.timers_callbacks.remove(index);
}
}
pub struct Timer<T, ER> {
inner_timer: T,
id: TimerId,
state: Rc<RefCell<State>>,
_error_type: PhantomData<*const ER>,
}
impl<T, ER> timer::Timer for Timer<T, ER>
where
T: timer::Timer,
ER: Debug + Display + Send + Sync + 'static,
{
type Error = Error<ER, T::Error>;
fn once(&mut self, after: std::time::Duration) -> Result<(), Self::Error> {
self.inner_timer.once(after).map_err(Error::TimerError)
}
fn periodic(&mut self, after: std::time::Duration) -> Result<(), Self::Error> {
self.inner_timer.periodic(after).map_err(Error::TimerError)
}
fn is_scheduled(&self) -> Result<bool, Self::Error> {
self.inner_timer.is_scheduled().map_err(Error::TimerError)
}
fn cancel(&mut self) -> Result<bool, Self::Error> {
self.inner_timer.cancel().map_err(Error::TimerError)
}
}
impl<T, ER> Drop for Timer<T, ER> {
fn drop(&mut self) {
self.state.borrow_mut().remove(self.id);
}
}
pub struct PinnedTimerService<T, E, P>
where
E: event_bus::PinnedEventBus,
{
timer_service: T,
postbox: P,
state: Rc<RefCell<State>>,
_subscription: E::Subscription<TimerId>,
}
impl<T, E, P> PinnedTimerService<T, E, P>
where
T: timer::TimerService,
E: event_bus::PinnedEventBus,
P: event_bus::Postbox + Send + Clone,
P::Error: Into<E::Error>,
{
const EVENT_SOURCE: event_bus::Source<TimerId> =
event_bus::Source::new(b"PINNED_TIMER_SERVICE\0");
pub fn new(
timer_service: T,
event_bus: &E,
postbox: P,
) -> Result<Self, Error<E::Error, T::Error>> {
let state = Rc::new(RefCell::new(State::new()));
let state_subscription = state.clone();
let subscription = event_bus
.subscribe(Self::EVENT_SOURCE, move |timer_id| {
Result::<_, E::Error>::Ok(state_subscription.borrow().call(*timer_id))
})
.map_err(Error::EventBusError)?;
Ok(Self {
timer_service,
postbox,
state,
_subscription: subscription,
})
}
}
impl<T, E, P> timer::PinnedTimerService for PinnedTimerService<T, E, P>
where
T: timer::TimerService,
E: event_bus::PinnedEventBus,
P: event_bus::Postbox + Send + Clone + 'static,
P::Error: Into<E::Error>,
{
type Error = Error<E::Error, T::Error>;
type Timer = Timer<T::Timer, E::Error>;
fn timer<ER>(
&self,
conf: &timer::TimerConfiguration<'_>,
mut callback: impl FnMut() -> Result<(), ER> + 'static,
) -> Result<Self::Timer, Self::Error>
where
ER: Display + Debug + Sync + Send + 'static,
{
let timer_id = self
.state
.borrow_mut()
.add(Rc::new(RefCell::new(move || (callback)().unwrap())));
let postbox = self.postbox.clone();
Ok(Timer {
inner_timer: self
.timer_service
.timer(conf, move || {
postbox
.post(&Self::EVENT_SOURCE, &timer_id)
.map_err(Error::<_, T::Error>::EventBusError)
})
.map_err(Error::TimerError)?,
id: timer_id,
state: self.state.clone(),
_error_type: PhantomData,
})
}
}
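// Illustrative wiring of the pieces above; the concrete `timer_service`,
// `event_bus`, `postbox`, and `conf` values are placeholders for whatever
// implementations the surrounding application provides:
//
// let service = PinnedTimerService::new(timer_service, &event_bus, postbox)?;
// let mut tick = service.timer(&conf, || Ok::<_, std::convert::Infallible>(()))?;
// tick.periodic(std::time::Duration::from_secs(1))?;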
| 25.384279 | 86 | 0.545501 |
abb184b1c83c1de661054b3ac07881b08820458d | 1,350 | //! Types and traits for working with JavaScript exceptions.
use std::fmt::{Display, Formatter, Result as FmtResult};
use handle::Handle;
use types::Value;
use context::Context;
/// An error sentinel type used by `NeonResult` (and `JsResult`) to indicate that the JavaScript engine
/// has entered into a throwing state.
///
/// `Throw` deliberately does not implement `std::error::Error`, because it's generally not a good idea
/// to chain JavaScript exceptions with other kinds of Rust errors, since entering into the throwing
/// state means that the JavaScript engine is unavailable until the exception is handled.
#[derive(Debug)]
pub struct Throw;
impl Display for Throw {
fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
fmt.write_str("JavaScript Error")
}
}
/// The result of a computation that might send the JS engine into a throwing state.
pub type NeonResult<T> = Result<T, Throw>;
/// The result of a computation that produces a JavaScript value and might send the JS engine into a throwing state.
pub type JsResult<'b, T> = NeonResult<Handle<'b, T>>;
/// An extension trait for `Result` values that can be converted into `JsResult` values by throwing a JavaScript
/// exception in the error case.
pub trait JsResultExt<'a, V: Value> {
fn or_throw<'b, C: Context<'b>>(self, cx: &mut C) -> JsResult<'a, V>;
}
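// Illustrative use inside a neon function body; `cx` stands for the current
// context and `fallible` for any `Result` whose error case should surface as
// a JavaScript exception:
//
// let handle = fallible.or_throw(&mut cx)?;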
| 39.705882 | 116 | 0.722222 |
76e02cd3ce5a3d2c723bb69d3c4da2901430f978 | 1,214 | // Copyright (c) 2021 Quark Container Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::fmt;
use core::fmt::Write;
use core::fmt::Arguments;
pub struct KPrintStruct {}
impl fmt::Write for KPrintStruct {
fn write_str(&mut self, s: &str) -> fmt::Result {
print!("{}", s);
Ok(())
}
}
#[macro_export]
macro_rules! kprint {
($($arg:tt)*) => ($crate::kvmlib::print::k_print(format_args!($($arg)*)));
}
#[macro_export]
macro_rules! kprintln {
() => ($crate::kprint!("\n"));
($($arg:tt)*) => (kprint!("{}\n", format_args!($($arg)*)));
}
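// Usage mirrors the std `print!`/`println!` macros; e.g. (with `code` being
// any formattable placeholder value):
//
// kprintln!("guest exited with code {}", code);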
#[doc(hidden)]
pub fn k_print(args: Arguments) {
let mut ps = KPrintStruct {};
ps.write_fmt(args).unwrap();
} | 28.232558 | 78 | 0.656507 |
d7ac154037a123209f947e1f7b949b99f050c143 | 2,749 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::PRE_REF1 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
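// Illustrative write through the generated accessors; `p` is a placeholder for
// the peripheral instance that owns this register, and the value is arbitrary:
//
// p.pre_ref1.modify(|_, w| unsafe { w.fsk_preamble_ref1().bits(0x1234_5678) });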
#[doc = r" Value of the field"]
pub struct FSK_PREAMBLE_REF1R {
bits: u32,
}
impl FSK_PREAMBLE_REF1R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _FSK_PREAMBLE_REF1W<'a> {
w: &'a mut W,
}
impl<'a> _FSK_PREAMBLE_REF1W<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
const MASK: u32 = 4294967295;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:31 - Base preamble reference waveform containing sixteen 5-bit phase values"]
#[inline]
pub fn fsk_preamble_ref1(&self) -> FSK_PREAMBLE_REF1R {
let bits = {
const MASK: u32 = 4294967295;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u32
};
FSK_PREAMBLE_REF1R { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 3204186111 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:31 - Base preamble reference waveform containing sixteen 5-bit phase values"]
#[inline]
pub fn fsk_preamble_ref1(&mut self) -> _FSK_PREAMBLE_REF1W {
_FSK_PREAMBLE_REF1W { w: self }
}
}
| 25.933962 | 97 | 0.53474 |
1cb4d751345f95a1f3477ea65115d18d64cd8cae | 7,171 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! The special-purpose event loop used by the reachability monitor.
//!
//! This event loop receives events from netstack. Those events are used by the reachability
//! monitor to infer the connectivity state.
use {
crate::worker::{EventWorker, TimerWorker},
failure::Error,
fuchsia_async as fasync, fuchsia_zircon as zx,
futures::channel::mpsc,
futures::prelude::*,
reachability_core::{Monitor, Timer},
};
/// The events that can trigger an action in the event loop.
#[derive(Debug)]
pub enum Event {
/// An event coming from fuchsia.net.stack.
StackEvent(fidl_fuchsia_net_stack::StackEvent),
/// An event coming from fuchsia.netstack.
NetstackEvent(fidl_fuchsia_netstack::NetstackEvent),
/// A timer firing.
TimerEvent(u64),
}
/// The event loop.
pub struct EventLoop {
event_recv: mpsc::UnboundedReceiver<Event>,
monitor: Monitor,
}
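/// `Timer` implementation that spawns a `TimerWorker`, which feeds
/// `Event::TimerEvent`s back into the event loop's channel on every tick.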
struct EventTimer {
event_send: mpsc::UnboundedSender<Event>,
}
impl Timer for EventTimer {
fn periodic(&self, duration: zx::Duration, id: u64) -> i64 {
let timer_worker = TimerWorker;
timer_worker.spawn(fasync::Interval::new(duration), self.event_send.clone(), id);
0
}
}
impl EventLoop {
/// `new` returns an `EventLoop` instance.
pub fn new(mut monitor: Monitor) -> Self {
let (event_send, event_recv) = futures::channel::mpsc::unbounded::<Event>();
let streams = monitor.take_event_streams();
let event_worker = EventWorker;
event_worker.spawn(streams, event_send.clone());
let timer = EventTimer { event_send: event_send.clone() };
monitor.set_timer(Box::new(timer));
EventLoop { event_recv, monitor }
}
/// `run` starts the event loop.
pub async fn run(&mut self) -> Result<(), Error> {
debug!("starting event loop");
while let Some(e) = self.event_recv.next().await {
match e {
Event::StackEvent(event) => self.handle_stack_event(event).await,
Event::NetstackEvent(event) => self.handle_netstack_event(event).await,
Event::TimerEvent(id) => {
self.handle_timer_firing(id).await;
}
}
}
Ok(())
}
async fn handle_timer_firing(&mut self, id: u64) {
self.monitor
.timer_event(id)
.await
.unwrap_or_else(|err| error!("error updating state: {:?}", err));
}
async fn handle_stack_event(&mut self, event: fidl_fuchsia_net_stack::StackEvent) {
debug!("stack event received {:#?}", event);
self.monitor
.stack_event(event)
.await
.unwrap_or_else(|err| error!("error updating state: {:?}", err));
}
async fn handle_netstack_event(&mut self, event: fidl_fuchsia_netstack::NetstackEvent) {
debug!("netstack event received {:#?}", event);
self.monitor
.netstack_event(event)
.await
.unwrap_or_else(|err| error!("error updating state: {:?}", err));
}
}
#[cfg(test)]
mod tests {
use super::*;
use fidl_fuchsia_net as net;
use fidl_fuchsia_netstack as netstack;
use fuchsia_async as fasync;
use fuchsia_async::TimeoutExt;
use reachability_core::Pinger;
/// log::Log implementation that uses stdout.
///
/// Useful when debugging tests.
struct Logger {}
impl log::Log for Logger {
fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool {
true
}
fn log(&self, record: &log::Record<'_>) {
//self.printer.println(
println!(
"[{}] ({}) {}",
record.level(),
record.module_path().unwrap_or(""),
record.args(),
)
}
fn flush(&self) {}
}
fn net_interface(port: u32, addr: [u8; 4]) -> netstack::NetInterface {
netstack::NetInterface {
id: port,
flags: netstack::NET_INTERFACE_FLAG_UP | netstack::NET_INTERFACE_FLAG_DHCP,
features: 0,
configuration: 0,
name: port.to_string(),
addr: net::IpAddress::Ipv4(net::Ipv4Address { addr }),
netmask: net::IpAddress::Ipv4(net::Ipv4Address { addr: [255, 255, 255, 0] }),
broadaddr: net::IpAddress::Ipv4(net::Ipv4Address { addr: [1, 2, 3, 255] }),
ipv6addrs: vec![],
hwaddr: [1, 2, 3, 4, 5, port as u8].to_vec(),
}
}
struct Ping<'a> {
gateway_url: &'a str,
gateway_response: bool,
internet_url: &'a str,
internet_response: bool,
default_response: bool,
}
impl Pinger for Ping<'_> {
fn ping(&self, url: &str) -> bool {
if self.gateway_url == url {
return self.gateway_response;
}
if self.internet_url == url {
return self.internet_response;
}
self.default_response
}
}
#[fasync::run_singlethreaded(test)]
async fn test_events_are_received() {
let (event_send, event_recv) = futures::channel::mpsc::unbounded::<Event>();
let mut monitor = Monitor::new(Box::new(Ping {
gateway_url: "1.2.3.1",
gateway_response: true,
internet_url: "8.8.8.8",
internet_response: false,
default_response: false,
}))
.unwrap();
let streams = monitor.take_event_streams();
let event_worker = EventWorker;
event_worker.spawn(streams, event_send.clone());
let mut event_loop = EventLoop { event_recv, monitor };
fasync::spawn_local(async {
// Send event to it
let e = Event::NetstackEvent(netstack::NetstackEvent::OnInterfacesChanged {
interfaces: vec![net_interface(5, [1, 2, 3, 1])],
});
event_send.unbounded_send(e).unwrap();
let e = Event::NetstackEvent(netstack::NetstackEvent::OnInterfacesChanged {
interfaces: vec![net_interface(5, [1, 2, 3, 2])],
});
event_send.unbounded_send(e).unwrap();
let e = Event::NetstackEvent(netstack::NetstackEvent::OnInterfacesChanged {
interfaces: vec![net_interface(5, [1, 2, 3, 3])],
});
event_send.unbounded_send(e).unwrap();
let e = Event::NetstackEvent(netstack::NetstackEvent::OnInterfacesChanged {
interfaces: vec![net_interface(5, [1, 2, 3, 4])],
});
event_send.unbounded_send(e).unwrap();
drop(event_send);
});
let x = event_loop
.run()
.on_timeout(fasync::Time::after(fuchsia_zircon::Duration::from_seconds(10)), || {
panic!("timed out")
})
.await;
println!("eventloop result {:?}", x);
assert_eq!(event_loop.monitor.stats().events, 4);
}
}
| 33.199074 | 93 | 0.574815 |
b97f59395fd6798c67a740242881e0aafa6214b6 | 13,804 | #[doc = "Reader of register CTRL2_SET"]
pub type R = crate::R<u32, super::CTRL2_SET>;
#[doc = "Writer for register CTRL2_SET"]
pub type W = crate::W<u32, super::CTRL2_SET>;
#[doc = "Register CTRL2_SET `reset()`'s with value 0x0020_0000"]
impl crate::ResetValue for super::CTRL2_SET {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0x0020_0000
}
}
#[doc = "This field determines the order of the RGB components of each pixel in EVEN lines (line numbers 2,4,6,\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum EVEN_LINE_PATTERN_A {
#[doc = "0: RGB"]
RGB = 0,
#[doc = "1: RBG"]
RBG = 1,
#[doc = "2: GBR"]
GBR = 2,
#[doc = "3: GRB"]
GRB = 3,
#[doc = "4: BRG"]
BRG = 4,
#[doc = "5: BGR"]
BGR = 5,
}
impl From<EVEN_LINE_PATTERN_A> for u8 {
#[inline(always)]
fn from(variant: EVEN_LINE_PATTERN_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `EVEN_LINE_PATTERN`"]
pub type EVEN_LINE_PATTERN_R = crate::R<u8, EVEN_LINE_PATTERN_A>;
impl EVEN_LINE_PATTERN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, EVEN_LINE_PATTERN_A> {
use crate::Variant::*;
match self.bits {
0 => Val(EVEN_LINE_PATTERN_A::RGB),
1 => Val(EVEN_LINE_PATTERN_A::RBG),
2 => Val(EVEN_LINE_PATTERN_A::GBR),
3 => Val(EVEN_LINE_PATTERN_A::GRB),
4 => Val(EVEN_LINE_PATTERN_A::BRG),
5 => Val(EVEN_LINE_PATTERN_A::BGR),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `RGB`"]
#[inline(always)]
pub fn is_rgb(&self) -> bool {
*self == EVEN_LINE_PATTERN_A::RGB
}
#[doc = "Checks if the value of the field is `RBG`"]
#[inline(always)]
pub fn is_rbg(&self) -> bool {
*self == EVEN_LINE_PATTERN_A::RBG
}
#[doc = "Checks if the value of the field is `GBR`"]
#[inline(always)]
pub fn is_gbr(&self) -> bool {
*self == EVEN_LINE_PATTERN_A::GBR
}
#[doc = "Checks if the value of the field is `GRB`"]
#[inline(always)]
pub fn is_grb(&self) -> bool {
*self == EVEN_LINE_PATTERN_A::GRB
}
#[doc = "Checks if the value of the field is `BRG`"]
#[inline(always)]
pub fn is_brg(&self) -> bool {
*self == EVEN_LINE_PATTERN_A::BRG
}
#[doc = "Checks if the value of the field is `BGR`"]
#[inline(always)]
pub fn is_bgr(&self) -> bool {
*self == EVEN_LINE_PATTERN_A::BGR
}
}
#[doc = "Write proxy for field `EVEN_LINE_PATTERN`"]
pub struct EVEN_LINE_PATTERN_W<'a> {
w: &'a mut W,
}
impl<'a> EVEN_LINE_PATTERN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: EVEN_LINE_PATTERN_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "RGB"]
#[inline(always)]
pub fn rgb(self) -> &'a mut W {
self.variant(EVEN_LINE_PATTERN_A::RGB)
}
#[doc = "RBG"]
#[inline(always)]
pub fn rbg(self) -> &'a mut W {
self.variant(EVEN_LINE_PATTERN_A::RBG)
}
#[doc = "GBR"]
#[inline(always)]
pub fn gbr(self) -> &'a mut W {
self.variant(EVEN_LINE_PATTERN_A::GBR)
}
#[doc = "GRB"]
#[inline(always)]
pub fn grb(self) -> &'a mut W {
self.variant(EVEN_LINE_PATTERN_A::GRB)
}
#[doc = "BRG"]
#[inline(always)]
pub fn brg(self) -> &'a mut W {
self.variant(EVEN_LINE_PATTERN_A::BRG)
}
#[doc = "BGR"]
#[inline(always)]
pub fn bgr(self) -> &'a mut W {
self.variant(EVEN_LINE_PATTERN_A::BGR)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 12)) | (((value as u32) & 0x07) << 12);
self.w
}
}
#[doc = "This field determines the order of the RGB components of each pixel in ODD lines (line numbers 1,3,5,\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum ODD_LINE_PATTERN_A {
#[doc = "0: RGB"]
RGB = 0,
#[doc = "1: RBG"]
RBG = 1,
#[doc = "2: GBR"]
GBR = 2,
#[doc = "3: GRB"]
GRB = 3,
#[doc = "4: BRG"]
BRG = 4,
#[doc = "5: BGR"]
BGR = 5,
}
impl From<ODD_LINE_PATTERN_A> for u8 {
#[inline(always)]
fn from(variant: ODD_LINE_PATTERN_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `ODD_LINE_PATTERN`"]
pub type ODD_LINE_PATTERN_R = crate::R<u8, ODD_LINE_PATTERN_A>;
impl ODD_LINE_PATTERN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, ODD_LINE_PATTERN_A> {
use crate::Variant::*;
match self.bits {
0 => Val(ODD_LINE_PATTERN_A::RGB),
1 => Val(ODD_LINE_PATTERN_A::RBG),
2 => Val(ODD_LINE_PATTERN_A::GBR),
3 => Val(ODD_LINE_PATTERN_A::GRB),
4 => Val(ODD_LINE_PATTERN_A::BRG),
5 => Val(ODD_LINE_PATTERN_A::BGR),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `RGB`"]
#[inline(always)]
pub fn is_rgb(&self) -> bool {
*self == ODD_LINE_PATTERN_A::RGB
}
#[doc = "Checks if the value of the field is `RBG`"]
#[inline(always)]
pub fn is_rbg(&self) -> bool {
*self == ODD_LINE_PATTERN_A::RBG
}
#[doc = "Checks if the value of the field is `GBR`"]
#[inline(always)]
pub fn is_gbr(&self) -> bool {
*self == ODD_LINE_PATTERN_A::GBR
}
#[doc = "Checks if the value of the field is `GRB`"]
#[inline(always)]
pub fn is_grb(&self) -> bool {
*self == ODD_LINE_PATTERN_A::GRB
}
#[doc = "Checks if the value of the field is `BRG`"]
#[inline(always)]
pub fn is_brg(&self) -> bool {
*self == ODD_LINE_PATTERN_A::BRG
}
#[doc = "Checks if the value of the field is `BGR`"]
#[inline(always)]
pub fn is_bgr(&self) -> bool {
*self == ODD_LINE_PATTERN_A::BGR
}
}
#[doc = "Write proxy for field `ODD_LINE_PATTERN`"]
pub struct ODD_LINE_PATTERN_W<'a> {
w: &'a mut W,
}
impl<'a> ODD_LINE_PATTERN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ODD_LINE_PATTERN_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "RGB"]
#[inline(always)]
pub fn rgb(self) -> &'a mut W {
self.variant(ODD_LINE_PATTERN_A::RGB)
}
#[doc = "RBG"]
#[inline(always)]
pub fn rbg(self) -> &'a mut W {
self.variant(ODD_LINE_PATTERN_A::RBG)
}
#[doc = "GBR"]
#[inline(always)]
pub fn gbr(self) -> &'a mut W {
self.variant(ODD_LINE_PATTERN_A::GBR)
}
#[doc = "GRB"]
#[inline(always)]
pub fn grb(self) -> &'a mut W {
self.variant(ODD_LINE_PATTERN_A::GRB)
}
#[doc = "BRG"]
#[inline(always)]
pub fn brg(self) -> &'a mut W {
self.variant(ODD_LINE_PATTERN_A::BRG)
}
#[doc = "BGR"]
#[inline(always)]
pub fn bgr(self) -> &'a mut W {
self.variant(ODD_LINE_PATTERN_A::BGR)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 16)) | (((value as u32) & 0x07) << 16);
self.w
}
}
#[doc = "Reader of field `BURST_LEN_8`"]
pub type BURST_LEN_8_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `BURST_LEN_8`"]
pub struct BURST_LEN_8_W<'a> {
w: &'a mut W,
}
impl<'a> BURST_LEN_8_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20);
self.w
}
}
#[doc = "This bitfield indicates the maximum number of outstanding transactions that LCDIF should request when it is acting as a bus master\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum OUTSTANDING_REQS_A {
#[doc = "0: REQ_1"]
REQ_1 = 0,
#[doc = "1: REQ_2"]
REQ_2 = 1,
#[doc = "2: REQ_4"]
REQ_4 = 2,
#[doc = "3: REQ_8"]
REQ_8 = 3,
#[doc = "4: REQ_16"]
REQ_16 = 4,
}
impl From<OUTSTANDING_REQS_A> for u8 {
#[inline(always)]
fn from(variant: OUTSTANDING_REQS_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `OUTSTANDING_REQS`"]
pub type OUTSTANDING_REQS_R = crate::R<u8, OUTSTANDING_REQS_A>;
impl OUTSTANDING_REQS_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, OUTSTANDING_REQS_A> {
use crate::Variant::*;
match self.bits {
0 => Val(OUTSTANDING_REQS_A::REQ_1),
1 => Val(OUTSTANDING_REQS_A::REQ_2),
2 => Val(OUTSTANDING_REQS_A::REQ_4),
3 => Val(OUTSTANDING_REQS_A::REQ_8),
4 => Val(OUTSTANDING_REQS_A::REQ_16),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `REQ_1`"]
#[inline(always)]
pub fn is_req_1(&self) -> bool {
*self == OUTSTANDING_REQS_A::REQ_1
}
#[doc = "Checks if the value of the field is `REQ_2`"]
#[inline(always)]
pub fn is_req_2(&self) -> bool {
*self == OUTSTANDING_REQS_A::REQ_2
}
#[doc = "Checks if the value of the field is `REQ_4`"]
#[inline(always)]
pub fn is_req_4(&self) -> bool {
*self == OUTSTANDING_REQS_A::REQ_4
}
#[doc = "Checks if the value of the field is `REQ_8`"]
#[inline(always)]
pub fn is_req_8(&self) -> bool {
*self == OUTSTANDING_REQS_A::REQ_8
}
#[doc = "Checks if the value of the field is `REQ_16`"]
#[inline(always)]
pub fn is_req_16(&self) -> bool {
*self == OUTSTANDING_REQS_A::REQ_16
}
}
#[doc = "Write proxy for field `OUTSTANDING_REQS`"]
pub struct OUTSTANDING_REQS_W<'a> {
w: &'a mut W,
}
impl<'a> OUTSTANDING_REQS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: OUTSTANDING_REQS_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "REQ_1"]
#[inline(always)]
pub fn req_1(self) -> &'a mut W {
self.variant(OUTSTANDING_REQS_A::REQ_1)
}
#[doc = "REQ_2"]
#[inline(always)]
pub fn req_2(self) -> &'a mut W {
self.variant(OUTSTANDING_REQS_A::REQ_2)
}
#[doc = "REQ_4"]
#[inline(always)]
pub fn req_4(self) -> &'a mut W {
self.variant(OUTSTANDING_REQS_A::REQ_4)
}
#[doc = "REQ_8"]
#[inline(always)]
pub fn req_8(self) -> &'a mut W {
self.variant(OUTSTANDING_REQS_A::REQ_8)
}
#[doc = "REQ_16"]
#[inline(always)]
pub fn req_16(self) -> &'a mut W {
self.variant(OUTSTANDING_REQS_A::REQ_16)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 21)) | (((value as u32) & 0x07) << 21);
self.w
}
}
impl R {
#[doc = "Bits 12:14 - This field determines the order of the RGB components of each pixel in EVEN lines (line numbers 2,4,6,"]
#[inline(always)]
pub fn even_line_pattern(&self) -> EVEN_LINE_PATTERN_R {
EVEN_LINE_PATTERN_R::new(((self.bits >> 12) & 0x07) as u8)
}
#[doc = "Bits 16:18 - This field determines the order of the RGB components of each pixel in ODD lines (line numbers 1,3,5,"]
#[inline(always)]
pub fn odd_line_pattern(&self) -> ODD_LINE_PATTERN_R {
ODD_LINE_PATTERN_R::new(((self.bits >> 16) & 0x07) as u8)
}
#[doc = "Bit 20 - By default, when the LCDIF is in the bus master mode, it will issue AXI bursts of length 16 (except when in packed 24 bpp mode, it will issue bursts of length 15)"]
#[inline(always)]
pub fn burst_len_8(&self) -> BURST_LEN_8_R {
BURST_LEN_8_R::new(((self.bits >> 20) & 0x01) != 0)
}
#[doc = "Bits 21:23 - This bitfield indicates the maximum number of outstanding transactions that LCDIF should request when it is acting as a bus master"]
#[inline(always)]
pub fn outstanding_reqs(&self) -> OUTSTANDING_REQS_R {
OUTSTANDING_REQS_R::new(((self.bits >> 21) & 0x07) as u8)
}
}
impl W {
#[doc = "Bits 12:14 - This field determines the order of the RGB components of each pixel in EVEN lines (line numbers 2,4,6,"]
#[inline(always)]
pub fn even_line_pattern(&mut self) -> EVEN_LINE_PATTERN_W {
EVEN_LINE_PATTERN_W { w: self }
}
#[doc = "Bits 16:18 - This field determines the order of the RGB components of each pixel in ODD lines (line numbers 1,3,5,"]
#[inline(always)]
pub fn odd_line_pattern(&mut self) -> ODD_LINE_PATTERN_W {
ODD_LINE_PATTERN_W { w: self }
}
#[doc = "Bit 20 - By default, when the LCDIF is in the bus master mode, it will issue AXI bursts of length 16 (except when in packed 24 bpp mode, it will issue bursts of length 15)"]
#[inline(always)]
pub fn burst_len_8(&mut self) -> BURST_LEN_8_W {
BURST_LEN_8_W { w: self }
}
#[doc = "Bits 21:23 - This bitfield indicates the maximum number of outstanding transactions that LCDIF should request when it is acting as a bus master"]
#[inline(always)]
pub fn outstanding_reqs(&mut self) -> OUTSTANDING_REQS_W {
OUTSTANDING_REQS_W { w: self }
}
}
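// Illustrative use of the generated accessors; `lcdif` is a placeholder for
// the peripheral instance that owns this register:
//
// lcdif.ctrl2_set.write(|w| w.odd_line_pattern().bgr().outstanding_reqs().req_8());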
| 32.7109 | 186 | 0.580846 |
626e496a88848ed5839d7ff5b941f703841ec246 | 4,925 | // Copyright 2017, Nick Renieris
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fs::File;
use std::path::{Path, PathBuf};
use std::io::prelude::*;
use futures;
use futures::future::Future;
use hyper;
use hyper::header::*;
use hyper::server::{Http, Request, Response, Service};
use hyper::{Method, StatusCode};
use glob::glob;
use std::sync::Mutex;
struct MerkleTreeExplorer {
// stores the path to the data directory
data_path: String,
}
impl Service for MerkleTreeExplorer {
// boilerplate hooking up hyper's server types
type Request = Request;
type Response = Response;
type Error = hyper::Error;
// The future representing the eventual Response our call will resolve to
type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;
fn call(&self, req: Request) -> Self::Future {
let mut response = Response::new().with_header(ContentType::plaintext());
let mut contents = String::new();
// read directory SHA
let path_str = self.data_path.clone() + req.path();
let path = Path::new(&path_str);
if path.is_dir() {
// read root SHA
read_sha_path(&path, &mut contents, false, false);
// read children SHAs
let glob_path = path.join("*");
for entry in glob(&glob_path.to_str().unwrap()).expect("Failed to read glob pattern") {
match entry {
Ok(path) => {
if path.is_dir() {
// Directory
// Print the directory name and the hash
read_sha_path(&path, &mut contents, true, false);
} else {
// File
// Skip .sha1 files
if path.to_str().unwrap().ends_with(".sha1") {
continue;
};
// Print the file name and the hash
read_sha_path(&path, &mut contents, true, true);
}
}
Err(e) => println!("{:?}", e),
}
}
} else {
// just serve the static file
match File::open(&path) {
Ok(file) => {
(&file)
.read_to_string(&mut contents)
.expect("something went wrong reading the file");
}
Err(why) => {
// error!("couldn't read file {:?}, error {:?}", path, why);
response.set_status(StatusCode::NotFound);
contents.push_str("not there");
}
}
}
if req.method() == &Method::Get {
response.set_body(contents);
} else {
response.set_status(StatusCode::NotFound);
}
Box::new(futures::future::ok(response))
}
}
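/// Appends the `.sha1` companion hash for `path` to `contents`. A directory's
/// hash lives at `<path>/.sha1`, a file's alongside it as `<path>.sha1`; when
/// `incl_filename` is set, the entry's file name is written before the hash.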
fn read_sha_path(path: &Path, contents: &mut String, incl_filename: bool, is_file: bool) {
let sha1_path: PathBuf = if is_file {
        // if it's a file we need to just add ".sha1" at the end
let mut pathbuf = PathBuf::new();
pathbuf.push(path.to_str().unwrap().to_string() + ".sha1");
pathbuf
} else {
        // if it's a directory we need to also add the / ("/.sha1")
path.join(".sha1")
};
debug!("reading file: {:?}", &sha1_path);
match File::open(&sha1_path) {
Ok(file) => {
if incl_filename {
contents.push_str(path.file_name().unwrap().to_str().unwrap());
contents.push(' ');
}
(&file)
.read_to_string(contents)
.expect("something went wrong reading the file");
contents.push('\n');
}
Err(why) => error!("couldn't read file {:?}, error {:?}", sha1_path, why),
}
}
pub fn run_server(path: String, port: u16) {
let addr = format!("127.0.0.1:{}", port).parse().unwrap();
// Create and run the HTTP server
let server = Http::new()
.bind(&addr, move || {
            // Create the MerkleTreeExplorer object and set its data_path field
            Ok(MerkleTreeExplorer { data_path: path.to_owned() })
})
.unwrap();
server.run().unwrap();
}
| 33.053691 | 99 | 0.525685 |
d90a7b4f4fe9f8d40eb32c0c2a465e49d9483441 | 3,369 | use nu_test_support::fs::Stub::FileWithContent;
use nu_test_support::playground::Playground;
use nu_test_support::{nu, nu_error};
#[test]
fn can_only_apply_one() {
let actual = nu_error!(
cwd: "tests/fixtures/formats",
"open cargo_sample.toml | first 1 | inc package.version --major --minor"
);
assert!(actual.contains("Usage: inc field [--major|--minor|--patch]"));
}
#[test]
fn by_one_with_field_passed() {
Playground::setup("plugin_inc_test_1", |dirs, sandbox| {
sandbox.with_files(vec![FileWithContent(
"sample.toml",
r#"
[package]
edition = "2018"
"#,
)]);
let actual = nu!(
cwd: dirs.test(),
"open sample.toml | inc package.edition | get package.edition | echo $it"
);
assert_eq!(actual, "2019");
})
}
#[test]
fn by_one_with_no_field_passed() {
Playground::setup("plugin_inc_test_2", |dirs, sandbox| {
sandbox.with_files(vec![FileWithContent(
"sample.toml",
r#"
[package]
contributors = "2"
"#,
)]);
let actual = nu!(
cwd: dirs.test(),
"open sample.toml | get package.contributors | inc | echo $it"
);
assert_eq!(actual, "3");
})
}
#[test]
fn semversion_major_inc() {
Playground::setup("plugin_inc_test_3", |dirs, sandbox| {
sandbox.with_files(vec![FileWithContent(
"sample.toml",
r#"
[package]
version = "0.1.3"
"#,
)]);
let actual = nu!(
cwd: dirs.test(),
"open sample.toml | inc package.version -M | get package.version | echo $it"
);
assert_eq!(actual, "1.0.0");
})
}
#[test]
fn semversion_minor_inc() {
Playground::setup("plugin_inc_test_4", |dirs, sandbox| {
sandbox.with_files(vec![FileWithContent(
"sample.toml",
r#"
[package]
version = "0.1.3"
"#,
)]);
let actual = nu!(
cwd: dirs.test(),
"open sample.toml | inc package.version --minor | get package.version | echo $it"
);
assert_eq!(actual, "0.2.0");
})
}
#[test]
fn semversion_patch_inc() {
Playground::setup("plugin_inc_test_5", |dirs, sandbox| {
sandbox.with_files(vec![FileWithContent(
"sample.toml",
r#"
[package]
version = "0.1.3"
"#,
)]);
let actual = nu!(
cwd: dirs.test(),
"open sample.toml | inc package.version --patch | get package.version | echo $it"
);
assert_eq!(actual, "0.1.4");
})
}
#[test]
fn semversion_without_passing_field() {
Playground::setup("plugin_inc_test_6", |dirs, sandbox| {
sandbox.with_files(vec![FileWithContent(
"sample.toml",
r#"
[package]
version = "0.1.3"
"#,
)]);
let actual = nu!(
cwd: dirs.test(),
"open sample.toml | get package.version | inc --patch | echo $it"
);
assert_eq!(actual, "0.1.4");
})
}
| 25.141791 | 93 | 0.488275 |
dee93180e70d50586f4bec5fcb5c194d20f3ecbb | 2,261 | // This file is part of Substrate.
// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::chain_spec::ChainSpec;
use browser_utils::{browser_configuration, init_logging, set_console_error_panic_hook, Client};
use log::info;
use wasm_bindgen::prelude::*;
/// Starts the client.
#[wasm_bindgen]
pub fn start_client(chain_spec: Option<String>, log_level: String) -> Result<Client, JsValue> {
start_inner(chain_spec, log_level).map_err(|err| JsValue::from_str(&err.to_string()))
}
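/// Shared implementation behind `start_client`: parses the chain spec (falling
/// back to the development config), builds the browser configuration, and
/// starts a light client, returning it wrapped in a `Client`.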
fn start_inner(
chain_spec: Option<String>,
log_directives: String,
) -> Result<Client, Box<dyn std::error::Error>> {
set_console_error_panic_hook();
init_logging(&log_directives)?;
let chain_spec = match chain_spec {
Some(chain_spec) => ChainSpec::from_json_bytes(chain_spec.as_bytes().to_vec())
.map_err(|e| format!("{:?}", e))?,
None => crate::chain_spec::development_config(),
};
let config = browser_configuration(chain_spec)?;
info!("Substrate browser node");
info!("✌️ version {}", config.impl_version);
info!("❤️ by Parity Technologies, 2017-2021");
info!("📋 Chain specification: {}", config.chain_spec.name());
info!("🏷 Node name: {}", config.network.node_name);
info!("👤 Role: {:?}", config.role);
// Create the service. This is the most heavy initialization step.
let (task_manager, rpc_handlers) = crate::service::new_light_base(config)
.map(|(components, rpc_handlers, _, _, _)| (components, rpc_handlers))
.map_err(|e| format!("{:?}", e))?;
Ok(browser_utils::start_client(task_manager, rpc_handlers))
}
| 38.982759 | 95 | 0.728439 |
e4dcd2f5227cfb9dc6a3507d23b8c1df07014acc | 811 | use core::parse;
use std::io::{self, Write};
fn main() -> io::Result<()> {
let stdin = io::stdin();
let mut stdout = io::stdout();
let mut input = String::new();
loop {
write!(stdout, "→ ")?;
stdout.flush()?;
stdin.read_line(&mut input)?;
let trimmed_input = input.trim_end();
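        // Parse the line and dump the concrete syntax tree for inspection.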
let parse = parse(trimmed_input);
println!("{}", parse.debug_tree());
let root = ast::Root::cast(parse.syntax()).unwrap();
let var_defs = root.expr().map(|expr| {
if let ast::Expr::VariableDef(var_def) = expr {
Some(var_def.value())
} else {
None
}
});
dbg!(var_defs);
let (_, hir) = hir::lower(root);
dbg!(hir);
input.clear();
}
}
| 21.342105 | 60 | 0.478422 |
f96e4bdd28214a96e555530139bd3497566e48d2 | 14,024 | // Generated by gir (https://github.com/gtk-rs/gir @ 350409c)
// from .. (@ ed21eb4+)
// from ../gir-files (@ 38b7451)
// DO NOT EDIT
use gweather_sys::*;
use std::mem::{align_of, size_of};
use std::env;
use std::error::Error;
use std::ffi::OsString;
use std::path::Path;
use std::process::Command;
use std::str;
use tempfile::Builder;
static PACKAGES: &[&str] = &["gweather-3.0"];
#[derive(Clone, Debug)]
struct Compiler {
pub args: Vec<String>,
}
impl Compiler {
pub fn new() -> Result<Self, Box<dyn Error>> {
let mut args = get_var("CC", "cc")?;
args.push("-Wno-deprecated-declarations".to_owned());
// For _Generic
args.push("-std=c11".to_owned());
// For %z support in printf when using MinGW.
args.push("-D__USE_MINGW_ANSI_STDIO".to_owned());
args.extend(get_var("CFLAGS", "")?);
args.extend(get_var("CPPFLAGS", "")?);
args.extend(pkg_config_cflags(PACKAGES)?);
Ok(Self { args })
}
pub fn compile(&self, src: &Path, out: &Path) -> Result<(), Box<dyn Error>> {
let mut cmd = self.to_command();
cmd.arg(src);
cmd.arg("-o");
cmd.arg(out);
let status = cmd.spawn()?.wait()?;
if !status.success() {
return Err(format!("compilation command {:?} failed, {}", &cmd, status).into());
}
Ok(())
}
fn to_command(&self) -> Command {
let mut cmd = Command::new(&self.args[0]);
cmd.args(&self.args[1..]);
cmd
}
}
fn get_var(name: &str, default: &str) -> Result<Vec<String>, Box<dyn Error>> {
match env::var(name) {
Ok(value) => Ok(shell_words::split(&value)?),
Err(env::VarError::NotPresent) => Ok(shell_words::split(default)?),
Err(err) => Err(format!("{} {}", name, err).into()),
}
}
fn pkg_config_cflags(packages: &[&str]) -> Result<Vec<String>, Box<dyn Error>> {
if packages.is_empty() {
return Ok(Vec::new());
}
let pkg_config = env::var_os("PKG_CONFIG")
.unwrap_or_else(|| OsString::from("pkg-config"));
let mut cmd = Command::new(pkg_config);
cmd.arg("--cflags");
cmd.args(packages);
let out = cmd.output()?;
if !out.status.success() {
return Err(format!("command {:?} returned {}",
&cmd, out.status).into());
}
let stdout = str::from_utf8(&out.stdout)?;
Ok(shell_words::split(stdout.trim())?)
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct Layout {
size: usize,
alignment: usize,
}
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
struct Results {
/// Number of successfully completed tests.
passed: usize,
/// Total number of failed tests (including those that failed to compile).
failed: usize,
}
impl Results {
fn record_passed(&mut self) {
self.passed += 1;
}
fn record_failed(&mut self) {
self.failed += 1;
}
fn summary(&self) -> String {
format!("{} passed; {} failed", self.passed, self.failed)
}
fn expect_total_success(&self) {
if self.failed == 0 {
println!("OK: {}", self.summary());
} else {
panic!("FAILED: {}", self.summary());
};
}
}
#[test]
fn cross_validate_constants_with_c() {
let mut c_constants: Vec<(String, String)> = Vec::new();
for l in get_c_output("constant").unwrap().lines() {
let mut words = l.trim().split(';');
let name = words.next().expect("Failed to parse name").to_owned();
let value = words
.next()
.and_then(|s| s.parse().ok())
.expect("Failed to parse value");
c_constants.push((name, value));
}
let mut results = Results::default();
for ((rust_name, rust_value), (c_name, c_value)) in
RUST_CONSTANTS.iter().zip(c_constants.iter())
{
if rust_name != c_name {
results.record_failed();
eprintln!("Name mismatch:\nRust: {:?}\nC: {:?}", rust_name, c_name,);
continue;
}
if rust_value != c_value {
results.record_failed();
eprintln!(
"Constant value mismatch for {}\nRust: {:?}\nC: {:?}",
rust_name, rust_value, &c_value
);
continue;
}
results.record_passed();
}
results.expect_total_success();
}
#[test]
fn cross_validate_layout_with_c() {
let mut c_layouts = Vec::new();
for l in get_c_output("layout").unwrap().lines() {
let mut words = l.trim().split(';');
let name = words.next().expect("Failed to parse name").to_owned();
let size = words
.next()
.and_then(|s| s.parse().ok())
.expect("Failed to parse size");
let alignment = words
.next()
.and_then(|s| s.parse().ok())
.expect("Failed to parse alignment");
c_layouts.push((name, Layout { size, alignment }));
}
let mut results = Results::default();
for ((rust_name, rust_layout), (c_name, c_layout)) in
RUST_LAYOUTS.iter().zip(c_layouts.iter())
{
if rust_name != c_name {
results.record_failed();
eprintln!("Name mismatch:\nRust: {:?}\nC: {:?}", rust_name, c_name,);
continue;
}
if rust_layout != c_layout {
results.record_failed();
eprintln!(
"Layout mismatch for {}\nRust: {:?}\nC: {:?}",
rust_name, rust_layout, &c_layout
);
continue;
}
results.record_passed();
}
results.expect_total_success();
}
fn get_c_output(name: &str) -> Result<String, Box<dyn Error>> {
let tmpdir = Builder::new().prefix("abi").tempdir()?;
let exe = tmpdir.path().join(name);
let c_file = Path::new("tests").join(name).with_extension("c");
let cc = Compiler::new().expect("configured compiler");
cc.compile(&c_file, &exe)?;
let mut abi_cmd = Command::new(exe);
let output = abi_cmd.output()?;
if !output.status.success() {
return Err(format!("command {:?} failed, {:?}", &abi_cmd, &output).into());
}
Ok(String::from_utf8(output.stdout)?)
}
const RUST_LAYOUTS: &[(&str, Layout)] = &[
("GWeatherConditionPhenomenon", Layout {size: size_of::<GWeatherConditionPhenomenon>(), alignment: align_of::<GWeatherConditionPhenomenon>()}),
("GWeatherConditionQualifier", Layout {size: size_of::<GWeatherConditionQualifier>(), alignment: align_of::<GWeatherConditionQualifier>()}),
("GWeatherConditions", Layout {size: size_of::<GWeatherConditions>(), alignment: align_of::<GWeatherConditions>()}),
("GWeatherDistanceUnit", Layout {size: size_of::<GWeatherDistanceUnit>(), alignment: align_of::<GWeatherDistanceUnit>()}),
("GWeatherFormatOptions", Layout {size: size_of::<GWeatherFormatOptions>(), alignment: align_of::<GWeatherFormatOptions>()}),
("GWeatherInfoClass", Layout {size: size_of::<GWeatherInfoClass>(), alignment: align_of::<GWeatherInfoClass>()}),
("GWeatherLocationEntry", Layout {size: size_of::<GWeatherLocationEntry>(), alignment: align_of::<GWeatherLocationEntry>()}),
("GWeatherLocationEntryClass", Layout {size: size_of::<GWeatherLocationEntryClass>(), alignment: align_of::<GWeatherLocationEntryClass>()}),
("GWeatherLocationLevel", Layout {size: size_of::<GWeatherLocationLevel>(), alignment: align_of::<GWeatherLocationLevel>()}),
("GWeatherMoonLatitude", Layout {size: size_of::<GWeatherMoonLatitude>(), alignment: align_of::<GWeatherMoonLatitude>()}),
("GWeatherMoonPhase", Layout {size: size_of::<GWeatherMoonPhase>(), alignment: align_of::<GWeatherMoonPhase>()}),
("GWeatherPressureUnit", Layout {size: size_of::<GWeatherPressureUnit>(), alignment: align_of::<GWeatherPressureUnit>()}),
("GWeatherProvider", Layout {size: size_of::<GWeatherProvider>(), alignment: align_of::<GWeatherProvider>()}),
("GWeatherSky", Layout {size: size_of::<GWeatherSky>(), alignment: align_of::<GWeatherSky>()}),
("GWeatherSpeedUnit", Layout {size: size_of::<GWeatherSpeedUnit>(), alignment: align_of::<GWeatherSpeedUnit>()}),
("GWeatherTemperatureUnit", Layout {size: size_of::<GWeatherTemperatureUnit>(), alignment: align_of::<GWeatherTemperatureUnit>()}),
("GWeatherTimezoneMenu", Layout {size: size_of::<GWeatherTimezoneMenu>(), alignment: align_of::<GWeatherTimezoneMenu>()}),
("GWeatherTimezoneMenuClass", Layout {size: size_of::<GWeatherTimezoneMenuClass>(), alignment: align_of::<GWeatherTimezoneMenuClass>()}),
("GWeatherWindDirection", Layout {size: size_of::<GWeatherWindDirection>(), alignment: align_of::<GWeatherWindDirection>()}),
];
const RUST_CONSTANTS: &[(&str, &str)] = &[
("(gint) GWEATHER_DISTANCE_UNIT_DEFAULT", "1"),
("(gint) GWEATHER_DISTANCE_UNIT_INVALID", "0"),
("(gint) GWEATHER_DISTANCE_UNIT_KM", "3"),
("(gint) GWEATHER_DISTANCE_UNIT_METERS", "2"),
("(gint) GWEATHER_DISTANCE_UNIT_MILES", "4"),
("(guint) GWEATHER_FORMAT_OPTION_DEFAULT", "0"),
("(guint) GWEATHER_FORMAT_OPTION_NO_CAPITALIZATION", "2"),
("(guint) GWEATHER_FORMAT_OPTION_SENTENCE_CAPITALIZATION", "1"),
("(gint) GWEATHER_LOCATION_ADM1", "3"),
("(gint) GWEATHER_LOCATION_CITY", "4"),
("(gint) GWEATHER_LOCATION_COUNTRY", "2"),
("(gint) GWEATHER_LOCATION_DETACHED", "6"),
("GWEATHER_LOCATION_ENTRY_H", "1"),
("(gint) GWEATHER_LOCATION_NAMED_TIMEZONE", "7"),
("(gint) GWEATHER_LOCATION_REGION", "1"),
("(gint) GWEATHER_LOCATION_WEATHER_STATION", "5"),
("(gint) GWEATHER_LOCATION_WORLD", "0"),
("(gint) GWEATHER_PHENOMENON_DRIZZLE", "1"),
("(gint) GWEATHER_PHENOMENON_DUST", "17"),
("(gint) GWEATHER_PHENOMENON_DUSTSTORM", "20"),
("(gint) GWEATHER_PHENOMENON_DUST_WHIRLS", "23"),
("(gint) GWEATHER_PHENOMENON_FOG", "11"),
("(gint) GWEATHER_PHENOMENON_FUNNEL_CLOUD", "21"),
("(gint) GWEATHER_PHENOMENON_HAIL", "7"),
("(gint) GWEATHER_PHENOMENON_HAZE", "15"),
("(gint) GWEATHER_PHENOMENON_ICE_CRYSTALS", "5"),
("(gint) GWEATHER_PHENOMENON_ICE_PELLETS", "6"),
("(gint) GWEATHER_PHENOMENON_INVALID", "-1"),
("(gint) GWEATHER_PHENOMENON_LAST", "24"),
("(gint) GWEATHER_PHENOMENON_MIST", "10"),
("(gint) GWEATHER_PHENOMENON_NONE", "0"),
("(gint) GWEATHER_PHENOMENON_RAIN", "2"),
("(gint) GWEATHER_PHENOMENON_SAND", "14"),
("(gint) GWEATHER_PHENOMENON_SANDSTORM", "19"),
("(gint) GWEATHER_PHENOMENON_SMALL_HAIL", "8"),
("(gint) GWEATHER_PHENOMENON_SMOKE", "12"),
("(gint) GWEATHER_PHENOMENON_SNOW", "3"),
("(gint) GWEATHER_PHENOMENON_SNOW_GRAINS", "4"),
("(gint) GWEATHER_PHENOMENON_SPRAY", "16"),
("(gint) GWEATHER_PHENOMENON_SQUALL", "18"),
("(gint) GWEATHER_PHENOMENON_TORNADO", "22"),
("(gint) GWEATHER_PHENOMENON_UNKNOWN_PRECIPITATION", "9"),
("(gint) GWEATHER_PHENOMENON_VOLCANIC_ASH", "13"),
("(gint) GWEATHER_PRESSURE_UNIT_ATM", "7"),
("(gint) GWEATHER_PRESSURE_UNIT_DEFAULT", "1"),
("(gint) GWEATHER_PRESSURE_UNIT_HPA", "3"),
("(gint) GWEATHER_PRESSURE_UNIT_INCH_HG", "6"),
("(gint) GWEATHER_PRESSURE_UNIT_INVALID", "0"),
("(gint) GWEATHER_PRESSURE_UNIT_KPA", "2"),
("(gint) GWEATHER_PRESSURE_UNIT_MB", "4"),
("(gint) GWEATHER_PRESSURE_UNIT_MM_HG", "5"),
("(guint) GWEATHER_PROVIDER_ALL", "61"),
("(guint) GWEATHER_PROVIDER_IWIN", "4"),
("(guint) GWEATHER_PROVIDER_METAR", "1"),
("(guint) GWEATHER_PROVIDER_MET_NO", "16"),
("(guint) GWEATHER_PROVIDER_NONE", "0"),
("(guint) GWEATHER_PROVIDER_OWM", "32"),
("(guint) GWEATHER_PROVIDER_YAHOO", "8"),
("(gint) GWEATHER_QUALIFIER_BLOWING", "9"),
("(gint) GWEATHER_QUALIFIER_DRIFTING", "11"),
("(gint) GWEATHER_QUALIFIER_FREEZING", "12"),
("(gint) GWEATHER_QUALIFIER_HEAVY", "4"),
("(gint) GWEATHER_QUALIFIER_INVALID", "-1"),
("(gint) GWEATHER_QUALIFIER_LAST", "13"),
("(gint) GWEATHER_QUALIFIER_LIGHT", "2"),
("(gint) GWEATHER_QUALIFIER_MODERATE", "3"),
("(gint) GWEATHER_QUALIFIER_NONE", "0"),
("(gint) GWEATHER_QUALIFIER_PARTIAL", "7"),
("(gint) GWEATHER_QUALIFIER_PATCHES", "6"),
("(gint) GWEATHER_QUALIFIER_SHALLOW", "5"),
("(gint) GWEATHER_QUALIFIER_SHOWERS", "10"),
("(gint) GWEATHER_QUALIFIER_THUNDERSTORM", "8"),
("(gint) GWEATHER_QUALIFIER_VICINITY", "1"),
("(gint) GWEATHER_SKY_BROKEN", "1"),
("(gint) GWEATHER_SKY_CLEAR", "0"),
("(gint) GWEATHER_SKY_FEW", "3"),
("(gint) GWEATHER_SKY_INVALID", "-1"),
("(gint) GWEATHER_SKY_LAST", "5"),
("(gint) GWEATHER_SKY_OVERCAST", "4"),
("(gint) GWEATHER_SKY_SCATTERED", "2"),
("(gint) GWEATHER_SPEED_UNIT_BFT", "6"),
("(gint) GWEATHER_SPEED_UNIT_DEFAULT", "1"),
("(gint) GWEATHER_SPEED_UNIT_INVALID", "0"),
("(gint) GWEATHER_SPEED_UNIT_KNOTS", "5"),
("(gint) GWEATHER_SPEED_UNIT_KPH", "3"),
("(gint) GWEATHER_SPEED_UNIT_MPH", "4"),
("(gint) GWEATHER_SPEED_UNIT_MS", "2"),
("(gint) GWEATHER_TEMP_UNIT_CENTIGRADE", "3"),
("(gint) GWEATHER_TEMP_UNIT_DEFAULT", "1"),
("(gint) GWEATHER_TEMP_UNIT_FAHRENHEIT", "4"),
("(gint) GWEATHER_TEMP_UNIT_INVALID", "0"),
("(gint) GWEATHER_TEMP_UNIT_KELVIN", "2"),
("GWEATHER_TIMEZONE_MENU_H", "1"),
("(gint) GWEATHER_WIND_E", "5"),
("(gint) GWEATHER_WIND_ENE", "4"),
("(gint) GWEATHER_WIND_ESE", "6"),
("(gint) GWEATHER_WIND_INVALID", "-1"),
("(gint) GWEATHER_WIND_LAST", "17"),
("(gint) GWEATHER_WIND_N", "1"),
("(gint) GWEATHER_WIND_NE", "3"),
("(gint) GWEATHER_WIND_NNE", "2"),
("(gint) GWEATHER_WIND_NNW", "16"),
("(gint) GWEATHER_WIND_NW", "15"),
("(gint) GWEATHER_WIND_S", "9"),
("(gint) GWEATHER_WIND_SE", "7"),
("(gint) GWEATHER_WIND_SSE", "8"),
("(gint) GWEATHER_WIND_SSW", "10"),
("(gint) GWEATHER_WIND_SW", "11"),
("(gint) GWEATHER_WIND_VARIABLE", "0"),
("(gint) GWEATHER_WIND_W", "13"),
("(gint) GWEATHER_WIND_WNW", "14"),
("(gint) GWEATHER_WIND_WSW", "12"),
];
| 39.504225 | 147 | 0.615944 |
f577dc12ea44bff48206bf20a21d286916919b04 | 739 | //! Implementation of the [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt)
//! for the pre-async versions of `hyper` such as those used by `iron`.
//!
//! # Example
//!
//! Wrapping an HTTP listener so that it will expect the PROXY protocol v2
//!
//! ```no_run
//! use hyper_networklistener_proxy::{ProxyListener, ProxyProtocolVersion};
//! use hyper::net::HttpListener;
//!
//! let listener = ProxyListener(
//! HttpListener::new("127.0.0.1:8080").unwrap(),
//! ProxyProtocolVersion::V2
//! );
//! ```
extern crate hyper;
extern crate byteorder;
mod proxy_stream;
pub mod proxy_listener;
pub mod proxy_protocol;
pub use proxy_listener::ProxyListener;
pub use proxy_protocol::ProxyProtocolVersion;
| 27.37037 | 103 | 0.713126 |
ab52a14aac843dc9861d15bfd696347b90c0112c | 2,802 | /* MIT License
*
* Copyright (c) 2019 Robert Swain <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use crate::color::{pixel_value, Radiance};
use crate::point::vector3;
use rayon::iter::IntoParallelRefMutIterator;
use rayon::prelude::*;
use rayon::slice::IterMut;
pub struct Image {
pub width: usize,
pub height: usize,
data: Vec<Radiance>,
data_argb8888: Vec<u8>,
}
impl Image {
pub fn new(width: usize, height: usize) -> Image {
let mut image = Image {
width,
height,
data: vec![vector3(0f32, 0f32, 0f32); width * height],
data_argb8888: vec![0; width * height * 4],
};
for i in 0..(width * height) {
image.data_argb8888[4 * i] = 255;
}
image
}
pub fn set(&mut self, x: usize, y: usize, l_i: Radiance) {
// println!("{}:{} = {}:{}:{}", x, y, l_i.x, l_i.y, l_i.z);
let index = y * self.width + x;
self.data[index] = l_i;
}
pub fn as_argb8888(&mut self) -> &[u8] {
self.data
.par_iter()
.zip(self.data_argb8888.par_chunks_mut(4))
.for_each(|(l, argb)| {
                let color = pixel_value(*l, 1.0f32, 2.2f32) * 255.99f32;
                // Byte 0 holds the alpha channel (initialized to 255 in
                // `new`); write the tone-mapped color into the remaining
                // three bytes so the alpha value is not clobbered.
                argb[1] = color.x as u8;
                argb[2] = color.y as u8;
                argb[3] = color.z as u8;
});
self.data_argb8888.as_slice()
}
}
impl<'data> IntoParallelRefMutIterator<'data> for Image {
type Item = &'data mut Radiance;
type Iter = IterMut<'data, Radiance>;
fn par_iter_mut(&'data mut self) -> Self::Iter {
self.data.par_iter_mut()
}
}
pub fn image(width: usize, height: usize) -> Image {
Image::new(width, height)
}
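// Typical usage with the helpers above:
//
//     let mut img = image(640, 480);
//     img.set(0, 0, vector3(1.0, 0.0, 0.0)); // store radiance per pixel
//     let argb = img.as_argb8888();          // tone-mapped 8-bit output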
| 32.206897 | 81 | 0.626695 |
90feb8254357db957c8dfebba371b1c4cb8d29fc | 1,253 | use crate::le::att::pdus::exchange::response::ExchangeMTURsp;
use crate::le::att::pdus::{PackablePDU, Request, UnpackablePDU};
use crate::le::att::Opcode;
use crate::le::connection::MTU;
use crate::PackError;
use std::convert::TryInto;
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug, Hash)]
pub struct ExchangeMTUReq(pub MTU);
impl ExchangeMTUReq {
pub const BYTE_LEN: usize = MTU::BYTE_LEN;
}
impl PackablePDU for ExchangeMTUReq {
const OPCODE: Opcode = Opcode::ExchangeMTUReq;
fn byte_len(&self) -> usize {
Self::BYTE_LEN
}
fn pack_into(&self, buf: &mut [u8]) -> Result<(), PackError> {
PackError::expect_length(Self::BYTE_LEN, buf)?;
buf.copy_from_slice(u16::from(self.0).to_le_bytes().as_ref());
Ok(())
}
}
impl UnpackablePDU for ExchangeMTUReq {
fn unpack_from(buf: &[u8]) -> Result<Self, PackError>
where
Self: Sized,
{
PackError::expect_length(Self::BYTE_LEN, buf)?;
let mtu = MTU::new_checked(u16::from_le_bytes(
buf.try_into().expect("length checked above"),
))
.ok_or(PackError::bad_index(0))?;
Ok(ExchangeMTUReq(mtu))
}
}
impl Request for ExchangeMTUReq {
type Response = ExchangeMTURsp;
}
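#[cfg(test)]
mod tests {
    use super::*;

    // Sketch of a pack/unpack round trip. Assumes `MTU::new_checked(23)`
    // (23 is the ATT default MTU) returns `Some`; adjust if the valid range
    // differs.
    #[test]
    fn exchange_mtu_req_round_trip() {
        let req = ExchangeMTUReq(MTU::new_checked(23).expect("valid MTU"));
        let mut buf = [0u8; ExchangeMTUReq::BYTE_LEN];
        req.pack_into(&mut buf).expect("pack");
        assert_eq!(ExchangeMTUReq::unpack_from(&buf).expect("unpack"), req);
    }
}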
| 29.833333 | 70 | 0.650439 |
0a3b2148470f9e0d6a568b7235072929868d6b08 | 5,537 | use serde_json::Value;
use std::collections;
use super::super::helpers;
use super::super::schema;
use super::super::validators;
#[allow(missing_copy_implementations)]
pub struct Dependencies;
impl super::Keyword for Dependencies {
fn compile(&self, def: &Value, ctx: &schema::WalkContext<'_>) -> super::KeywordResult {
let deps = keyword_key_exists!(def, "dependencies");
if !deps.is_object() {
return Err(schema::SchemaError::Malformed {
path: ctx.fragment.join("/"),
detail: "The value of this keyword MUST be an object.".to_string(),
});
}
let deps = deps.as_object().unwrap();
let mut items = collections::HashMap::new();
for (key, item) in deps.iter() {
if item.is_object() || item.is_boolean() {
items.insert(
key.clone(),
validators::dependencies::DepKind::Schema(helpers::alter_fragment_path(
ctx.url.clone(),
[
ctx.escaped_fragment().as_ref(),
"dependencies",
helpers::encode(key).as_ref(),
]
.join("/"),
)),
);
} else if item.is_array() {
let item = item.as_array().unwrap();
let mut keys = vec![];
for key in item.iter() {
if key.is_string() {
keys.push(key.as_str().unwrap().to_string())
} else {
return Err(schema::SchemaError::Malformed {
path: ctx.fragment.join("/"),
detail: "Each element MUST be a string, and elements in the array MUST be unique.".to_string()
});
}
}
items.insert(
key.clone(),
validators::dependencies::DepKind::Property(keys),
);
} else {
return Err(schema::SchemaError::Malformed {
path: ctx.fragment.join("/"),
detail:
"Each value of this object MUST be either an object, an array or a boolean."
.to_string(),
});
}
}
Ok(Some(Box::new(validators::Dependencies { items })))
}
}
#[cfg(test)]
use super::super::builder;
#[cfg(test)]
use super::super::scope;
#[cfg(test)]
use jsonway;
#[test]
fn validate_dependencies() {
let mut scope = scope::Scope::new();
let schema = scope
.compile_and_return(
builder::schema(|s| {
s.dependencies(|deps| {
deps.schema("isbn", |isbn| {
isbn.required(vec!["price".to_string()]);
isbn.properties(|props| {
props.insert("price", |price| {
price.multiple_of(5f64);
})
})
});
deps.property("item_id", vec!["item_name".to_string()]);
});
})
.into_json(),
true,
)
.ok()
.unwrap();
assert_eq!(
schema
.validate(
&jsonway::object(|obj| {
obj.set("isbn", "some_isbn".to_string());
})
.unwrap()
)
.is_valid(),
false
);
assert_eq!(
schema
.validate(
&jsonway::object(|obj| {
obj.set("isbn", "some_isbn".to_string());
obj.set("price", 773);
})
.unwrap()
)
.is_valid(),
false
);
assert_eq!(
schema
.validate(
&jsonway::object(|obj| {
obj.set("isbn", "some_isbn".to_string());
obj.set("price", 775);
})
.unwrap()
)
.is_valid(),
true
);
assert_eq!(
schema
.validate(
&jsonway::object(|obj| {
obj.set("item_id", "some_id".to_string());
})
.unwrap()
)
.is_valid(),
false
);
assert_eq!(
schema
.validate(
&jsonway::object(|obj| {
obj.set("item_id", "some_id".to_string());
obj.set("item_name", "some_name".to_string());
})
.unwrap()
)
.is_valid(),
true
);
}
#[test]
fn malformed() {
let mut scope = scope::Scope::new();
assert!(scope
.compile_and_return(
jsonway::object(|schema| {
schema.object("dependencies", |deps| {
deps.set("isbn", 10);
});
})
.unwrap(),
true
)
.is_err());
assert!(scope
.compile_and_return(
jsonway::object(|schema| {
schema.object("dependencies", |deps| {
deps.array("item_id", |item_id| item_id.push(10));
});
})
.unwrap(),
true
)
.is_err());
}
| 28.838542 | 122 | 0.40708 |
5bbcac51f8ca8d1b941c5f96bbd21a43942e3251 | 712 | fn power_of_two(l: isize, n: isize) -> isize {
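    // Returns the exponent of the n-th power of 2 whose decimal expansion
    // begins with the digits of `l`. Rather than computing 2^test directly,
    // the leading digits are recovered from the fractional part of
    // test * log10(2), scaled by `factor` (a power of ten sized to `l`).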
let mut test: isize = 0;
let log: f64 = 2.0_f64.ln() / 10.0_f64.ln();
let mut factor: isize = 1;
let mut looop = l;
let mut nn = n;
while looop > 10 {
factor *= 10;
looop /= 10;
}
    while nn > 0 {
        test += 1;
        let val: isize = (factor as f64 * 10.0_f64.powf(test as f64 * log % 1.0)) as isize;
        if val == l {
            nn -= 1;
        }
    }
test
}
fn run_test(l: isize, n: isize) {
println!("p({}, {}) = {}", l, n, power_of_two(l, n));
}
fn main() {
run_test(12, 1);
run_test(12, 2);
run_test(123, 45);
run_test(123, 12345);
run_test(123, 678910);
}
| 20.342857 | 91 | 0.484551 |
efbb0bfb1508f4c8c8355490bb28fe43e1fb7ed1 | 148 | fn main() {
let x = (1, 2, 3);
match x {
(_a, _x @ ..) => {}
_ => {}
}
}
//~^^^^ ERROR `_x @` is not allowed in a tuple
| 16.444444 | 46 | 0.344595 |
39edf7cf603b4e5293501d96a5f60676edb866c6 | 1,020 | use gouda::rendering::drawable::{QuadDrawable, TextureDrawable};
use gouda::rendering::texture::RenderableTexture;
use gouda::ecs::{Entity, ECS};
use crate::tilemap::Tile;
use std::rc::Rc;
use gouda::rendering::{Renderer, Scene};
use gouda::png::PNG;
use crate::camera::Camera;
#[derive(Debug)]
pub struct Hearth {
drawable: TextureDrawable,
}
impl Hearth {
pub fn create(ecs: &mut ECS, tile: Entity) {
let tile = ecs.read::<Tile>(&tile).unwrap();
let renderer = ecs.read_res::<Rc<Renderer>>();
let texture = RenderableTexture::new(renderer, &PNG::from_file("bitmap/hearth.png").unwrap().image());
let drawable = TextureDrawable::new(false, renderer, texture, [tile.x as f32, tile.y as f32, 0.], [0.4, 0.4, 1.0], [0.; 3]);
let hearth = Hearth {
drawable,
};
ecs.build_entity().add(hearth);
}
pub fn draw(&self, scene: &Scene, camera: &Camera) {
self.drawable.draw_with_projection(scene, &camera.projection_buffer);
}
}
| 30.909091 | 132 | 0.638235 |
e49a63fc7fc711dca787d7ef32d3654f5b5bcceb | 4,330 | //! Thousands of utility functions for slices and vec
#![doc(html_root_url = "https://docs.rs/lazyext-slice/0.0.2")]
#![deny(
missing_docs,
warnings,
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, allow(unused_attributes))]
#[cfg(feature = "alloc")]
extern crate alloc;
#[macro_use]
extern crate paste;
macro_rules! cfg_alloc {
($($item:item)*) => {
$(
#[cfg(feature = "alloc")]
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
$item
)*
}
}
macro_rules! has_prefix {
($trait:tt::$fn:tt) => {
/// Returns whether the slice self begins with prefix.
#[inline]
fn has_prefix(&self, prefix: impl $trait) -> bool {
let src = $trait::$fn(self);
let prefix = $trait::$fn(&prefix);
let pl = prefix.len();
if src.len() < pl {
return false;
}
src[0..pl].eq(prefix)
}
};
}
macro_rules! has_suffix {
($trait:tt::$fn:tt) => {
/// Returns whether the slice self ends with suffix.
#[inline]
fn has_suffix(&self, suffix: impl $trait) -> bool {
let src = $trait::$fn(self);
let suffix = $trait::$fn(&suffix);
      let sl = suffix.len();
      if src.len() < sl {
        return false;
      }
      // Compare only the trailing `sl` elements of `src`.
      src[src.len() - sl..].eq(suffix)
}
};
}
macro_rules! longest_prefix {
($trait:tt::$fn:tt, $ty: ty) => {
/// Finds the longest shared prefix
#[inline]
fn longest_prefix(&self, other: impl $trait) -> &[$ty] {
let k1 = $trait::$fn(self);
let k2 = $trait::$fn(&other);
let max = k1.len().min(k2.len());
      let mut n = max;
for i in 0..max {
if k1[i].ne(&k2[i]) {
n = i;
break;
}
}
&k1[..n]
}
};
}
macro_rules! longest_suffix {
($trait:tt::$fn:tt, $ty: ty) => {
/// Finds the longest shared suffix
#[inline]
fn longest_suffix(&self, other: impl $trait) -> &[$ty] {
let k1 = $trait::$fn(self);
let k1_len = k1.len();
let k2 = $trait::$fn(&other);
let k2_len = k2.len();
return if k1_len < k2_len {
let max = k1_len;
let mut n = max;
for i in 0..max {
if k1[k1_len - i - 1].ne(&k2[k2_len - i - 1]) {
n = i;
break;
}
}
&k1[max - n..]
} else {
let max = k2_len;
let mut n = max;
for i in 0..max {
if k1[k1_len - i - 1].ne(&k2[k2_len - i - 1]) {
n = i;
break;
}
}
&k1[k1_len - k2_len + max - n..]
};
}
}
}
#[cfg(feature = "alloc")]
macro_rules! longest_prefix_lossy {
($trait:tt::$fn:tt, $ty: ty, $ty_literal: literal) => {
#[doc = concat!("Finds the longest shared prefix, return a Cow<'_, [", $ty_literal, "]>.")]
#[inline]
fn longest_prefix_lossy(&self, other: impl $trait) -> Cow<'_, [$ty]> {
Cow::Borrowed(self.longest_prefix(other))
}
};
}
#[cfg(feature = "alloc")]
macro_rules! longest_suffix_lossy {
($trait:tt::$fn:tt, $ty: ty, $ty_literal: literal) => {
#[doc = concat!("Finds the longest shared suffix, return a Cow<'_, [", $ty_literal, "]>.")]
#[inline]
fn longest_suffix_lossy(&self, other: impl $trait) -> Cow<'_, [$ty]> {
Cow::Borrowed(self.longest_suffix(other))
}
};
}
macro_rules! impl_psfix_suites {
($trait:tt::$fn:tt, $ty: ty, $ty_literal: literal) => {
has_prefix!($trait::$fn);
has_suffix!($trait::$fn);
longest_prefix!($trait::$fn, $ty);
longest_suffix!($trait::$fn, $ty);
cfg_alloc!{
longest_prefix_lossy!($trait::$fn, $ty, $ty_literal);
longest_suffix_lossy!($trait::$fn, $ty, $ty_literal);
}
};
}
mod bytes_ext;
mod slice_ext;
pub use bytes_ext::*;
pub use slice_ext::*;
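// Hypothetical usage of the generated helpers (assumes the extension traits
// in `slice_ext`/`bytes_ext` implement these methods for byte slices):
//
//     assert!(b"hello".has_prefix(&b"he"[..]));
//     assert!(b"hello".has_suffix(&b"lo"[..]));
//     assert_eq!(b"ab1".longest_prefix(&b"ab2"[..]), b"ab");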
| 27.232704 | 99 | 0.457275 |
9bb53956dcdf0b3259a2a2ff0d0410f219c9fdf9 | 49,481 | // Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use crate::rpc;
use anyhow::{bail, ensure, Context, Result};
use serde::Deserialize;
use std::collections::HashSet;
use std::env;
use std::fs;
use std::str::FromStr;
use std::time;
use tracing::instrument;
const DEBUG_CONSOLE_FLAG: &str = "agent.debug_console";
const DEV_MODE_FLAG: &str = "agent.devmode";
const TRACE_MODE_OPTION: &str = "agent.trace";
const LOG_LEVEL_OPTION: &str = "agent.log";
const SERVER_ADDR_OPTION: &str = "agent.server_addr";
const HOTPLUG_TIMOUT_OPTION: &str = "agent.hotplug_timeout";
const DEBUG_CONSOLE_VPORT_OPTION: &str = "agent.debug_console_vport";
const LOG_VPORT_OPTION: &str = "agent.log_vport";
const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";
const UNIFIED_CGROUP_HIERARCHY_OPTION: &str = "agent.unified_cgroup_hierarchy";
const CONFIG_FILE: &str = "agent.config_file";
const CONTAINER_POLICY_FILE: &str = "agent.container_policy_file";
const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
const DEFAULT_CONTAINER_PIPE_SIZE: i32 = 0;
const VSOCK_ADDR: &str = "vsock://-1";
const VSOCK_PORT: u16 = 1024;
// Environment variables used for development and testing
const SERVER_ADDR_ENV_VAR: &str = "KATA_AGENT_SERVER_ADDR";
const LOG_LEVEL_ENV_VAR: &str = "KATA_AGENT_LOG_LEVEL";
const TRACING_ENV_VAR: &str = "KATA_AGENT_TRACING";
const ERR_INVALID_LOG_LEVEL: &str = "invalid log level";
const ERR_INVALID_LOG_LEVEL_PARAM: &str = "invalid log level parameter";
const ERR_INVALID_GET_VALUE_PARAM: &str = "expected name=value";
const ERR_INVALID_GET_VALUE_NO_NAME: &str = "name=value parameter missing name";
const ERR_INVALID_GET_VALUE_NO_VALUE: &str = "name=value parameter missing value";
const ERR_INVALID_LOG_LEVEL_KEY: &str = "invalid log level key name";
const ERR_INVALID_HOTPLUG_TIMEOUT: &str = "invalid hotplug timeout parameter";
const ERR_INVALID_HOTPLUG_TIMEOUT_PARAM: &str = "unable to parse hotplug timeout";
const ERR_INVALID_HOTPLUG_TIMEOUT_KEY: &str = "invalid hotplug timeout key name";
const ERR_INVALID_CONTAINER_PIPE_SIZE: &str = "invalid container pipe size parameter";
const ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM: &str = "unable to parse container pipe size";
const ERR_INVALID_CONTAINER_PIPE_SIZE_KEY: &str = "invalid container pipe size key name";
const ERR_INVALID_CONTAINER_PIPE_NEGATIVE: &str = "container pipe size should not be negative";
const ERR_INVALID_CONTAINER_POLICY_PATH_VALUE: &str = "invalid container_policy_file value";
const ERR_INVALID_CONTAINER_POLICY_PATH_KEY: &str = "invalid container_policy_file key";
const ERR_INVALID_CONTAINER_POLICY_ABSOLUTE: &str =
"container_policy_file path must be an absolute file path";
#[derive(Debug, Default, Deserialize)]
pub struct EndpointsConfig {
pub allowed: Vec<String>,
}
#[derive(Debug, Default)]
pub struct AgentEndpoints {
pub allowed: HashSet<String>,
pub all_allowed: bool,
}
#[derive(Debug)]
pub struct AgentConfig {
pub debug_console: bool,
pub dev_mode: bool,
pub log_level: slog::Level,
pub hotplug_timeout: time::Duration,
pub debug_console_vport: i32,
pub log_vport: i32,
pub container_pipe_size: i32,
pub server_addr: String,
pub unified_cgroup_hierarchy: bool,
pub tracing: bool,
pub endpoints: AgentEndpoints,
pub supports_seccomp: bool,
pub container_policy_path: String,
pub aa_kbc_params: String,
}
#[derive(Debug, Deserialize)]
pub struct AgentConfigBuilder {
pub debug_console: Option<bool>,
pub dev_mode: Option<bool>,
pub log_level: Option<String>,
pub hotplug_timeout: Option<time::Duration>,
pub debug_console_vport: Option<i32>,
pub log_vport: Option<i32>,
pub container_pipe_size: Option<i32>,
pub server_addr: Option<String>,
pub unified_cgroup_hierarchy: Option<bool>,
pub tracing: Option<bool>,
pub endpoints: Option<EndpointsConfig>,
pub container_policy_path: Option<String>,
pub aa_kbc_params: Option<String>,
}
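// config_override! replaces a default `AgentConfig` field with the value from
// the `AgentConfigBuilder` when one was supplied, optionally running it
// through a conversion function (e.g. `logrus_to_slog_level`) first.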
macro_rules! config_override {
($builder:ident, $config:ident, $field:ident) => {
if let Some(v) = $builder.$field {
$config.$field = v;
}
};
($builder:ident, $config:ident, $field:ident, $func: ident) => {
if let Some(v) = $builder.$field {
$config.$field = $func(&v)?;
}
};
}
// parse_cmdline_param parse commandline parameters.
macro_rules! parse_cmdline_param {
// commandline flags, without func to parse the option values
($param:ident, $key:ident, $field:expr) => {
if $param.eq(&$key) {
$field = true;
continue;
}
};
// commandline options, with func to parse the option values
($param:ident, $key:ident, $field:expr, $func:ident) => {
if $param.starts_with(format!("{}=", $key).as_str()) {
let val = $func($param)?;
$field = val;
continue;
}
};
// commandline options, with func to parse the option values, and match func
// to valid the values
($param:ident, $key:ident, $field:expr, $func:ident, $guard:expr) => {
if $param.starts_with(format!("{}=", $key).as_str()) {
let val = $func($param)?;
if $guard(val) {
$field = val;
}
continue;
}
};
}
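// Example kernel command line consumed by `AgentConfig::from_cmdline` using
// the flags and options defined above:
//
//     agent.devmode agent.log=debug agent.hotplug_timeout=30
//     agent.server_addr=vsock://-1:1024 agent.trace=true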
impl Default for AgentConfig {
fn default() -> Self {
AgentConfig {
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
debug_console_vport: 0,
log_vport: 0,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: format!("{}:{}", VSOCK_ADDR, VSOCK_PORT),
unified_cgroup_hierarchy: false,
tracing: false,
endpoints: Default::default(),
supports_seccomp: rpc::have_seccomp(),
container_policy_path: String::from(""),
aa_kbc_params: String::from(""),
}
}
}
impl FromStr for AgentConfig {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let agent_config_builder: AgentConfigBuilder =
toml::from_str(s).map_err(anyhow::Error::new)?;
let mut agent_config: AgentConfig = Default::default();
// Overwrite default values with the configuration files ones.
config_override!(agent_config_builder, agent_config, debug_console);
config_override!(agent_config_builder, agent_config, dev_mode);
config_override!(
agent_config_builder,
agent_config,
log_level,
logrus_to_slog_level
);
config_override!(agent_config_builder, agent_config, hotplug_timeout);
config_override!(agent_config_builder, agent_config, debug_console_vport);
config_override!(agent_config_builder, agent_config, log_vport);
config_override!(agent_config_builder, agent_config, container_pipe_size);
config_override!(agent_config_builder, agent_config, server_addr);
config_override!(agent_config_builder, agent_config, unified_cgroup_hierarchy);
config_override!(agent_config_builder, agent_config, tracing);
config_override!(agent_config_builder, agent_config, container_policy_path);
config_override!(agent_config_builder, agent_config, aa_kbc_params);
// Populate the allowed endpoints hash set, if we got any from the config file.
if let Some(endpoints) = agent_config_builder.endpoints {
for ep in endpoints.allowed {
agent_config.endpoints.allowed.insert(ep);
}
}
Ok(agent_config)
}
}
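// Example configuration file accepted by `AgentConfig::from_str` (and hence
// `from_config_file`), mirroring the test at the bottom of this file:
//
//     dev_mode = true
//     server_addr = 'vsock://8:2048'
//
//     [endpoints]
//     allowed = ["CreateContainer", "StartContainer"]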
impl AgentConfig {
#[instrument]
pub fn from_cmdline(file: &str) -> Result<AgentConfig> {
let mut config: AgentConfig = Default::default();
let cmdline = fs::read_to_string(file)?;
let params: Vec<&str> = cmdline.split_ascii_whitespace().collect();
for param in params.iter() {
// If we get a configuration file path from the command line, we
// generate our config from it.
// The agent will fail to start if the configuration file is not present,
// or if it can't be parsed properly.
if param.starts_with(format!("{}=", CONFIG_FILE).as_str()) {
let config_file = get_string_value(param)?;
return AgentConfig::from_config_file(&config_file);
}
// parse cmdline flags
parse_cmdline_param!(param, DEBUG_CONSOLE_FLAG, config.debug_console);
parse_cmdline_param!(param, DEV_MODE_FLAG, config.dev_mode);
// Support "bare" tracing option for backwards compatibility with
// Kata 1.x.
if param == &TRACE_MODE_OPTION {
config.tracing = true;
continue;
}
parse_cmdline_param!(param, TRACE_MODE_OPTION, config.tracing, get_bool_value);
// parse cmdline options
parse_cmdline_param!(param, LOG_LEVEL_OPTION, config.log_level, get_log_level);
parse_cmdline_param!(
param,
SERVER_ADDR_OPTION,
config.server_addr,
get_string_value
);
// ensure the timeout is a positive value
parse_cmdline_param!(
param,
HOTPLUG_TIMOUT_OPTION,
config.hotplug_timeout,
get_hotplug_timeout,
|hotplug_timeout: time::Duration| hotplug_timeout.as_secs() > 0
);
// vsock port should be positive values
parse_cmdline_param!(
param,
DEBUG_CONSOLE_VPORT_OPTION,
config.debug_console_vport,
get_vsock_port,
|port| port > 0
);
parse_cmdline_param!(
param,
LOG_VPORT_OPTION,
config.log_vport,
get_vsock_port,
|port| port > 0
);
parse_cmdline_param!(
param,
CONTAINER_PIPE_SIZE_OPTION,
config.container_pipe_size,
get_container_pipe_size
);
parse_cmdline_param!(
param,
UNIFIED_CGROUP_HIERARCHY_OPTION,
config.unified_cgroup_hierarchy,
get_bool_value
);
parse_cmdline_param!(
param,
CONTAINER_POLICY_FILE,
config.container_policy_path,
get_container_policy_path_value
);
}
if let Ok(addr) = env::var(SERVER_ADDR_ENV_VAR) {
config.server_addr = addr;
}
if let Ok(addr) = env::var(LOG_LEVEL_ENV_VAR) {
if let Ok(level) = logrus_to_slog_level(&addr) {
config.log_level = level;
}
}
if let Ok(value) = env::var(TRACING_ENV_VAR) {
let name_value = format!("{}={}", TRACING_ENV_VAR, value);
config.tracing = get_bool_value(&name_value)?;
}
// We did not get a configuration file: allow all endpoints.
config.endpoints.all_allowed = true;
Ok(config)
}
#[instrument]
pub fn from_config_file(file: &str) -> Result<AgentConfig> {
let config = fs::read_to_string(file)?;
AgentConfig::from_str(&config)
}
pub fn is_allowed_endpoint(&self, ep: &str) -> bool {
self.endpoints.all_allowed || self.endpoints.allowed.contains(ep)
}
}
#[instrument]
fn get_vsock_port(p: &str) -> Result<i32> {
let fields: Vec<&str> = p.split('=').collect();
ensure!(fields.len() == 2, "invalid port parameter");
Ok(fields[1].parse::<i32>()?)
}
// Map logrus (https://godoc.org/github.com/sirupsen/logrus)
// log level to the equivalent slog log levels.
//
// Note: Logrus names are used for compatibility with the previous
// golang-based agent.
#[instrument]
fn logrus_to_slog_level(logrus_level: &str) -> Result<slog::Level> {
let level = match logrus_level {
// Note: different semantics to logrus: log, but don't panic.
"fatal" | "panic" => slog::Level::Critical,
"critical" => slog::Level::Critical,
"error" => slog::Level::Error,
"warn" | "warning" => slog::Level::Warning,
"info" => slog::Level::Info,
"debug" => slog::Level::Debug,
// Not in logrus
"trace" => slog::Level::Trace,
_ => bail!(ERR_INVALID_LOG_LEVEL),
};
Ok(level)
}
#[instrument]
fn get_log_level(param: &str) -> Result<slog::Level> {
let fields: Vec<&str> = param.split('=').collect();
ensure!(fields.len() == 2, ERR_INVALID_LOG_LEVEL_PARAM);
ensure!(fields[0] == LOG_LEVEL_OPTION, ERR_INVALID_LOG_LEVEL_KEY);
logrus_to_slog_level(fields[1])
}
#[instrument]
fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
let fields: Vec<&str> = param.split('=').collect();
ensure!(fields.len() == 2, ERR_INVALID_HOTPLUG_TIMEOUT);
ensure!(
fields[0] == HOTPLUG_TIMOUT_OPTION,
ERR_INVALID_HOTPLUG_TIMEOUT_KEY
);
let value = fields[1]
.parse::<u64>()
.with_context(|| ERR_INVALID_HOTPLUG_TIMEOUT_PARAM)?;
Ok(time::Duration::from_secs(value))
}
#[instrument]
fn get_bool_value(param: &str) -> Result<bool> {
let fields: Vec<&str> = param.split('=').collect();
if fields.len() != 2 {
return Ok(false);
}
let v = fields[1];
// first try to parse as bool value
v.parse::<bool>().or_else(|_err1| {
// then try to parse as integer value
v.parse::<u64>().or(Ok(0)).map(|v| !matches!(v, 0))
})
}
// Return the value from a "name=value" string.
//
// Note:
//
// - A name *and* a value is required.
// - A value can contain any number of equal signs.
// - We could/should maybe check if the name is pure whitespace
// since this is considered to be invalid.
#[instrument]
fn get_string_value(param: &str) -> Result<String> {
let fields: Vec<&str> = param.split('=').collect();
ensure!(fields.len() >= 2, ERR_INVALID_GET_VALUE_PARAM);
// We need name (but the value can be blank)
ensure!(!fields[0].is_empty(), ERR_INVALID_GET_VALUE_NO_NAME);
let value = fields[1..].join("=");
ensure!(!value.is_empty(), ERR_INVALID_GET_VALUE_NO_VALUE);
Ok(value)
}
#[instrument]
fn get_container_pipe_size(param: &str) -> Result<i32> {
let fields: Vec<&str> = param.split('=').collect();
ensure!(fields.len() == 2, ERR_INVALID_CONTAINER_PIPE_SIZE);
let key = fields[0];
ensure!(
key == CONTAINER_PIPE_SIZE_OPTION,
ERR_INVALID_CONTAINER_PIPE_SIZE_KEY
);
let value = fields[1]
.parse::<i32>()
.with_context(|| ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM)?;
ensure!(value >= 0, ERR_INVALID_CONTAINER_PIPE_NEGATIVE);
Ok(value)
}
#[instrument]
fn get_container_policy_path_value(param: &str) -> Result<String> {
let fields: Vec<&str> = param.split('=').collect();
ensure!(!fields[0].is_empty(), ERR_INVALID_CONTAINER_POLICY_PATH_KEY);
ensure!(fields.len() == 2, ERR_INVALID_CONTAINER_POLICY_PATH_VALUE);
let key = fields[0];
ensure!(
key == CONTAINER_POLICY_FILE,
ERR_INVALID_CONTAINER_POLICY_PATH_KEY
);
let value = String::from(fields[1]);
ensure!(!value.is_empty(), ERR_INVALID_CONTAINER_POLICY_PATH_VALUE);
ensure!(
value.starts_with('/'),
ERR_INVALID_CONTAINER_POLICY_ABSOLUTE
);
ensure!(!value.contains(".."), ERR_INVALID_CONTAINER_POLICY_ABSOLUTE);
Ok(value)
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::anyhow;
use std::fs::File;
use std::io::Write;
use std::time;
use tempfile::tempdir;
// Parameters:
//
// 1: expected Result
// 2: actual Result
// 3: string used to identify the test on error
macro_rules! assert_result {
($expected_result:expr, $actual_result:expr, $msg:expr) => {
if $expected_result.is_ok() {
let expected_level = $expected_result.as_ref().unwrap();
let actual_level = $actual_result.unwrap();
assert!(*expected_level == actual_level, "{}", $msg);
} else {
let expected_error = $expected_result.as_ref().unwrap_err();
let expected_error_msg = format!("{:?}", expected_error);
if let Err(actual_error) = $actual_result {
let actual_error_msg = format!("{:?}", actual_error);
assert!(expected_error_msg == actual_error_msg, "{}", $msg);
} else {
assert!(expected_error_msg == "expected error, got OK", "{}", $msg);
}
}
};
}
#[test]
fn test_new() {
let config: AgentConfig = Default::default();
assert!(!config.debug_console);
assert!(!config.dev_mode);
assert_eq!(config.log_level, DEFAULT_LOG_LEVEL);
assert_eq!(config.hotplug_timeout, DEFAULT_HOTPLUG_TIMEOUT);
assert_eq!(config.container_policy_path, "");
}
#[test]
fn test_from_cmdline() {
const TEST_SERVER_ADDR: &str = "vsock://-1:1024";
#[derive(Debug)]
struct TestData<'a> {
contents: &'a str,
env_vars: Vec<&'a str>,
debug_console: bool,
dev_mode: bool,
log_level: slog::Level,
hotplug_timeout: time::Duration,
container_pipe_size: i32,
server_addr: &'a str,
unified_cgroup_hierarchy: bool,
tracing: bool,
container_policy_path: &'a str,
}
impl Default for TestData<'_> {
fn default() -> Self {
TestData {
contents: "",
env_vars: Vec::new(),
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
tracing: false,
container_policy_path: "",
}
}
}
let tests = &[
TestData {
contents: "agent.debug_consolex agent.devmode",
dev_mode: true,
..Default::default()
},
TestData {
contents: "agent.debug_console agent.devmodex",
debug_console: true,
..Default::default()
},
TestData {
contents: "agent.logx=debug",
..Default::default()
},
TestData {
contents: "agent.log=debug",
log_level: slog::Level::Debug,
..Default::default()
},
TestData {
contents: "agent.log=debug",
env_vars: vec!["KATA_AGENT_LOG_LEVEL=trace"],
log_level: slog::Level::Trace,
..Default::default()
},
TestData {
contents: "",
..Default::default()
},
TestData {
contents: "foo",
..Default::default()
},
TestData {
contents: "foo bar",
..Default::default()
},
TestData {
contents: "foo bar",
..Default::default()
},
TestData {
contents: "foo agent bar",
..Default::default()
},
TestData {
contents: "foo debug_console agent bar devmode",
..Default::default()
},
TestData {
contents: "agent.debug_console",
debug_console: true,
..Default::default()
},
TestData {
contents: " agent.debug_console ",
debug_console: true,
..Default::default()
},
TestData {
contents: "agent.debug_console foo",
debug_console: true,
..Default::default()
},
TestData {
contents: " agent.debug_console foo",
debug_console: true,
..Default::default()
},
TestData {
contents: "foo agent.debug_console bar",
debug_console: true,
..Default::default()
},
TestData {
contents: "foo agent.debug_console",
debug_console: true,
..Default::default()
},
TestData {
contents: "foo agent.debug_console ",
debug_console: true,
..Default::default()
},
TestData {
contents: "agent.devmode",
dev_mode: true,
..Default::default()
},
TestData {
contents: " agent.devmode ",
dev_mode: true,
..Default::default()
},
TestData {
contents: "agent.devmode foo",
dev_mode: true,
..Default::default()
},
TestData {
contents: " agent.devmode foo",
dev_mode: true,
..Default::default()
},
TestData {
contents: "foo agent.devmode bar",
dev_mode: true,
..Default::default()
},
TestData {
contents: "foo agent.devmode",
dev_mode: true,
..Default::default()
},
TestData {
contents: "foo agent.devmode ",
dev_mode: true,
..Default::default()
},
TestData {
contents: "agent.devmode agent.debug_console",
debug_console: true,
dev_mode: true,
..Default::default()
},
TestData {
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=100 agent.unified_cgroup_hierarchy=a",
debug_console: true,
dev_mode: true,
hotplug_timeout: time::Duration::from_secs(100),
..Default::default()
},
TestData {
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=0 agent.unified_cgroup_hierarchy=11",
debug_console: true,
dev_mode: true,
unified_cgroup_hierarchy: true,
..Default::default()
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pipe_size=2097152 agent.unified_cgroup_hierarchy=false",
debug_console: true,
dev_mode: true,
container_pipe_size: 2097152,
..Default::default()
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pipe_size=100 agent.unified_cgroup_hierarchy=true",
debug_console: true,
dev_mode: true,
container_pipe_size: 100,
unified_cgroup_hierarchy: true,
..Default::default()
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pipe_size=0 agent.unified_cgroup_hierarchy=0",
debug_console: true,
dev_mode: true,
..Default::default()
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pip_siz=100 agent.unified_cgroup_hierarchy=1",
debug_console: true,
dev_mode: true,
unified_cgroup_hierarchy: true,
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR=foo"],
server_addr: "foo",
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR=="],
server_addr: "=",
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR==foo"],
server_addr: "=foo",
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR=foo=bar=baz="],
server_addr: "foo=bar=baz=",
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR=unix:///tmp/foo.socket"],
server_addr: "unix:///tmp/foo.socket",
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR=unix://@/tmp/foo.socket"],
server_addr: "unix://@/tmp/foo.socket",
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_LOG_LEVEL="],
log_level: DEFAULT_LOG_LEVEL,
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_LOG_LEVEL=invalid"],
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_LOG_LEVEL=debug"],
log_level: slog::Level::Debug,
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_LOG_LEVEL=debugger"],
log_level: DEFAULT_LOG_LEVEL,
..Default::default()
},
TestData {
contents: "server_addr=unix:///tmp/foo.socket",
server_addr: TEST_SERVER_ADDR,
..Default::default()
},
TestData {
contents: "agent.server_address=unix:///tmp/foo.socket",
server_addr: TEST_SERVER_ADDR,
..Default::default()
},
TestData {
contents: "agent.server_addr=unix:///tmp/foo.socket",
server_addr: "unix:///tmp/foo.socket",
..Default::default()
},
TestData {
contents: " agent.server_addr=unix:///tmp/foo.socket",
server_addr: "unix:///tmp/foo.socket",
..Default::default()
},
TestData {
contents: " agent.server_addr=unix:///tmp/foo.socket a",
server_addr: "unix:///tmp/foo.socket",
..Default::default()
},
TestData {
contents: "trace",
tracing: false,
..Default::default()
},
TestData {
contents: ".trace",
tracing: false,
..Default::default()
},
TestData {
contents: "agent.tracer",
tracing: false,
..Default::default()
},
TestData {
contents: "agent.trac",
tracing: false,
..Default::default()
},
TestData {
contents: "agent.trace",
tracing: true,
..Default::default()
},
TestData {
contents: "agent.trace=true",
tracing: true,
..Default::default()
},
TestData {
contents: "agent.trace=false",
tracing: false,
..Default::default()
},
TestData {
contents: "agent.trace=0",
tracing: false,
..Default::default()
},
TestData {
contents: "agent.trace=1",
tracing: true,
..Default::default()
},
TestData {
contents: "agent.trace=a",
tracing: false,
..Default::default()
},
TestData {
contents: "agent.trace=foo",
tracing: false,
..Default::default()
},
TestData {
contents: "agent.trace=.",
tracing: false,
..Default::default()
},
TestData {
contents: "agent.trace=,",
tracing: false,
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_TRACING="],
tracing: false,
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_TRACING=''"],
tracing: false,
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_TRACING=0"],
tracing: false,
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_TRACING=."],
tracing: false,
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_TRACING=,"],
tracing: false,
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_TRACING=foo"],
tracing: false,
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_TRACING=1"],
tracing: true,
..Default::default()
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_TRACING=true"],
tracing: true,
..Default::default()
},
TestData {
contents: "agent.container_policy_file=/etc/containers/policy.json",
container_policy_path: "/etc/containers/policy.json",
..Default::default()
},
];
let dir = tempdir().expect("failed to create tmpdir");
// Now, test various combinations of file contents and environment
// variables.
for (i, d) in tests.iter().enumerate() {
let msg = format!("test[{}]: {:?}", i, d);
let file_path = dir.path().join("cmdline");
let filename = file_path.to_str().expect("failed to create filename");
let mut file =
File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));
file.write_all(d.contents.as_bytes())
.unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));
let mut vars_to_unset = Vec::new();
for v in &d.env_vars {
let fields: Vec<&str> = v.split('=').collect();
let name = fields[0];
let value = fields[1..].join("=");
env::set_var(name, value);
vars_to_unset.push(name);
}
let config = AgentConfig::from_cmdline(filename).expect("Failed to parse command line");
assert_eq!(d.debug_console, config.debug_console, "{}", msg);
assert_eq!(d.dev_mode, config.dev_mode, "{}", msg);
assert_eq!(
d.unified_cgroup_hierarchy, config.unified_cgroup_hierarchy,
"{}",
msg
);
assert_eq!(d.log_level, config.log_level, "{}", msg);
assert_eq!(d.hotplug_timeout, config.hotplug_timeout, "{}", msg);
assert_eq!(d.container_pipe_size, config.container_pipe_size, "{}", msg);
assert_eq!(d.server_addr, config.server_addr, "{}", msg);
assert_eq!(d.tracing, config.tracing, "{}", msg);
for v in vars_to_unset {
env::remove_var(v);
}
}
}
#[test]
fn test_logrus_to_slog_level() {
#[derive(Debug)]
struct TestData<'a> {
logrus_level: &'a str,
result: Result<slog::Level>,
}
let tests = &[
TestData {
logrus_level: "",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL)),
},
TestData {
logrus_level: "foo",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL)),
},
TestData {
logrus_level: "debugging",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL)),
},
TestData {
logrus_level: "xdebug",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL)),
},
TestData {
logrus_level: "trace",
result: Ok(slog::Level::Trace),
},
TestData {
logrus_level: "debug",
result: Ok(slog::Level::Debug),
},
TestData {
logrus_level: "info",
result: Ok(slog::Level::Info),
},
TestData {
logrus_level: "warn",
result: Ok(slog::Level::Warning),
},
TestData {
logrus_level: "warning",
result: Ok(slog::Level::Warning),
},
TestData {
logrus_level: "error",
result: Ok(slog::Level::Error),
},
TestData {
logrus_level: "critical",
result: Ok(slog::Level::Critical),
},
TestData {
logrus_level: "fatal",
result: Ok(slog::Level::Critical),
},
TestData {
logrus_level: "panic",
result: Ok(slog::Level::Critical),
},
];
for (i, d) in tests.iter().enumerate() {
let msg = format!("test[{}]: {:?}", i, d);
let result = logrus_to_slog_level(d.logrus_level);
let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, msg);
}
}
#[test]
fn test_get_log_level() {
#[derive(Debug)]
struct TestData<'a> {
param: &'a str,
result: Result<slog::Level>,
}
let tests = &[
TestData {
param: "",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL_PARAM)),
},
TestData {
param: "=",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL_KEY)),
},
TestData {
param: "x=",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL_KEY)),
},
TestData {
param: "=y",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL_KEY)),
},
TestData {
param: "==",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL_PARAM)),
},
TestData {
param: "= =",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL_PARAM)),
},
TestData {
param: "x=y",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL_KEY)),
},
TestData {
param: "agent=debug",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL_KEY)),
},
TestData {
param: "agent.logg=debug",
result: Err(anyhow!(ERR_INVALID_LOG_LEVEL_KEY)),
},
TestData {
param: "agent.log=trace",
result: Ok(slog::Level::Trace),
},
TestData {
param: "agent.log=debug",
result: Ok(slog::Level::Debug),
},
TestData {
param: "agent.log=info",
result: Ok(slog::Level::Info),
},
TestData {
param: "agent.log=warn",
result: Ok(slog::Level::Warning),
},
TestData {
param: "agent.log=warning",
result: Ok(slog::Level::Warning),
},
TestData {
param: "agent.log=error",
result: Ok(slog::Level::Error),
},
TestData {
param: "agent.log=critical",
result: Ok(slog::Level::Critical),
},
TestData {
param: "agent.log=fatal",
result: Ok(slog::Level::Critical),
},
TestData {
param: "agent.log=panic",
result: Ok(slog::Level::Critical),
},
];
for (i, d) in tests.iter().enumerate() {
let msg = format!("test[{}]: {:?}", i, d);
let result = get_log_level(d.param);
let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, msg);
}
}
#[test]
fn test_get_hotplug_timeout() {
#[derive(Debug)]
struct TestData<'a> {
param: &'a str,
result: Result<time::Duration>,
}
let tests = &[
TestData {
param: "",
result: Err(anyhow!(ERR_INVALID_HOTPLUG_TIMEOUT)),
},
TestData {
param: "agent.hotplug_timeout",
result: Err(anyhow!(ERR_INVALID_HOTPLUG_TIMEOUT)),
},
TestData {
param: "foo=bar",
result: Err(anyhow!(ERR_INVALID_HOTPLUG_TIMEOUT_KEY)),
},
TestData {
param: "agent.hotplug_timeot=1",
result: Err(anyhow!(ERR_INVALID_HOTPLUG_TIMEOUT_KEY)),
},
TestData {
param: "agent.hotplug_timeout=1",
result: Ok(time::Duration::from_secs(1)),
},
TestData {
param: "agent.hotplug_timeout=3",
result: Ok(time::Duration::from_secs(3)),
},
TestData {
param: "agent.hotplug_timeout=3600",
result: Ok(time::Duration::from_secs(3600)),
},
TestData {
param: "agent.hotplug_timeout=0",
result: Ok(time::Duration::from_secs(0)),
},
TestData {
param: "agent.hotplug_timeout=-1",
result: Err(anyhow!(
"unable to parse hotplug timeout
Caused by:
invalid digit found in string"
)),
},
TestData {
param: "agent.hotplug_timeout=4jbsdja",
result: Err(anyhow!(
"unable to parse hotplug timeout
Caused by:
invalid digit found in string"
)),
},
TestData {
param: "agent.hotplug_timeout=foo",
result: Err(anyhow!(
"unable to parse hotplug timeout
Caused by:
invalid digit found in string"
)),
},
TestData {
param: "agent.hotplug_timeout=j",
result: Err(anyhow!(
"unable to parse hotplug timeout
Caused by:
invalid digit found in string"
)),
},
];
for (i, d) in tests.iter().enumerate() {
let msg = format!("test[{}]: {:?}", i, d);
let result = get_hotplug_timeout(d.param);
let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, msg);
}
}
#[test]
fn test_get_container_pipe_size() {
#[derive(Debug)]
struct TestData<'a> {
param: &'a str,
result: Result<i32>,
}
let tests = &[
TestData {
param: "",
result: Err(anyhow!(ERR_INVALID_CONTAINER_PIPE_SIZE)),
},
TestData {
param: "agent.container_pipe_size",
result: Err(anyhow!(ERR_INVALID_CONTAINER_PIPE_SIZE)),
},
TestData {
param: "foo=bar",
result: Err(anyhow!(ERR_INVALID_CONTAINER_PIPE_SIZE_KEY)),
},
TestData {
param: "agent.container_pip_siz=1",
result: Err(anyhow!(ERR_INVALID_CONTAINER_PIPE_SIZE_KEY)),
},
TestData {
param: "agent.container_pipe_size=1",
result: Ok(1),
},
TestData {
param: "agent.container_pipe_size=3",
result: Ok(3),
},
TestData {
param: "agent.container_pipe_size=2097152",
result: Ok(2097152),
},
TestData {
param: "agent.container_pipe_size=0",
result: Ok(0),
},
TestData {
param: "agent.container_pipe_size=-1",
result: Err(anyhow!(ERR_INVALID_CONTAINER_PIPE_NEGATIVE)),
},
TestData {
param: "agent.container_pipe_size=foobar",
result: Err(anyhow!(
"unable to parse container pipe size
Caused by:
invalid digit found in string"
)),
},
TestData {
param: "agent.container_pipe_size=j",
result: Err(anyhow!(
"unable to parse container pipe size
Caused by:
invalid digit found in string",
)),
},
TestData {
param: "agent.container_pipe_size=4jbsdja",
result: Err(anyhow!(
"unable to parse container pipe size
Caused by:
invalid digit found in string"
)),
},
TestData {
param: "agent.container_pipe_size=4294967296",
result: Err(anyhow!(
"unable to parse container pipe size
Caused by:
number too large to fit in target type"
)),
},
];
for (i, d) in tests.iter().enumerate() {
let msg = format!("test[{}]: {:?}", i, d);
let result = get_container_pipe_size(d.param);
let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, msg);
}
}
#[test]
fn test_get_string_value() {
#[derive(Debug)]
struct TestData<'a> {
param: &'a str,
result: Result<String>,
}
let tests = &[
TestData {
param: "",
result: Err(anyhow!(ERR_INVALID_GET_VALUE_PARAM)),
},
TestData {
param: "=",
result: Err(anyhow!(ERR_INVALID_GET_VALUE_NO_NAME)),
},
TestData {
param: "==",
result: Err(anyhow!(ERR_INVALID_GET_VALUE_NO_NAME)),
},
TestData {
param: "x=",
result: Err(anyhow!(ERR_INVALID_GET_VALUE_NO_VALUE)),
},
TestData {
param: "x==",
result: Ok("=".into()),
},
TestData {
param: "x===",
result: Ok("==".into()),
},
TestData {
param: "x==x",
result: Ok("=x".into()),
},
TestData {
param: "x=x",
result: Ok("x".into()),
},
TestData {
param: "x=x=",
result: Ok("x=".into()),
},
TestData {
param: "x=x=x",
result: Ok("x=x".into()),
},
TestData {
param: "foo=bar",
result: Ok("bar".into()),
},
TestData {
param: "x= =",
result: Ok(" =".into()),
},
TestData {
param: "x= =",
result: Ok(" =".into()),
},
TestData {
param: "x= = ",
result: Ok(" = ".into()),
},
];
for (i, d) in tests.iter().enumerate() {
let msg = format!("test[{}]: {:?}", i, d);
let result = get_string_value(d.param);
let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, msg);
}
}
#[test]
fn test_get_container_policy_path_value() {
#[derive(Debug)]
struct TestData<'a> {
param: &'a str,
result: Result<String>,
}
let tests = &[
TestData {
param: "",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
},
TestData {
param: "agent.container_policy_file",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_VALUE)),
},
TestData {
param: "agent.container_policy_file=",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_VALUE)),
},
TestData {
param: "foo=bar",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
},
TestData {
param: "agent.policy_path=/another/absolute/path.json",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
},
TestData {
param: "agent.container_policy_file=/etc/container/policy.json",
result: Ok("/etc/container/policy.json".into()),
},
TestData {
param: "agent.container_policy_file=/another/absolute/path.json",
result: Ok("/another/absolute/path.json".into()),
},
TestData {
param: "agent.container_policy_file=./relative/path.json",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
},
TestData {
param: "agent.container_policy_file=./relative/path.json",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
},
TestData {
param: "agent.container_policy_file=../../relative/path.json",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
},
TestData {
param: "agent.container_policy_file=junk_string",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
},
];
for (i, d) in tests.iter().enumerate() {
let msg = format!("test[{}]: {:?}", i, d);
let result = get_container_policy_path_value(d.param);
let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, msg);
}
}
#[test]
fn test_config_builder_from_string() {
let config = AgentConfig::from_str(
r#"
dev_mode = true
server_addr = 'vsock://8:2048'
[endpoints]
allowed = ["CreateContainer", "StartContainer"]
"#,
)
.unwrap();
// Verify that the all_allowed flag is false
assert!(!config.endpoints.all_allowed);
// Verify that the override worked
assert!(config.dev_mode);
assert_eq!(config.server_addr, "vsock://8:2048");
assert_eq!(
config.endpoints.allowed,
vec!["CreateContainer".to_string(), "StartContainer".to_string()]
.iter()
.cloned()
.collect()
);
// Verify that the default values are valid
assert_eq!(config.hotplug_timeout, DEFAULT_HOTPLUG_TIMEOUT);
}
}
| 33.00934 | 133 | 0.505285 |
14ead17cf2195c86713cd6394319d21e16e1e308 | 2,638 | /* Copyright (c) Fortanix, Inc.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(asm)]
#![feature(llvm_asm)]
extern crate aesm_client;
extern crate clap;
extern crate sgx_isa;
extern crate sgxs;
extern crate sgxs_loaders;
use std::fs::File;
use clap::{App, Arg};
use aesm_client::AesmClient;
use sgx_isa::Enclu;
use sgxs::loader::{Load, Tcs};
use sgxs::sigstruct::read as read_sigstruct;
use sgxs_loaders::isgx;
fn enclu_eenter(tcs: &mut dyn Tcs) {
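    // ENCLU[EENTER]: EAX holds the leaf number (Enclu::EEnter), RBX the
    // address of the TCS to enter, and RCX is loaded with the AEP
    // (asynchronous exit pointer) the CPU jumps to on an AEX.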
let result: u32;
unsafe {
llvm_asm!("
lea aep(%rip),%rcx
jmp enclu
aep:
xor %eax,%eax
jmp post
enclu:
enclu
post:
" : "={eax}"(result)
: "{eax}"(Enclu::EEnter), "{rbx}"(tcs.address())
: "rcx"
: "volatile"
)
};
if result == 0 {
println!("Got AEX");
} else if result == (Enclu::EExit as u32) {
println!("Got EEXIT");
} else {
panic!("Invalid return value in EAX! eax={}", result);
}
}
fn main() {
let matches = App::new("sgxs-load")
.about("SGXS loader")
.arg(
Arg::with_name("debug")
.short("d")
.long("debug")
.requires("le-sgxs")
.help("Request a debug token"),
)
.arg(
Arg::with_name("device")
.long("device")
.takes_value(true)
.help("Sets the SGX device to use (default: /dev/sgx)"),
)
.arg(
Arg::with_name("sgxs")
.required(true)
.help("Sets the enclave SGXS file to use"),
)
.arg(
Arg::with_name("sigstruct")
.required(true)
.help("Sets the enclave SIGSTRUCT file to use"),
)
.get_matches();
let mut dev = match matches.value_of("device") {
Some(dev) => isgx::Device::open(dev),
None => isgx::Device::new(),
}
.unwrap()
.einittoken_provider(AesmClient::new())
.build();
let mut file = File::open(matches.value_of("sgxs").unwrap()).unwrap();
let sigstruct =
read_sigstruct(&mut File::open(matches.value_of("sigstruct").unwrap()).unwrap()).unwrap();
let mut mapping = dev
.load(
&mut file,
&sigstruct,
sigstruct.attributes,
sigstruct.miscselect,
)
.unwrap();
let tcs = &mut mapping.tcss[0];
enclu_eenter(tcs);
}
| 25.61165 | 98 | 0.526156 |
48a66e89d3620fcea98ca816dbbdda45bdfdc159 | 1,861 | // Copyright 2020-2021 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use crate::Error;
use bee_common::packable::{Packable, Read, Write};
use alloc::boxed::Box;
const ED25519_PUBLIC_KEY_LENGTH: usize = 32;
const ED25519_SIGNATURE_LENGTH: usize = 64;
/// An Ed25519 signature.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Ed25519Signature {
public_key: [u8; ED25519_PUBLIC_KEY_LENGTH],
signature: Box<[u8]>,
}
impl Ed25519Signature {
/// The signature kind of an `Ed25519Signature`.
pub const KIND: u8 = 0;
/// Creates a new `Ed25519Signature`.
pub fn new(public_key: [u8; ED25519_PUBLIC_KEY_LENGTH], signature: [u8; ED25519_SIGNATURE_LENGTH]) -> Self {
Self {
public_key,
signature: Box::new(signature),
}
}
/// Returns the public key of an `Ed25519Signature`.
pub fn public_key(&self) -> &[u8; ED25519_PUBLIC_KEY_LENGTH] {
&self.public_key
}
    /// Returns the actual signature of an `Ed25519Signature`.
pub fn signature(&self) -> &[u8] {
&self.signature
}
}
impl Packable for Ed25519Signature {
type Error = Error;
fn packed_len(&self) -> usize {
ED25519_PUBLIC_KEY_LENGTH + ED25519_SIGNATURE_LENGTH
}
fn pack<W: Write>(&self, writer: &mut W) -> Result<(), Self::Error> {
self.public_key.pack(writer)?;
writer.write_all(&self.signature)?;
Ok(())
}
fn unpack_inner<R: Read + ?Sized, const CHECK: bool>(reader: &mut R) -> Result<Self, Self::Error> {
let public_key = <[u8; ED25519_PUBLIC_KEY_LENGTH]>::unpack_inner::<R, CHECK>(reader)?;
let signature = <[u8; ED25519_SIGNATURE_LENGTH]>::unpack_inner::<R, CHECK>(reader)?;
Ok(Self::new(public_key, signature))
}
}
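// Illustrative sketch (not part of the original file): construct a signature
// from placeholder bytes and read back its parts via the accessors above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn new_exposes_parts_and_packed_len() {
        let sig = Ed25519Signature::new(
            [1u8; ED25519_PUBLIC_KEY_LENGTH],
            [2u8; ED25519_SIGNATURE_LENGTH],
        );
        assert_eq!(sig.public_key(), &[1u8; ED25519_PUBLIC_KEY_LENGTH]);
        assert_eq!(sig.signature(), &[2u8; ED25519_SIGNATURE_LENGTH][..]);
        assert_eq!(
            sig.packed_len(),
            ED25519_PUBLIC_KEY_LENGTH + ED25519_SIGNATURE_LENGTH
        );
    }
}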
| 28.630769 | 112 | 0.648039 |
91bfce6703412d0db74ec582b072f440521e65f0 | 1,318 | //! Module for minutes.
//!
//! The JSON format for this type is just a u8.
//!
//! - u8::from(&minute) converts that reference into a u8;
//! - u8::from(minute) converts this Minute into a u8;
//! - Minute::from(a_u8) converts a u8 value into a Minute;
//!
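//! A minimal usage sketch of the conversions above (illustrative values):
//!
//! ```ignore
//! let minute = Minute::new(30);
//! assert_eq!(u8::from(&minute), 30); // by reference
//! assert_eq!(u8::from(minute), 30);  // by value (consumes `minute`)
//! assert_eq!(u8::from(Minute::from(59u8)), 59);
//! ```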
use near_sdk::{
borsh::{ self, BorshDeserialize, BorshSerialize },
serde::{ Deserialize, Serialize },
};
/// Represents a minute value.
#[derive(BorshDeserialize, BorshSerialize, Clone, Deserialize, Serialize)]
#[serde(crate = "near_sdk::serde")]
pub struct Minute(u8);
impl Minute {
    /// Constructs a minute instance.
    ///
    /// # Panics
    ///
    /// If the value is 60 or greater.
    ///
pub fn new(minute: u8) -> Minute{
assert!(minute < 60, "Invalid value for minute. Must be lower than 60. Current: {}.", minute);
Minute(minute)
}
}
/// Lets us use u8::from(&our_minute)
impl From<&Minute> for u8{
fn from(minute: &Minute) -> u8 {
let &Minute(result) = minute;
result
}
}
/// Lets us use u8::from(our_minute)
impl From<Minute> for u8{
fn from(minute: Minute) -> u8 {
u8::from(&minute)
}
}
/// Lets us use Minute::from(our_u8)
impl From<u8> for Minute{
fn from(minute: u8) -> Minute {
Minute::new(minute)
}
}
| 21.606557 | 102 | 0.606222 |
ccdc9adefaa1785c24387ee1deb10e4802ee4201 | 21,598 | #![allow(warnings)]
use log::{error, trace, warn};
use std::convert::TryFrom;
// use std::ffi::c_void;
use std::fmt;
// use std::mem;
use std::rc::Rc;
use crate::convert::{Convert, TryConvert};
use crate::exception::{ExceptionHandler, LastError};
use crate::gc::MrbGarbageCollection;
use crate::sys;
use crate::types::{Int, Ruby, Rust};
use crate::Artichoke;
use crate::ArtichokeError;
/// Max argument count for function calls including initialize and yield.
pub const MRB_FUNCALL_ARGC_MAX: usize = 16;
struct ProtectArgs {
slf: sys::mrb_value,
func_sym: u32,
args: Vec<sys::mrb_value>,
}
struct ProtectArgsWithBlock {
slf: sys::mrb_value,
func_sym: u32,
args: Vec<sys::mrb_value>,
block: sys::mrb_value,
}
impl ProtectArgs {
fn new(slf: sys::mrb_value, func_sym: u32, args: Vec<sys::mrb_value>) -> Self {
Self {
slf,
func_sym,
args,
}
}
fn with_block(self, block: sys::mrb_value) -> ProtectArgsWithBlock {
ProtectArgsWithBlock {
slf: self.slf,
func_sym: self.func_sym,
args: self.args,
block,
}
}
}
#[allow(clippy::module_name_repetitions)]
pub trait ValueLike
where
Self: Sized,
{
fn inner(&self) -> sys::mrb_value;
fn interp(&self) -> &Artichoke;
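    /// Invoke the Ruby method `func` on `self` with `args`, converting the
    /// result to `T`. An illustrative sketch (the Ruby method name here is an
    /// assumption; see the tests at the bottom of this file for verified
    /// usages):
    ///
    /// ```ignore
    /// let s = Value::convert(&interp, "foo");
    /// let is_empty = s.funcall::<bool, _, _>("empty?", &[])?;
    /// ```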
fn funcall<T, M, A>(&self, func: M, args: A) -> Result<T, ArtichokeError>
where
T: TryConvert<Value, From = Ruby, To = Rust>,
M: AsRef<str>,
A: AsRef<[Value]>,
{
unsafe extern "C" fn run_protected(
mrb: *mut sys::mrb_state,
data: sys::mrb_value,
) -> sys::mrb_value {
let ptr = sys::mrb_sys_cptr_ptr(data);
let args = Box::from_raw(ptr as *mut ProtectArgs);
let value = sys::mrb_funcall_argv(
mrb,
(*args).slf,
(*args).func_sym,
                // This conversion always succeeds because we've already
                // checked that we have fewer than `MRB_FUNCALL_ARGC_MAX`
                // args, which is less than the i64 max value.
Int::try_from((*args).args.len()).unwrap_or_default(),
(*args).args.as_ptr(),
);
sys::mrb_sys_raise_current_exception(mrb);
value
}
// Ensure the borrow is out of scope by the time we eval code since
// Rust-backed files and types may need to mutably borrow the `Artichoke` to
// get access to the underlying `ArtichokeState`.
let (_mrb, _ctx) = {
let borrow = self.interp().borrow();
(borrow.mrb, borrow.ctx)
};
let _arena = self.interp().create_arena_savepoint();
let args = args.as_ref().iter().map(Value::inner).collect::<Vec<_>>();
if args.len() > MRB_FUNCALL_ARGC_MAX {
warn!(
"Too many args supplied to funcall: given {}, max {}.",
args.len(),
MRB_FUNCALL_ARGC_MAX
);
return Err(ArtichokeError::TooManyArgs {
given: args.len(),
max: MRB_FUNCALL_ARGC_MAX,
});
}
trace!(
"Calling {}#{} with {} args",
Ruby::from(self.inner()),
func.as_ref(),
args.len()
);
let _args = Box::new(ProtectArgs::new(
self.inner(),
self.interp().borrow_mut().sym_intern(func.as_ref()),
args,
));
// let value = unsafe {
// //let data = sys::mrb_sys_cptr_value(mrb, Box::into_raw(args) as *mut c_void);
// let data = sys::mrb_sys_nil_value();
// let mut state = <mem::MaybeUninit<sys::mrb_bool>>::uninit();
// let value = sys::mrb_protect(mrb, Some(run_protected), data, state.as_mut_ptr());
// if state.assume_init() != 0 {
// (*mrb).exc = sys::mrb_sys_obj_ptr(value);
// }
// value
// };
let value = Value::new(self.interp(), unsafe { sys::mrb_sys_nil_value() });
match self.interp().last_error() {
LastError::Some(exception) => {
warn!("runtime error with exception backtrace: {}", exception);
Err(ArtichokeError::Exec(exception.to_string()))
}
LastError::UnableToExtract(err) => {
error!("failed to extract exception after runtime error: {}", err);
Err(err)
}
LastError::None if value.is_unreachable() => {
// Unreachable values are internal to the mruby interpreter and
// interacting with them via the C API is unspecified and may
// result in a segfault.
//
// See: https://github.com/mruby/mruby/issues/4460
Err(ArtichokeError::UnreachableValue(value.inner().tt))
}
LastError::None => unsafe {
T::try_convert(self.interp(), value).map_err(ArtichokeError::ConvertToRust)
},
}
}
fn funcall_with_block<T, M, A>(
&self,
func: M,
args: A,
block: Value,
) -> Result<T, ArtichokeError>
where
T: TryConvert<Value, From = Ruby, To = Rust>,
M: AsRef<str>,
A: AsRef<[Value]>,
{
unsafe extern "C" fn run_protected(
mrb: *mut sys::mrb_state,
data: sys::mrb_value,
) -> sys::mrb_value {
let ptr = sys::mrb_sys_cptr_ptr(data);
let args = Box::from_raw(ptr as *mut ProtectArgsWithBlock);
let value = sys::mrb_funcall_with_block(
mrb,
(*args).slf,
(*args).func_sym,
                // This conversion always succeeds because we've already
                // checked that we have fewer than `MRB_FUNCALL_ARGC_MAX`
                // args, which is less than the i64 max value.
Int::try_from((*args).args.len()).unwrap_or_default(),
(*args).args.as_ptr(),
(*args).block,
);
sys::mrb_sys_raise_current_exception(mrb);
value
}
// Ensure the borrow is out of scope by the time we eval code since
// Rust-backed files and types may need to mutably borrow the `Artichoke` to
// get access to the underlying `ArtichokeState`.
let (_mrb, _ctx) = {
let borrow = self.interp().borrow();
(borrow.mrb, borrow.ctx)
};
let _arena = self.interp().create_arena_savepoint();
let args = args.as_ref().iter().map(Value::inner).collect::<Vec<_>>();
if args.len() > MRB_FUNCALL_ARGC_MAX {
warn!(
"Too many args supplied to funcall_with_block: given {}, max {}.",
args.len(),
MRB_FUNCALL_ARGC_MAX
);
return Err(ArtichokeError::TooManyArgs {
given: args.len(),
max: MRB_FUNCALL_ARGC_MAX,
});
}
trace!(
"Calling {}#{} with {} args and block",
Ruby::from(self.inner()),
func.as_ref(),
args.len()
);
let _args = Box::new(
ProtectArgs::new(
self.inner(),
self.interp().borrow_mut().sym_intern(func.as_ref()),
args,
)
.with_block(block.inner()),
);
// let value = unsafe {
// //let data = sys::mrb_sys_cptr_value(mrb, Box::into_raw(args) as *mut c_void);
// let data = sys::mrb_sys_nil_value();
// let mut state = <mem::MaybeUninit<sys::mrb_bool>>::uninit();
// let value = sys::mrb_protect(mrb, Some(run_protected), data, state.as_mut_ptr());
// if state.assume_init() != 0 {
// (*mrb).exc = sys::mrb_sys_obj_ptr(value);
// }
// value
// };
let value = Value::new(self.interp(), unsafe { sys::mrb_sys_nil_value() });
match self.interp().last_error() {
LastError::Some(exception) => {
warn!("runtime error with exception backtrace: {}", exception);
Err(ArtichokeError::Exec(exception.to_string()))
}
LastError::UnableToExtract(err) => {
error!("failed to extract exception after runtime error: {}", err);
Err(err)
}
LastError::None if value.is_unreachable() => {
// Unreachable values are internal to the mruby interpreter and
// interacting with them via the C API is unspecified and may
// result in a segfault.
//
// See: https://github.com/mruby/mruby/issues/4460
Err(ArtichokeError::UnreachableValue(value.inner().tt))
}
LastError::None => unsafe {
T::try_convert(self.interp(), value).map_err(ArtichokeError::ConvertToRust)
},
}
}
fn respond_to(&self, method: &str) -> Result<bool, ArtichokeError> {
let method = Value::convert(self.interp(), method);
self.funcall::<bool, _, _>("respond_to?", &[method])
}
}
/// Wrapper around a [`sys::mrb_value`].
pub struct Value {
interp: Artichoke,
value: sys::mrb_value,
}
impl Value {
/// Construct a new [`Value`] from an interpreter and [`sys::mrb_value`].
pub fn new(interp: &Artichoke, value: sys::mrb_value) -> Self {
Self {
interp: Rc::clone(interp),
value,
}
}
/// The [`sys::mrb_value`] that this [`Value`] wraps.
pub fn inner(&self) -> sys::mrb_value {
self.value
}
/// Return this values [Rust-mapped type tag](Ruby).
pub fn ruby_type(&self) -> Ruby {
Ruby::from(self.value)
}
/// Some type tags like [`MRB_TT_UNDEF`](sys::mrb_vtype::MRB_TT_UNDEF) are
/// internal to the mruby VM and manipulating them with the [`sys`] API is
/// unspecified and may result in a segfault.
///
/// After extracting a [`sys::mrb_value`] from the interpreter, check to see
/// if the value is [unreachable](Ruby::Unreachable) and propagate an
/// [`ArtichokeError::UnreachableValue`](crate::ArtichokeError::UnreachableValue) error.
///
/// See: <https://github.com/mruby/mruby/issues/4460>
pub fn is_unreachable(&self) -> bool {
self.ruby_type() == Ruby::Unreachable
}
/// Prevent this value from being garbage collected.
///
/// Calls [`sys::mrb_gc_protect`] on this value which adds it to the GC
/// arena. This object will remain in the arena until
/// [`ArenaIndex::restore`](crate::gc::ArenaIndex::restore) restores the
/// arena to an index before this call to protect.
pub fn protect(&self) {
// let mrb = self.interp.borrow().mrb;
// let value = self.value;
// unsafe {
// sys::mrb_gc_protect(mrb, value);
// }
}
/// Return whether this object is unreachable by any GC roots.
pub fn is_dead(&self) -> bool {
unsafe { sys::mrb_sys_value_is_dead(self.interp.borrow().mrb, self.value) }
}
/// Call `#to_s` on this [`Value`].
///
/// This function can never fail.
pub fn to_s(&self) -> String {
self.funcall::<String, _, _>("to_s", &[])
.unwrap_or_else(|_| "<unknown>".to_owned())
}
/// Generate a debug representation of self.
///
/// Format:
///
/// ```ruby
/// "#{self.class.name}<#{self.inspect}>"
/// ```
///
/// This function can never fail.
pub fn to_s_debug(&self) -> String {
format!("{}<{}>", self.ruby_type().class_name(), self.inspect())
}
/// Call `#inspect` on this [`Value`].
///
/// This function can never fail.
pub fn inspect(&self) -> String {
self.funcall::<String, _, _>("inspect", &[])
.unwrap_or_else(|_| "<unknown>".to_owned())
}
/// Consume `self` and try to convert `self` to type `T`.
///
/// If you do not want to consume this [`Value`], use [`Value::itself`].
pub fn try_into<T>(self) -> Result<T, ArtichokeError>
where
T: TryConvert<Self, From = Ruby, To = Rust>,
{
let interp = Rc::clone(&self.interp);
unsafe { T::try_convert(&interp, self) }.map_err(ArtichokeError::ConvertToRust)
}
/// Call `#itself` on this [`Value`] and try to convert the result to type
/// `T`.
///
/// If you want to consume this [`Value`], use [`Value::try_into`].
pub fn itself<T>(&self) -> Result<T, ArtichokeError>
where
T: TryConvert<Self, From = Ruby, To = Rust>,
{
self.clone().try_into::<T>()
}
/// Call `#freeze` on this [`Value`] and consume `self`.
pub fn freeze(self) -> Result<Self, ArtichokeError> {
let frozen = self.funcall::<Self, _, _>("freeze", &[])?;
frozen.protect();
Ok(frozen)
}
}
impl ValueLike for Value {
fn inner(&self) -> sys::mrb_value {
self.value
}
fn interp(&self) -> &Artichoke {
&self.interp
}
}
impl Convert<Value> for Value {
type From = Ruby;
type To = Rust;
fn convert(_interp: &Artichoke, value: Self) -> Self {
value
}
}
impl fmt::Display for Value {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_s())
}
}
impl fmt::Debug for Value {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_s_debug())
}
}
impl Clone for Value {
fn clone(&self) -> Self {
if self.ruby_type() == Ruby::Data {
panic!("Cannot safely clone a Value with type tag Ruby::Data.");
}
Self {
interp: Rc::clone(&self.interp),
value: self.value,
}
}
}
#[cfg(test)]
mod tests {
use crate::convert::Convert;
use crate::eval::Eval;
use crate::gc::MrbGarbageCollection;
use crate::value::{Value, ValueLike};
use crate::ArtichokeError;
#[test]
fn to_s_true() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, true);
let string = value.to_s();
assert_eq!(string, "true");
}
#[test]
fn debug_true() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, true);
let debug = value.to_s_debug();
assert_eq!(debug, "Boolean<true>");
}
#[test]
fn inspect_true() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, true);
let debug = value.inspect();
assert_eq!(debug, "true");
}
#[test]
fn to_s_false() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, false);
let string = value.to_s();
assert_eq!(string, "false");
}
#[test]
fn debug_false() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, false);
let debug = value.to_s_debug();
assert_eq!(debug, "Boolean<false>");
}
#[test]
fn inspect_false() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, false);
let debug = value.inspect();
assert_eq!(debug, "false");
}
#[test]
fn to_s_nil() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, None::<Value>);
let string = value.to_s();
assert_eq!(string, "");
}
#[test]
fn debug_nil() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, None::<Value>);
let debug = value.to_s_debug();
assert_eq!(debug, "NilClass<nil>");
}
#[test]
fn inspect_nil() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, None::<Value>);
let debug = value.inspect();
assert_eq!(debug, "nil");
}
#[test]
fn to_s_fixnum() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, 255);
let string = value.to_s();
assert_eq!(string, "255");
}
#[test]
fn debug_fixnum() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, 255);
let debug = value.to_s_debug();
assert_eq!(debug, "Fixnum<255>");
}
#[test]
fn inspect_fixnum() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, 255);
let debug = value.inspect();
assert_eq!(debug, "255");
}
#[test]
fn to_s_string() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, "interstate");
let string = value.to_s();
assert_eq!(string, "interstate");
}
#[test]
fn debug_string() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, "interstate");
let debug = value.to_s_debug();
assert_eq!(debug, r#"String<"interstate">"#);
}
#[test]
fn inspect_string() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, "interstate");
let debug = value.inspect();
assert_eq!(debug, r#""interstate""#);
}
#[test]
fn to_s_empty_string() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, "");
let string = value.to_s();
assert_eq!(string, "");
}
#[test]
fn debug_empty_string() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, "");
let debug = value.to_s_debug();
assert_eq!(debug, r#"String<"">"#);
}
#[test]
fn inspect_empty_string() {
let interp = crate::interpreter().expect("init");
let value = Value::convert(&interp, "");
let debug = value.inspect();
assert_eq!(debug, r#""""#);
}
#[test]
fn is_dead() {
let interp = crate::interpreter().expect("init");
let arena = interp.create_arena_savepoint();
let live = interp.eval("'dead'").expect("value");
assert!(!live.is_dead());
let dead = live;
let live = interp.eval("'live'").expect("value");
arena.restore();
interp.full_gc();
// unreachable objects are dead after a full garbage collection
assert!(dead.is_dead());
// the result of the most recent eval is always live even after a full
// garbage collection
assert!(!live.is_dead());
}
#[test]
fn immediate_is_dead() {
let interp = crate::interpreter().expect("init");
let arena = interp.create_arena_savepoint();
let live = interp.eval("27").expect("value");
assert!(!live.is_dead());
let immediate = live;
let live = interp.eval("64").expect("value");
arena.restore();
interp.full_gc();
// immediate objects are never dead
assert!(!immediate.is_dead());
// the result of the most recent eval is always live even after a full
// garbage collection
assert!(!live.is_dead());
// Fixnums are immediate even if they are created directly without an
// interpreter.
let fixnum = Value::convert(&interp, 99);
assert!(!fixnum.is_dead());
}
#[test]
fn funcall() {
let interp = crate::interpreter().expect("init");
let nil = Value::convert(&interp, None::<Value>);
assert!(nil.funcall::<bool, _, _>("nil?", &[]).expect("nil?"));
let s = Value::convert(&interp, "foo");
assert!(!s.funcall::<bool, _, _>("nil?", &[]).expect("nil?"));
let delim = Value::convert(&interp, "");
let split = s
.funcall::<Vec<String>, _, _>("split", &[delim])
.expect("split");
assert_eq!(split, vec!["f".to_owned(), "o".to_owned(), "o".to_owned()])
}
#[test]
fn funcall_different_types() {
let interp = crate::interpreter().expect("init");
let nil = Value::convert(&interp, None::<Value>);
let s = Value::convert(&interp, "foo");
let eql = nil.funcall::<bool, _, _>("==", &[s]);
assert_eq!(eql, Ok(false));
}
#[test]
fn funcall_type_error() {
let interp = crate::interpreter().expect("init");
let nil = Value::convert(&interp, None::<Value>);
let s = Value::convert(&interp, "foo");
let result = s.funcall::<String, _, _>("+", &[nil]);
assert_eq!(
result,
Err(ArtichokeError::Exec(
"TypeError: expected String".to_owned()
))
);
}
#[test]
fn funcall_method_not_exists() {
let interp = crate::interpreter().expect("init");
let nil = Value::convert(&interp, None::<Value>);
let s = Value::convert(&interp, "foo");
let result = nil.funcall::<bool, _, _>("garbage_method_name", &[s]);
assert_eq!(
result,
Err(ArtichokeError::Exec(
"NoMethodError: undefined method 'garbage_method_name'".to_owned()
))
);
}
}
| 31.529927 | 96 | 0.538522 |
de5394bf3c7119089c7f353ef128e9d1d357d569 | 3,694 | // Copyright 2018, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
// Author: Ryan A. Pavlik <[email protected]>
use tokio::prelude::*;
/// Pull as many items from the stream as possible until an error, end of stream, or NotReady.
pub fn drain_stream<T: Stream>(stream: &mut T) -> Poll<(), T::Error> {
drain_poll_fn(|| stream.poll())
}
/// Pull as many items from the poll function as possible until an error, end of stream, or NotReady.
pub fn drain_poll_fn<F, T, E>(mut func: F) -> Poll<(), E>
where
F: FnMut() -> Poll<Option<T>, E>,
{
loop {
match try_ready!(func()) {
Some(_) => {}
None => {
return Ok(Async::Ready(()));
}
}
}
}
pub trait StreamExtras: Stream + Sized {
    //fn drain(self) -> Drain<Self>;
    /// Wrap this stream in a [`Drain`] future that discards every item and
    /// resolves once the stream is exhausted.
    fn drain(self) -> Drain<Self> {
        Drain::new(self)
    }
}
impl<S> StreamExtras for S where S: Stream + Sized {}
#[derive(Debug)]
pub struct Drain<S>
where
S: Stream + Sized,
{
inner: Option<S>,
}
impl<S> Drain<S>
where
S: Stream + Sized,
{
pub fn new(stream: S) -> Drain<S> {
Drain {
inner: Some(stream),
}
}
}
impl<S> Future for Drain<S>
where
S: Stream + Sized,
{
type Item = ();
type Error = S::Error;
    fn poll(&mut self) -> Poll<(), Self::Error> {
        // Take the stream out of the Option so it can be polled by value; it
        // is put back below if it has not been exhausted yet.
        let inner = self.inner.take();
if let Some(mut stream) = inner {
loop {
match stream.poll() {
Ok(Async::Ready(Some(_))) => {}
Ok(Async::Ready(None)) => {
return Ok(Async::Ready(()));
}
Ok(Async::NotReady) => {
break;
}
Err(e) => return Err(From::from(e)),
}
}
self.inner = Some(stream);
}
Ok(Async::NotReady)
}
}
/// Evaluates the expression (returning `Poll<Option<_>, _>`) in a loop,
/// discarding values, until either `Ok(Async::Ready(None))` is returned
/// (indicating end of stream, and making the whole expression evaluate to `Async::Ready(())`),
/// `Ok(Async::NotReady)` is returned (making the whole expression evaluate to `Async::NotReady`),
/// or an error is returned (causing a return statement to be executed).
#[macro_export]
macro_rules! try_drain {
($e: expr) => {
loop {
match $e {
Ok(Async::Ready(Some(_))) => {}
Ok(Async::Ready(None)) => {
break Async::Ready(());
}
Ok(Async::NotReady) => {
break Async::NotReady;
}
Err(e) => return Err(From::from(e)),
}
}
};
}
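// A minimal usage sketch of `try_drain!` (illustrative; functionally mirrors
// the hand-written `Drain` future above).
pub struct DrainWith<S: Stream>(pub S);

impl<S: Stream> Future for DrainWith<S> {
    type Item = ();
    type Error = S::Error;

    fn poll(&mut self) -> Poll<(), S::Error> {
        // `try_drain!` discards items, breaks with `Async::NotReady` when the
        // stream is not ready, breaks with `Async::Ready(())` at end of
        // stream, and early-returns on error.
        Ok(try_drain!(self.0.poll()))
    }
}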
/// Evaluates the expression (returning `Poll<Option<_>, _>`) in a loop,
/// discarding values, until either `Ok(Async::Ready(None))` is returned
/// (indicating end of stream, and returning `Ok(Async::Ready(Default::default()))`),
/// `Ok(Async::NotReady)` is returned (making the whole expression evaluate to `Async::NotReady`),
/// or an error is returned (causing a return statement to be executed).
///
/// This is useful for things like `poll` implementations that should return
/// their default value as soon as the underlying stream has been drained.
#[macro_export]
macro_rules! try_drain_return_on_ready {
($e: expr) => {
loop {
match $e {
Ok(Async::Ready(Some(_))) => {}
Ok(Async::Ready(None)) => {
return Ok(Async::Ready(Default::default()));
}
Ok(Async::NotReady) => {
break Async::NotReady;
}
Err(e) => return Err(From::from(e)),
}
}
};
}
| 29.086614 | 101 | 0.509746 |
9b622dc3a8f6c73428bc6f8a3fbf484252f1a831 | 1,122 | use std::sync::Weak;
extern crate rand;
use rand::Rng;
extern crate core;
use self::core::banana::bananaq::FuzzyQ;
use self::core::generator::leaf::IArgLeaf;
use self::core::generator::serialize::ISerializableArg;
/// arg generator for random data
/// Note: as good practice, the RndData leaf should always be used via a guard like
/// ```
/// if config.rnddata_locked { RndData(..) } else { PatternData(Pattern, ..) }
/// ```
/// - this is for debugging: if you encounter random data in a dump it is hard
/// to track down the particular packet/call which caused it, while a pattern
/// serves immediately as an identifier
pub struct RndData {
size: usize
}
impl RndData {
pub fn new(size: usize) -> RndData {
RndData {
size : size,
}
}
}
impl ISerializableArg for RndData { }
impl IArgLeaf for RndData {
fn size(&self) -> usize { self.size }
fn name(&self) -> &'static str { "RndData" }
fn generate_unsafe(&mut self, _: &Weak<FuzzyQ>, mem: &mut[u8], _: &[u8], _: &[u8]) {
        assert!(mem.len() == self.size); // checking this in debug builds is OK
rand::thread_rng().fill(mem);
}
}
| 26.714286 | 91 | 0.631907 |
d69219efbd8847a2d519f1a3777ae15a46d242d5 | 111,306 | //! type context book-keeping
use dep_graph::DepGraph;
use dep_graph::{DepNode, DepConstructor};
use errors::DiagnosticBuilder;
use session::Session;
use session::config::{BorrowckMode, OutputFilenames};
use session::config::CrateType;
use middle;
use hir::{TraitCandidate, HirId, ItemKind, ItemLocalId, Node};
use hir::def::{Def, Export};
use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE};
use hir::map as hir_map;
use hir::map::DefPathHash;
use lint::{self, Lint};
use ich::{StableHashingContext, NodeIdHashingMode};
use infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos};
use infer::outlives::free_region_map::FreeRegionMap;
use middle::cstore::CrateStoreDyn;
use middle::cstore::EncodedMetadata;
use middle::lang_items;
use middle::resolve_lifetime::{self, ObjectLifetimeDefault};
use middle::stability;
use mir::{self, Mir, interpret, ProjectionKind};
use mir::interpret::Allocation;
use ty::subst::{Kind, Substs, Subst};
use ty::ReprOptions;
use traits;
use traits::{Clause, Clauses, GoalKind, Goal, Goals};
use ty::{self, Ty, TypeAndMut};
use ty::{TyS, TyKind, List};
use ty::{AdtKind, AdtDef, ClosureSubsts, GeneratorSubsts, Region, Const, LazyConst};
use ty::{PolyFnSig, InferTy, ParamTy, ProjectionTy, ExistentialPredicate, Predicate};
use ty::RegionKind;
use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid};
use ty::TyKind::*;
use ty::GenericParamDefKind;
use ty::layout::{LayoutDetails, TargetDataLayout, VariantIdx};
use ty::query;
use ty::steal::Steal;
use ty::subst::{UserSubsts, UnpackedKind};
use ty::{BoundVar, BindingMode};
use ty::CanonicalPolyFnSig;
use util::nodemap::{DefIdMap, DefIdSet, ItemLocalMap};
use util::nodemap::{FxHashMap, FxHashSet};
use rustc_data_structures::interner::HashInterner;
use smallvec::SmallVec;
use rustc_data_structures::stable_hasher::{HashStable, hash_stable_hashmap,
StableHasher, StableHasherResult,
StableVec};
use arena::{TypedArena, SyncDroplessArena};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use rustc_data_structures::sync::{self, Lrc, Lock, WorkerLocal};
use std::any::Any;
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::collections::hash_map::{self, Entry};
use std::hash::{Hash, Hasher};
use std::fmt;
use std::mem;
use std::ops::{Deref, Bound};
use std::ptr;
use std::iter;
use std::sync::mpsc;
use std::sync::Arc;
use std::marker::PhantomData;
use rustc_target::spec::abi;
use syntax::ast::{self, NodeId};
use syntax::attr;
use syntax::source_map::MultiSpan;
use syntax::edition::Edition;
use syntax::feature_gate;
use syntax::symbol::{Symbol, keywords, InternedString};
use syntax_pos::Span;
use hir;
pub struct AllArenas<'tcx> {
pub global: WorkerLocal<GlobalArenas<'tcx>>,
pub interner: SyncDroplessArena,
global_ctxt: Option<GlobalCtxt<'tcx>>,
}
impl<'tcx> AllArenas<'tcx> {
pub fn new() -> Self {
AllArenas {
global: WorkerLocal::new(|_| GlobalArenas::default()),
interner: SyncDroplessArena::default(),
global_ctxt: None,
}
}
}
/// Internal storage
#[derive(Default)]
pub struct GlobalArenas<'tcx> {
// internings
layout: TypedArena<LayoutDetails>,
// references
generics: TypedArena<ty::Generics>,
trait_def: TypedArena<ty::TraitDef>,
adt_def: TypedArena<ty::AdtDef>,
steal_mir: TypedArena<Steal<Mir<'tcx>>>,
mir: TypedArena<Mir<'tcx>>,
tables: TypedArena<ty::TypeckTables<'tcx>>,
/// miri allocations
const_allocs: TypedArena<interpret::Allocation>,
}
type InternedSet<'tcx, T> = Lock<FxHashMap<Interned<'tcx, T>, ()>>;
pub struct CtxtInterners<'tcx> {
/// The arena that types, regions, etc are allocated from
arena: &'tcx SyncDroplessArena,
/// Specifically use a speedy hash algorithm for these hash sets,
/// they're accessed quite often.
type_: InternedSet<'tcx, TyS<'tcx>>,
type_list: InternedSet<'tcx, List<Ty<'tcx>>>,
substs: InternedSet<'tcx, Substs<'tcx>>,
canonical_var_infos: InternedSet<'tcx, List<CanonicalVarInfo>>,
region: InternedSet<'tcx, RegionKind>,
existential_predicates: InternedSet<'tcx, List<ExistentialPredicate<'tcx>>>,
predicates: InternedSet<'tcx, List<Predicate<'tcx>>>,
clauses: InternedSet<'tcx, List<Clause<'tcx>>>,
goal: InternedSet<'tcx, GoalKind<'tcx>>,
goal_list: InternedSet<'tcx, List<Goal<'tcx>>>,
projs: InternedSet<'tcx, List<ProjectionKind<'tcx>>>,
}
impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> {
fn new(arena: &'tcx SyncDroplessArena) -> CtxtInterners<'tcx> {
CtxtInterners {
arena,
type_: Default::default(),
type_list: Default::default(),
substs: Default::default(),
region: Default::default(),
existential_predicates: Default::default(),
canonical_var_infos: Default::default(),
predicates: Default::default(),
clauses: Default::default(),
goal: Default::default(),
goal_list: Default::default(),
projs: Default::default(),
}
}
/// Intern a type
#[inline(never)]
fn intern_ty(
local: &CtxtInterners<'tcx>,
global: &CtxtInterners<'gcx>,
st: TyKind<'tcx>
) -> Ty<'tcx> {
let flags = super::flags::FlagComputation::for_sty(&st);
// HACK(eddyb) Depend on flags being accurate to
// determine that all contents are in the global tcx.
// See comments on Lift for why we can't use that.
if flags.flags.intersects(ty::TypeFlags::KEEP_IN_LOCAL_TCX) {
local.type_.borrow_mut().intern(st, |st| {
let ty_struct = TyS {
sty: st,
flags: flags.flags,
outer_exclusive_binder: flags.outer_exclusive_binder,
};
// Make sure we don't end up with inference
// types/regions in the global interner
if ptr::eq(local, global) {
bug!("Attempted to intern `{:?}` which contains \
inference types/regions in the global type context",
&ty_struct);
}
Interned(local.arena.alloc(ty_struct))
}).0
} else {
global.type_.borrow_mut().intern(st, |st| {
let ty_struct = TyS {
sty: st,
flags: flags.flags,
outer_exclusive_binder: flags.outer_exclusive_binder,
};
                // This is safe because all the types the ty_struct can point to
                // are already in the global arena
let ty_struct: TyS<'gcx> = unsafe {
mem::transmute(ty_struct)
};
Interned(global.arena.alloc(ty_struct))
}).0
}
}
}
pub struct CommonTypes<'tcx> {
pub unit: Ty<'tcx>,
pub bool: Ty<'tcx>,
pub char: Ty<'tcx>,
pub isize: Ty<'tcx>,
pub i8: Ty<'tcx>,
pub i16: Ty<'tcx>,
pub i32: Ty<'tcx>,
pub i64: Ty<'tcx>,
pub i128: Ty<'tcx>,
pub usize: Ty<'tcx>,
pub u8: Ty<'tcx>,
pub u16: Ty<'tcx>,
pub u32: Ty<'tcx>,
pub u64: Ty<'tcx>,
pub u128: Ty<'tcx>,
pub f32: Ty<'tcx>,
pub f64: Ty<'tcx>,
pub never: Ty<'tcx>,
pub err: Ty<'tcx>,
pub re_empty: Region<'tcx>,
pub re_static: Region<'tcx>,
pub re_erased: Region<'tcx>,
}
pub struct LocalTableInContext<'a, V: 'a> {
local_id_root: Option<DefId>,
data: &'a ItemLocalMap<V>
}
/// Validate that the given HirId (respectively its `local_id` part) can be
/// safely used as a key in the tables of a TypeckTable. For that to be
/// the case, the HirId must have the same `owner` as all the other IDs in
/// this table (signified by `local_id_root`). Otherwise the HirId
/// would be in a different frame of reference and using its `local_id`
/// would result in lookup errors, or worse, in silently wrong data being
/// stored/returned.
fn validate_hir_id_for_typeck_tables(local_id_root: Option<DefId>,
hir_id: hir::HirId,
mut_access: bool) {
if cfg!(debug_assertions) {
if let Some(local_id_root) = local_id_root {
if hir_id.owner != local_id_root.index {
ty::tls::with(|tcx| {
let node_id = tcx.hir().hir_to_node_id(hir_id);
bug!("node {} with HirId::owner {:?} cannot be placed in \
TypeckTables with local_id_root {:?}",
tcx.hir().node_to_string(node_id),
DefId::local(hir_id.owner),
local_id_root)
});
}
} else {
// We use "Null Object" TypeckTables in some of the analysis passes.
// These are just expected to be empty and their `local_id_root` is
// `None`. Therefore we cannot verify whether a given `HirId` would
// be a valid key for the given table. Instead we make sure that
// nobody tries to write to such a Null Object table.
if mut_access {
bug!("access to invalid TypeckTables")
}
}
}
}
impl<'a, V> LocalTableInContext<'a, V> {
pub fn contains_key(&self, id: hir::HirId) -> bool {
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.data.contains_key(&id.local_id)
}
pub fn get(&self, id: hir::HirId) -> Option<&V> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.data.get(&id.local_id)
}
pub fn iter(&self) -> hash_map::Iter<'_, hir::ItemLocalId, V> {
self.data.iter()
}
}
impl<'a, V> ::std::ops::Index<hir::HirId> for LocalTableInContext<'a, V> {
type Output = V;
fn index(&self, key: hir::HirId) -> &V {
self.get(key).expect("LocalTableInContext: key not found")
}
}
pub struct LocalTableInContextMut<'a, V: 'a> {
local_id_root: Option<DefId>,
data: &'a mut ItemLocalMap<V>
}
impl<'a, V> LocalTableInContextMut<'a, V> {
pub fn get_mut(&mut self, id: hir::HirId) -> Option<&mut V> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
self.data.get_mut(&id.local_id)
}
pub fn entry(&mut self, id: hir::HirId) -> Entry<'_, hir::ItemLocalId, V> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
self.data.entry(id.local_id)
}
pub fn insert(&mut self, id: hir::HirId, val: V) -> Option<V> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
self.data.insert(id.local_id, val)
}
pub fn remove(&mut self, id: hir::HirId) -> Option<V> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
self.data.remove(&id.local_id)
}
}
#[derive(RustcEncodable, RustcDecodable, Debug)]
pub struct TypeckTables<'tcx> {
/// The HirId::owner all ItemLocalIds in this table are relative to.
pub local_id_root: Option<DefId>,
/// Resolved definitions for `<T>::X` associated paths and
/// method calls, including those of overloaded operators.
type_dependent_defs: ItemLocalMap<Def>,
/// Resolved field indices for field accesses in expressions (`S { field }`, `obj.field`)
/// or patterns (`S { field }`). The index is often useful by itself, but to learn more
/// about the field you also need definition of the variant to which the field
/// belongs, but it may not exist if it's a tuple field (`tuple.0`).
field_indices: ItemLocalMap<usize>,
/// Stores the types for various nodes in the AST. Note that this table
/// is not guaranteed to be populated until after typeck. See
/// typeck::check::fn_ctxt for details.
node_types: ItemLocalMap<Ty<'tcx>>,
/// Stores the type parameters which were substituted to obtain the type
/// of this node. This only applies to nodes that refer to entities
/// parameterized by type parameters, such as generic fns, types, or
/// other items.
node_substs: ItemLocalMap<&'tcx Substs<'tcx>>,
/// This will either store the canonicalized types provided by the user
/// or the substitutions that the user explicitly gave (if any) attached
/// to `id`. These will not include any inferred values. The canonical form
/// is used to capture things like `_` or other unspecified values.
///
/// For example, if the user wrote `foo.collect::<Vec<_>>()`, then the
/// canonical substitutions would include only `for<X> { Vec<X> }`.
///
/// See also `AscribeUserType` statement in MIR.
user_provided_types: ItemLocalMap<CanonicalUserTypeAnnotation<'tcx>>,
/// Stores the canonicalized types provided by the user. See also
/// `AscribeUserType` statement in MIR.
pub user_provided_sigs: DefIdMap<CanonicalPolyFnSig<'tcx>>,
adjustments: ItemLocalMap<Vec<ty::adjustment::Adjustment<'tcx>>>,
/// Stores the actual binding mode for all instances of hir::BindingAnnotation.
pat_binding_modes: ItemLocalMap<BindingMode>,
/// Stores the types which were implicitly dereferenced in pattern binding modes
/// for later usage in HAIR lowering. For example,
///
/// ```
/// match &&Some(5i32) {
/// Some(n) => {},
/// _ => {},
/// }
/// ```
/// leads to a `vec![&&Option<i32>, &Option<i32>]`. Empty vectors are not stored.
///
/// See:
/// https://github.com/rust-lang/rfcs/blob/master/text/2005-match-ergonomics.md#definitions
pat_adjustments: ItemLocalMap<Vec<Ty<'tcx>>>,
/// Borrows
pub upvar_capture_map: ty::UpvarCaptureMap<'tcx>,
/// Records the reasons that we picked the kind of each closure;
/// not all closures are present in the map.
closure_kind_origins: ItemLocalMap<(Span, ast::Name)>,
/// For each fn, records the "liberated" types of its arguments
/// and return type. Liberated means that all bound regions
/// (including late-bound regions) are replaced with free
/// equivalents. This table is not used in codegen (since regions
/// are erased there) and hence is not serialized to metadata.
liberated_fn_sigs: ItemLocalMap<ty::FnSig<'tcx>>,
/// For each FRU expression, record the normalized types of the fields
/// of the struct - this is needed because it is non-trivial to
/// normalize while preserving regions. This table is used only in
/// MIR construction and hence is not serialized to metadata.
fru_field_types: ItemLocalMap<Vec<Ty<'tcx>>>,
/// Maps a cast expression to its kind. This is keyed on the
/// *from* expression of the cast, not the cast itself.
cast_kinds: ItemLocalMap<ty::cast::CastKind>,
/// Set of trait imports actually used in the method resolution.
/// This is used for warning unused imports. During type
/// checking, this `Lrc` should not be cloned: it must have a ref-count
/// of 1 so that we can insert things into the set mutably.
pub used_trait_imports: Lrc<DefIdSet>,
/// If any errors occurred while type-checking this body,
/// this field will be set to `true`.
pub tainted_by_errors: bool,
/// Stores the free-region relationships that were deduced from
/// its where clauses and parameter types. These are then
/// read-again by borrowck.
pub free_region_map: FreeRegionMap<'tcx>,
/// All the existential types that are restricted to concrete types
/// by this function
pub concrete_existential_types: FxHashMap<DefId, Ty<'tcx>>,
/// Given the closure ID this map provides the list of UpvarIDs used by it.
/// The upvarID contains the HIR node ID and it also contains the full path
/// leading to the member of the struct or tuple that is used instead of the
/// entire variable.
pub upvar_list: ty::UpvarListMap,
}
impl<'tcx> TypeckTables<'tcx> {
pub fn empty(local_id_root: Option<DefId>) -> TypeckTables<'tcx> {
TypeckTables {
local_id_root,
type_dependent_defs: Default::default(),
field_indices: Default::default(),
user_provided_types: Default::default(),
user_provided_sigs: Default::default(),
node_types: Default::default(),
node_substs: Default::default(),
adjustments: Default::default(),
pat_binding_modes: Default::default(),
pat_adjustments: Default::default(),
upvar_capture_map: Default::default(),
closure_kind_origins: Default::default(),
liberated_fn_sigs: Default::default(),
fru_field_types: Default::default(),
cast_kinds: Default::default(),
used_trait_imports: Lrc::new(Default::default()),
tainted_by_errors: false,
free_region_map: Default::default(),
concrete_existential_types: Default::default(),
upvar_list: Default::default(),
}
}
/// Returns the final resolution of a `QPath` in an `Expr` or `Pat` node.
pub fn qpath_def(&self, qpath: &hir::QPath, id: hir::HirId) -> Def {
match *qpath {
hir::QPath::Resolved(_, ref path) => path.def,
hir::QPath::TypeRelative(..) => {
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.type_dependent_defs.get(&id.local_id).cloned().unwrap_or(Def::Err)
}
}
}
pub fn type_dependent_defs(&self) -> LocalTableInContext<'_, Def> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.type_dependent_defs
}
}
pub fn type_dependent_defs_mut(&mut self) -> LocalTableInContextMut<'_, Def> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.type_dependent_defs
}
}
pub fn field_indices(&self) -> LocalTableInContext<'_, usize> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.field_indices
}
}
pub fn field_indices_mut(&mut self) -> LocalTableInContextMut<'_, usize> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.field_indices
}
}
pub fn user_provided_types(
&self
) -> LocalTableInContext<'_, CanonicalUserTypeAnnotation<'tcx>> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.user_provided_types
}
}
pub fn user_provided_types_mut(
&mut self
) -> LocalTableInContextMut<'_, CanonicalUserTypeAnnotation<'tcx>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.user_provided_types
}
}
pub fn node_types(&self) -> LocalTableInContext<'_, Ty<'tcx>> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.node_types
}
}
pub fn node_types_mut(&mut self) -> LocalTableInContextMut<'_, Ty<'tcx>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.node_types
}
}
pub fn node_id_to_type(&self, id: hir::HirId) -> Ty<'tcx> {
self.node_id_to_type_opt(id).unwrap_or_else(||
bug!("node_id_to_type: no type for node `{}`",
tls::with(|tcx| {
let id = tcx.hir().hir_to_node_id(id);
tcx.hir().node_to_string(id)
}))
)
}
pub fn node_id_to_type_opt(&self, id: hir::HirId) -> Option<Ty<'tcx>> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.node_types.get(&id.local_id).cloned()
}
pub fn node_substs_mut(&mut self) -> LocalTableInContextMut<'_, &'tcx Substs<'tcx>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.node_substs
}
}
pub fn node_substs(&self, id: hir::HirId) -> &'tcx Substs<'tcx> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.node_substs.get(&id.local_id).cloned().unwrap_or_else(|| Substs::empty())
}
pub fn node_substs_opt(&self, id: hir::HirId) -> Option<&'tcx Substs<'tcx>> {
validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
self.node_substs.get(&id.local_id).cloned()
}
    // Returns the type of a pattern as a monotype. Like `expr_ty`, this function
// doesn't provide type parameter substitutions.
pub fn pat_ty(&self, pat: &hir::Pat) -> Ty<'tcx> {
self.node_id_to_type(pat.hir_id)
}
pub fn pat_ty_opt(&self, pat: &hir::Pat) -> Option<Ty<'tcx>> {
self.node_id_to_type_opt(pat.hir_id)
}
// Returns the type of an expression as a monotype.
//
// NB (1): This is the PRE-ADJUSTMENT TYPE for the expression. That is, in
// some cases, we insert `Adjustment` annotations such as auto-deref or
// auto-ref. The type returned by this function does not consider such
// adjustments. See `expr_ty_adjusted()` instead.
//
// NB (2): This type doesn't provide type parameter substitutions; e.g., if you
// ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize"
// instead of "fn(ty) -> T with T = isize".
pub fn expr_ty(&self, expr: &hir::Expr) -> Ty<'tcx> {
self.node_id_to_type(expr.hir_id)
}
pub fn expr_ty_opt(&self, expr: &hir::Expr) -> Option<Ty<'tcx>> {
self.node_id_to_type_opt(expr.hir_id)
}
pub fn adjustments(&self) -> LocalTableInContext<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.adjustments
}
}
pub fn adjustments_mut(&mut self)
-> LocalTableInContextMut<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.adjustments
}
}
pub fn expr_adjustments(&self, expr: &hir::Expr)
-> &[ty::adjustment::Adjustment<'tcx>] {
validate_hir_id_for_typeck_tables(self.local_id_root, expr.hir_id, false);
self.adjustments.get(&expr.hir_id.local_id).map_or(&[], |a| &a[..])
}
/// Returns the type of `expr`, considering any `Adjustment`
/// entry recorded for that expression.
pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> Ty<'tcx> {
self.expr_adjustments(expr)
.last()
.map_or_else(|| self.expr_ty(expr), |adj| adj.target)
}
pub fn expr_ty_adjusted_opt(&self, expr: &hir::Expr) -> Option<Ty<'tcx>> {
self.expr_adjustments(expr)
.last()
.map(|adj| adj.target)
.or_else(|| self.expr_ty_opt(expr))
}
pub fn is_method_call(&self, expr: &hir::Expr) -> bool {
// Only paths and method calls/overloaded operators have
// entries in type_dependent_defs, ignore the former here.
if let hir::ExprKind::Path(_) = expr.node {
return false;
}
match self.type_dependent_defs().get(expr.hir_id) {
Some(&Def::Method(_)) => true,
_ => false
}
}
pub fn pat_binding_modes(&self) -> LocalTableInContext<'_, BindingMode> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.pat_binding_modes
}
}
pub fn pat_binding_modes_mut(&mut self)
-> LocalTableInContextMut<'_, BindingMode> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.pat_binding_modes
}
}
pub fn pat_adjustments(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.pat_adjustments,
}
}
pub fn pat_adjustments_mut(&mut self)
-> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.pat_adjustments,
}
}
pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> ty::UpvarCapture<'tcx> {
self.upvar_capture_map[&upvar_id]
}
pub fn closure_kind_origins(&self) -> LocalTableInContext<'_, (Span, ast::Name)> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.closure_kind_origins
}
}
pub fn closure_kind_origins_mut(&mut self) -> LocalTableInContextMut<'_, (Span, ast::Name)> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.closure_kind_origins
}
}
pub fn liberated_fn_sigs(&self) -> LocalTableInContext<'_, ty::FnSig<'tcx>> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.liberated_fn_sigs
}
}
pub fn liberated_fn_sigs_mut(&mut self) -> LocalTableInContextMut<'_, ty::FnSig<'tcx>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.liberated_fn_sigs
}
}
pub fn fru_field_types(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.fru_field_types
}
}
pub fn fru_field_types_mut(&mut self) -> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.fru_field_types
}
}
pub fn cast_kinds(&self) -> LocalTableInContext<'_, ty::cast::CastKind> {
LocalTableInContext {
local_id_root: self.local_id_root,
data: &self.cast_kinds
}
}
pub fn cast_kinds_mut(&mut self) -> LocalTableInContextMut<'_, ty::cast::CastKind> {
LocalTableInContextMut {
local_id_root: self.local_id_root,
data: &mut self.cast_kinds
}
}
}
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for TypeckTables<'gcx> {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher<W>) {
let ty::TypeckTables {
local_id_root,
ref type_dependent_defs,
ref field_indices,
ref user_provided_types,
ref user_provided_sigs,
ref node_types,
ref node_substs,
ref adjustments,
ref pat_binding_modes,
ref pat_adjustments,
ref upvar_capture_map,
ref closure_kind_origins,
ref liberated_fn_sigs,
ref fru_field_types,
ref cast_kinds,
ref used_trait_imports,
tainted_by_errors,
ref free_region_map,
ref concrete_existential_types,
ref upvar_list,
} = *self;
hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
type_dependent_defs.hash_stable(hcx, hasher);
field_indices.hash_stable(hcx, hasher);
user_provided_types.hash_stable(hcx, hasher);
user_provided_sigs.hash_stable(hcx, hasher);
node_types.hash_stable(hcx, hasher);
node_substs.hash_stable(hcx, hasher);
adjustments.hash_stable(hcx, hasher);
pat_binding_modes.hash_stable(hcx, hasher);
pat_adjustments.hash_stable(hcx, hasher);
hash_stable_hashmap(hcx, hasher, upvar_capture_map, |up_var_id, hcx| {
let ty::UpvarId {
var_path,
closure_expr_id
} = *up_var_id;
let local_id_root =
local_id_root.expect("trying to hash invalid TypeckTables");
let var_owner_def_id = DefId {
krate: local_id_root.krate,
index: var_path.hir_id.owner,
};
let closure_def_id = DefId {
krate: local_id_root.krate,
index: closure_expr_id.to_def_id().index,
};
(hcx.def_path_hash(var_owner_def_id),
var_path.hir_id.local_id,
hcx.def_path_hash(closure_def_id))
});
closure_kind_origins.hash_stable(hcx, hasher);
liberated_fn_sigs.hash_stable(hcx, hasher);
fru_field_types.hash_stable(hcx, hasher);
cast_kinds.hash_stable(hcx, hasher);
used_trait_imports.hash_stable(hcx, hasher);
tainted_by_errors.hash_stable(hcx, hasher);
free_region_map.hash_stable(hcx, hasher);
concrete_existential_types.hash_stable(hcx, hasher);
upvar_list.hash_stable(hcx, hasher);
})
}
}
newtype_index! {
pub struct UserTypeAnnotationIndex {
DEBUG_FORMAT = "UserTypeAnnotation({})",
const START_INDEX = 0,
}
}
/// Mapping of type annotation indices to canonical user type annotations.
pub type CanonicalUserTypeAnnotations<'tcx> =
IndexVec<UserTypeAnnotationIndex, (Span, CanonicalUserTypeAnnotation<'tcx>)>;
/// Canonicalized user type annotation.
pub type CanonicalUserTypeAnnotation<'gcx> = Canonical<'gcx, UserTypeAnnotation<'gcx>>;
impl CanonicalUserTypeAnnotation<'gcx> {
/// Returns `true` if this represents a substitution of the form `[?0, ?1, ?2]`,
    /// i.e. each generic argument is mapped to the canonical variable with the same index.
pub fn is_identity(&self) -> bool {
match self.value {
UserTypeAnnotation::Ty(_) => false,
UserTypeAnnotation::TypeOf(_, user_substs) => {
if user_substs.user_self_ty.is_some() {
return false;
}
user_substs.substs.iter().zip(BoundVar::new(0)..).all(|(kind, cvar)| {
match kind.unpack() {
UnpackedKind::Type(ty) => match ty.sty {
ty::Bound(debruijn, b) => {
// We only allow a `ty::INNERMOST` index in substitutions.
assert_eq!(debruijn, ty::INNERMOST);
cvar == b.var
}
_ => false,
},
UnpackedKind::Lifetime(r) => match r {
ty::ReLateBound(debruijn, br) => {
// We only allow a `ty::INNERMOST` index in substitutions.
assert_eq!(*debruijn, ty::INNERMOST);
cvar == br.assert_bound_var()
}
_ => false,
},
}
})
},
}
}
}
/// A user-given type annotation attached to a constant. These arise
/// from constants that are named via paths, like `Foo::<A>::new` and
/// so forth.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub enum UserTypeAnnotation<'tcx> {
Ty(Ty<'tcx>),
/// The canonical type is the result of `type_of(def_id)` with the
/// given substitutions applied.
TypeOf(DefId, UserSubsts<'tcx>),
}
EnumTypeFoldableImpl! {
impl<'tcx> TypeFoldable<'tcx> for UserTypeAnnotation<'tcx> {
(UserTypeAnnotation::Ty)(ty),
(UserTypeAnnotation::TypeOf)(def, substs),
}
}
EnumLiftImpl! {
impl<'a, 'tcx> Lift<'tcx> for UserTypeAnnotation<'a> {
type Lifted = UserTypeAnnotation<'tcx>;
(UserTypeAnnotation::Ty)(ty),
(UserTypeAnnotation::TypeOf)(def, substs),
}
}
impl<'tcx> CommonTypes<'tcx> {
fn new(interners: &CtxtInterners<'tcx>) -> CommonTypes<'tcx> {
let mk = |sty| CtxtInterners::intern_ty(interners, interners, sty);
let mk_region = |r| {
interners.region.borrow_mut().intern(r, |r| {
Interned(interners.arena.alloc(r))
}).0
};
CommonTypes {
unit: mk(Tuple(List::empty())),
bool: mk(Bool),
char: mk(Char),
never: mk(Never),
err: mk(Error),
isize: mk(Int(ast::IntTy::Isize)),
i8: mk(Int(ast::IntTy::I8)),
i16: mk(Int(ast::IntTy::I16)),
i32: mk(Int(ast::IntTy::I32)),
i64: mk(Int(ast::IntTy::I64)),
i128: mk(Int(ast::IntTy::I128)),
usize: mk(Uint(ast::UintTy::Usize)),
u8: mk(Uint(ast::UintTy::U8)),
u16: mk(Uint(ast::UintTy::U16)),
u32: mk(Uint(ast::UintTy::U32)),
u64: mk(Uint(ast::UintTy::U64)),
u128: mk(Uint(ast::UintTy::U128)),
f32: mk(Float(ast::FloatTy::F32)),
f64: mk(Float(ast::FloatTy::F64)),
re_empty: mk_region(RegionKind::ReEmpty),
re_static: mk_region(RegionKind::ReStatic),
re_erased: mk_region(RegionKind::ReErased),
}
}
}
// This struct contains information regarding the `ReFree(FreeRegion)` corresponding to a lifetime
// conflict.
#[derive(Debug)]
pub struct FreeRegionInfo {
// def id corresponding to FreeRegion
pub def_id: DefId,
// the bound region corresponding to FreeRegion
pub boundregion: ty::BoundRegion,
// checks if bound region is in Impl Item
pub is_impl_item: bool,
}
/// The central data structure of the compiler. It stores references
/// to the various **arenas** and also houses the results of the
/// various **compiler queries** that have been performed. See the
/// [rustc guide] for more details.
///
/// [rustc guide]: https://rust-lang.github.io/rustc-guide/ty.html
#[derive(Copy, Clone)]
pub struct TyCtxt<'a, 'gcx: 'tcx, 'tcx: 'a> {
gcx: &'gcx GlobalCtxt<'gcx>,
interners: &'tcx CtxtInterners<'tcx>,
dummy: PhantomData<&'a ()>,
}
impl<'gcx> Deref for TyCtxt<'_, 'gcx, '_> {
type Target = &'gcx GlobalCtxt<'gcx>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.gcx
}
}
pub struct GlobalCtxt<'tcx> {
global_arenas: &'tcx WorkerLocal<GlobalArenas<'tcx>>,
global_interners: CtxtInterners<'tcx>,
cstore: &'tcx CrateStoreDyn,
pub sess: &'tcx Session,
pub dep_graph: DepGraph,
/// Common types, pre-interned for your convenience.
pub types: CommonTypes<'tcx>,
/// Map indicating what traits are in scope for places where this
/// is relevant; generated by resolve.
trait_map: FxHashMap<DefIndex,
Lrc<FxHashMap<ItemLocalId,
Lrc<StableVec<TraitCandidate>>>>>,
/// Export map produced by name resolution.
export_map: FxHashMap<DefId, Lrc<Vec<Export>>>,
hir_map: hir_map::Map<'tcx>,
/// A map from DefPathHash -> DefId. Includes DefIds from the local crate
/// as well as all upstream crates. Only populated in incremental mode.
pub def_path_hash_to_def_id: Option<FxHashMap<DefPathHash, DefId>>,
pub queries: query::Queries<'tcx>,
// Records the free variables referenced by every closure
// expression. Do not track deps for this, just recompute it from
// scratch every time.
freevars: FxHashMap<DefId, Lrc<Vec<hir::Freevar>>>,
maybe_unused_trait_imports: FxHashSet<DefId>,
maybe_unused_extern_crates: Vec<(DefId, Span)>,
/// Extern prelude entries. The value is `true` if the entry was introduced
/// via `extern crate` item and not `--extern` option or compiler built-in.
pub extern_prelude: FxHashMap<ast::Name, bool>,
// Internal cache for metadata decoding. No need to track deps on this.
pub rcache: Lock<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
/// Caches the results of trait selection. This cache is used
/// for things that do not have to do with the parameters in scope.
pub selection_cache: traits::SelectionCache<'tcx>,
/// Caches the results of trait evaluation. This cache is used
/// for things that do not have to do with the parameters in scope.
/// Merge this with `selection_cache`?
pub evaluation_cache: traits::EvaluationCache<'tcx>,
/// The definite name of the current crate after taking into account
/// attributes, commandline parameters, etc.
pub crate_name: Symbol,
/// Data layout specification for the current target.
pub data_layout: TargetDataLayout,
stability_interner: Lock<FxHashMap<&'tcx attr::Stability, ()>>,
/// Stores the value of constants (and deduplicates the actual memory)
allocation_interner: Lock<FxHashMap<&'tcx Allocation, ()>>,
pub alloc_map: Lock<interpret::AllocMap<'tcx>>,
layout_interner: Lock<FxHashMap<&'tcx LayoutDetails, ()>>,
/// A general purpose channel to throw data out the back towards LLVM worker
/// threads.
///
/// This is intended to only get used during the codegen phase of the compiler
/// when satisfying the query for a particular codegen unit. Internally in
/// the query it'll send data along this channel to get processed later.
pub tx_to_llvm_workers: Lock<mpsc::Sender<Box<dyn Any + Send>>>,
output_filenames: Arc<OutputFilenames>,
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
/// Get the global TyCtxt.
#[inline]
pub fn global_tcx(self) -> TyCtxt<'gcx, 'gcx, 'gcx> {
TyCtxt {
gcx: self.gcx,
interners: &self.gcx.global_interners,
dummy: PhantomData,
}
}
#[inline(always)]
pub fn hir(self) -> &'a hir_map::Map<'gcx> {
&self.hir_map
}
pub fn alloc_generics(self, generics: ty::Generics) -> &'gcx ty::Generics {
self.global_arenas.generics.alloc(generics)
}
pub fn alloc_steal_mir(self, mir: Mir<'gcx>) -> &'gcx Steal<Mir<'gcx>> {
self.global_arenas.steal_mir.alloc(Steal::new(mir))
}
pub fn alloc_mir(self, mir: Mir<'gcx>) -> &'gcx Mir<'gcx> {
self.global_arenas.mir.alloc(mir)
}
pub fn alloc_tables(self, tables: ty::TypeckTables<'gcx>) -> &'gcx ty::TypeckTables<'gcx> {
self.global_arenas.tables.alloc(tables)
}
pub fn alloc_trait_def(self, def: ty::TraitDef) -> &'gcx ty::TraitDef {
self.global_arenas.trait_def.alloc(def)
}
pub fn alloc_adt_def(self,
did: DefId,
kind: AdtKind,
variants: IndexVec<VariantIdx, ty::VariantDef>,
repr: ReprOptions)
-> &'gcx ty::AdtDef {
let def = ty::AdtDef::new(self, did, kind, variants, repr);
self.global_arenas.adt_def.alloc(def)
}
pub fn intern_const_alloc(
self,
alloc: Allocation,
) -> &'gcx Allocation {
self.allocation_interner.borrow_mut().intern(alloc, |alloc| {
self.global_arenas.const_allocs.alloc(alloc)
})
}
/// Allocates a byte or string literal for `mir::interpret`, read-only
pub fn allocate_bytes(self, bytes: &[u8]) -> interpret::AllocId {
// create an allocation that just contains these bytes
let alloc = interpret::Allocation::from_byte_aligned_bytes(bytes, ());
let alloc = self.intern_const_alloc(alloc);
self.alloc_map.lock().allocate(alloc)
}
pub fn intern_stability(self, stab: attr::Stability) -> &'gcx attr::Stability {
self.stability_interner.borrow_mut().intern(stab, |stab| {
self.global_interners.arena.alloc(stab)
})
}
pub fn intern_lazy_const(self, c: ty::LazyConst<'tcx>) -> &'tcx ty::LazyConst<'tcx> {
self.global_interners.arena.alloc(c)
}
pub fn intern_layout(self, layout: LayoutDetails) -> &'gcx LayoutDetails {
self.layout_interner.borrow_mut().intern(layout, |layout| {
self.global_arenas.layout.alloc(layout)
})
}
/// Returns a range of the start/end indices specified with the
/// `rustc_layout_scalar_valid_range` attribute.
pub fn layout_scalar_valid_range(self, def_id: DefId) -> (Bound<u128>, Bound<u128>) {
let attrs = self.get_attrs(def_id);
let get = |name| {
let attr = match attrs.iter().find(|a| a.check_name(name)) {
Some(attr) => attr,
None => return Bound::Unbounded,
};
for meta in attr.meta_item_list().expect("rustc_layout_scalar_valid_range takes args") {
match meta.literal().expect("attribute takes lit").node {
ast::LitKind::Int(a, _) => return Bound::Included(a),
_ => span_bug!(attr.span, "rustc_layout_scalar_valid_range expects int arg"),
}
}
span_bug!(attr.span, "no arguments to `rustc_layout_scalar_valid_range` attribute");
};
(get("rustc_layout_scalar_valid_range_start"), get("rustc_layout_scalar_valid_range_end"))
}
pub fn lift<T: ?Sized + Lift<'tcx>>(self, value: &T) -> Option<T::Lifted> {
value.lift_to_tcx(self)
}
/// Like lift, but only tries in the global tcx.
pub fn lift_to_global<T: ?Sized + Lift<'gcx>>(self, value: &T) -> Option<T::Lifted> {
value.lift_to_tcx(self.global_tcx())
}
/// Returns true if self is the same as self.global_tcx().
fn is_global(self) -> bool {
ptr::eq(self.interners, &self.global_interners)
}
/// Create a type context and call the closure with a `TyCtxt` reference
/// to the context. The closure enforces that the type context and any interned
/// value (types, substs, etc.) can only be used while `ty::tls` has a valid
/// reference to the context, to allow formatting values that need it.
pub fn create_and_enter<F, R>(s: &'tcx Session,
cstore: &'tcx CrateStoreDyn,
local_providers: ty::query::Providers<'tcx>,
extern_providers: ty::query::Providers<'tcx>,
arenas: &'tcx mut AllArenas<'tcx>,
resolutions: ty::Resolutions,
hir: hir_map::Map<'tcx>,
on_disk_query_result_cache: query::OnDiskCache<'tcx>,
crate_name: &str,
tx: mpsc::Sender<Box<dyn Any + Send>>,
output_filenames: &OutputFilenames,
f: F) -> R
where F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'tcx>) -> R
{
let data_layout = TargetDataLayout::parse(&s.target.target).unwrap_or_else(|err| {
s.fatal(&err);
});
let interners = CtxtInterners::new(&arenas.interner);
let common_types = CommonTypes::new(&interners);
let dep_graph = hir.dep_graph.clone();
let max_cnum = cstore.crates_untracked().iter().map(|c| c.as_usize()).max().unwrap_or(0);
let mut providers = IndexVec::from_elem_n(extern_providers, max_cnum + 1);
providers[LOCAL_CRATE] = local_providers;
let def_path_hash_to_def_id = if s.opts.build_dep_graph() {
let upstream_def_path_tables: Vec<(CrateNum, Lrc<_>)> = cstore
.crates_untracked()
.iter()
.map(|&cnum| (cnum, cstore.def_path_table(cnum)))
.collect();
let def_path_tables = || {
upstream_def_path_tables
.iter()
.map(|&(cnum, ref rc)| (cnum, &**rc))
.chain(iter::once((LOCAL_CRATE, hir.definitions().def_path_table())))
};
// Precompute the capacity of the hashmap so we don't have to
// re-allocate when populating it.
let capacity = def_path_tables().map(|(_, t)| t.size()).sum::<usize>();
let mut map: FxHashMap<_, _> = FxHashMap::with_capacity_and_hasher(
capacity,
::std::default::Default::default()
);
for (cnum, def_path_table) in def_path_tables() {
def_path_table.add_def_path_hashes_to(cnum, &mut map);
}
Some(map)
} else {
None
};
let mut trait_map: FxHashMap<_, Lrc<FxHashMap<_, _>>> = FxHashMap::default();
for (k, v) in resolutions.trait_map {
let hir_id = hir.node_to_hir_id(k);
let map = trait_map.entry(hir_id.owner).or_default();
Lrc::get_mut(map).unwrap()
.insert(hir_id.local_id,
Lrc::new(StableVec::new(v)));
}
arenas.global_ctxt = Some(GlobalCtxt {
sess: s,
cstore,
global_arenas: &arenas.global,
global_interners: interners,
dep_graph,
types: common_types,
trait_map,
export_map: resolutions.export_map.into_iter().map(|(k, v)| {
(k, Lrc::new(v))
}).collect(),
freevars: resolutions.freevars.into_iter().map(|(k, v)| {
(hir.local_def_id(k), Lrc::new(v))
}).collect(),
maybe_unused_trait_imports:
resolutions.maybe_unused_trait_imports
.into_iter()
.map(|id| hir.local_def_id(id))
.collect(),
maybe_unused_extern_crates:
resolutions.maybe_unused_extern_crates
.into_iter()
.map(|(id, sp)| (hir.local_def_id(id), sp))
.collect(),
extern_prelude: resolutions.extern_prelude,
hir_map: hir,
def_path_hash_to_def_id,
queries: query::Queries::new(
providers,
extern_providers,
on_disk_query_result_cache,
),
rcache: Default::default(),
selection_cache: Default::default(),
evaluation_cache: Default::default(),
crate_name: Symbol::intern(crate_name),
data_layout,
layout_interner: Default::default(),
stability_interner: Default::default(),
allocation_interner: Default::default(),
alloc_map: Lock::new(interpret::AllocMap::new()),
tx_to_llvm_workers: Lock::new(tx),
output_filenames: Arc::new(output_filenames.clone()),
});
let gcx = arenas.global_ctxt.as_ref().unwrap();
sync::assert_send_val(&gcx);
let r = tls::enter_global(gcx, f);
gcx.queries.record_computed_queries(s);
r
}
pub fn consider_optimizing<T: Fn() -> String>(&self, msg: T) -> bool {
let cname = self.crate_name(LOCAL_CRATE).as_str();
self.sess.consider_optimizing(&cname, msg)
}
pub fn lib_features(self) -> Lrc<middle::lib_features::LibFeatures> {
self.get_lib_features(LOCAL_CRATE)
}
pub fn lang_items(self) -> Lrc<middle::lang_items::LanguageItems> {
self.get_lang_items(LOCAL_CRATE)
}
    /// Due to missing LLVM support for lowering 128-bit math to software emulation
/// (on some targets), the lowering can be done in MIR.
///
/// This function only exists until said support is implemented.
pub fn is_binop_lang_item(&self, def_id: DefId) -> Option<(mir::BinOp, bool)> {
let items = self.lang_items();
let def_id = Some(def_id);
if items.i128_add_fn() == def_id { Some((mir::BinOp::Add, false)) }
else if items.u128_add_fn() == def_id { Some((mir::BinOp::Add, false)) }
else if items.i128_sub_fn() == def_id { Some((mir::BinOp::Sub, false)) }
else if items.u128_sub_fn() == def_id { Some((mir::BinOp::Sub, false)) }
else if items.i128_mul_fn() == def_id { Some((mir::BinOp::Mul, false)) }
else if items.u128_mul_fn() == def_id { Some((mir::BinOp::Mul, false)) }
else if items.i128_div_fn() == def_id { Some((mir::BinOp::Div, false)) }
else if items.u128_div_fn() == def_id { Some((mir::BinOp::Div, false)) }
else if items.i128_rem_fn() == def_id { Some((mir::BinOp::Rem, false)) }
else if items.u128_rem_fn() == def_id { Some((mir::BinOp::Rem, false)) }
else if items.i128_shl_fn() == def_id { Some((mir::BinOp::Shl, false)) }
else if items.u128_shl_fn() == def_id { Some((mir::BinOp::Shl, false)) }
else if items.i128_shr_fn() == def_id { Some((mir::BinOp::Shr, false)) }
else if items.u128_shr_fn() == def_id { Some((mir::BinOp::Shr, false)) }
else if items.i128_addo_fn() == def_id { Some((mir::BinOp::Add, true)) }
else if items.u128_addo_fn() == def_id { Some((mir::BinOp::Add, true)) }
else if items.i128_subo_fn() == def_id { Some((mir::BinOp::Sub, true)) }
else if items.u128_subo_fn() == def_id { Some((mir::BinOp::Sub, true)) }
else if items.i128_mulo_fn() == def_id { Some((mir::BinOp::Mul, true)) }
else if items.u128_mulo_fn() == def_id { Some((mir::BinOp::Mul, true)) }
else if items.i128_shlo_fn() == def_id { Some((mir::BinOp::Shl, true)) }
else if items.u128_shlo_fn() == def_id { Some((mir::BinOp::Shl, true)) }
else if items.i128_shro_fn() == def_id { Some((mir::BinOp::Shr, true)) }
else if items.u128_shro_fn() == def_id { Some((mir::BinOp::Shr, true)) }
else { None }
}
pub fn stability(self) -> Lrc<stability::Index<'tcx>> {
self.stability_index(LOCAL_CRATE)
}
pub fn crates(self) -> Lrc<Vec<CrateNum>> {
self.all_crate_nums(LOCAL_CRATE)
}
pub fn features(self) -> Lrc<feature_gate::Features> {
self.features_query(LOCAL_CRATE)
}
pub fn def_key(self, id: DefId) -> hir_map::DefKey {
if id.is_local() {
self.hir().def_key(id)
} else {
self.cstore.def_key(id)
}
}
/// Convert a `DefId` into its fully expanded `DefPath` (every
/// `DefId` is really just an interned def-path).
///
/// Note that if `id` is not local to this crate, the result will
/// be a non-local `DefPath`.
pub fn def_path(self, id: DefId) -> hir_map::DefPath {
if id.is_local() {
self.hir().def_path(id)
} else {
self.cstore.def_path(id)
}
}
#[inline]
pub fn def_path_hash(self, def_id: DefId) -> hir_map::DefPathHash {
if def_id.is_local() {
self.hir().definitions().def_path_hash(def_id.index)
} else {
self.cstore.def_path_hash(def_id)
}
}
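    /// Returns a string of the form `crate_name[ab12]::some::path`, where the
    /// bracketed part is the first four hex digits of the crate
    /// disambiguator, intended for debug output only.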
pub fn def_path_debug_str(self, def_id: DefId) -> String {
// We are explicitly not going through queries here in order to get
// crate name and disambiguator since this code is called from debug!()
// statements within the query system and we'd run into endless
// recursion otherwise.
let (crate_name, crate_disambiguator) = if def_id.is_local() {
(self.crate_name.clone(),
self.sess.local_crate_disambiguator())
} else {
(self.cstore.crate_name_untracked(def_id.krate),
self.cstore.crate_disambiguator_untracked(def_id.krate))
};
format!("{}[{}]{}",
crate_name,
// Don't print the whole crate disambiguator. That's just
// annoying in debug output.
&(crate_disambiguator.to_fingerprint().to_hex())[..4],
self.def_path(def_id).to_string_no_crate())
}
pub fn metadata_encoding_version(self) -> Vec<u8> {
self.cstore.metadata_encoding_version().to_vec()
}
// Note that this is *untracked* and should only be used within the query
// system if the result is otherwise tracked through queries
pub fn crate_data_as_rc_any(self, cnum: CrateNum) -> Lrc<dyn Any> {
self.cstore.crate_data_as_rc_any(cnum)
}
#[inline(always)]
pub fn create_stable_hashing_context(self) -> StableHashingContext<'a> {
let krate = self.gcx.hir_map.forest.untracked_krate();
StableHashingContext::new(self.sess,
krate,
self.hir().definitions(),
self.cstore)
}
// This method makes sure that we have a DepNode and a Fingerprint for
// every upstream crate. It needs to be called once right after the tcx is
// created.
// With full-fledged red/green, the method will probably become unnecessary
// as this will be done on-demand.
pub fn allocate_metadata_dep_nodes(self) {
// We cannot use the query versions of crates() and crate_hash(), since
// those would need the DepNodes that we are allocating here.
for cnum in self.cstore.crates_untracked() {
let dep_node = DepNode::new(self, DepConstructor::CrateMetadata(cnum));
let crate_hash = self.cstore.crate_hash_untracked(cnum);
self.dep_graph.with_task(dep_node,
self,
crate_hash,
|_, x| x // No transformation needed
);
}
}
// This method exercises the `in_scope_traits_map` query for all possible
// values so that we have their fingerprints available in the DepGraph.
// This is only required as long as we still use the old dependency tracking
// which needs to have the fingerprints of all input nodes beforehand.
pub fn precompute_in_scope_traits_hashes(self) {
for &def_index in self.trait_map.keys() {
self.in_scope_traits_map(def_index);
}
}
pub fn serialize_query_result_cache<E>(self,
encoder: &mut E)
-> Result<(), E::Error>
where E: ty::codec::TyEncoder
{
self.queries.on_disk_cache.serialize(self.global_tcx(), encoder)
}
/// This checks whether one is allowed to have pattern bindings
/// that bind-by-move on a match arm that has a guard, e.g.:
///
/// ```rust
/// match foo { A(inner) if { /* something */ } => ..., ... }
/// ```
///
/// It is separate from check_for_mutation_in_guard_via_ast_walk,
/// because that method has a narrower effect that can be toggled
/// off via a separate `-Z` flag, at least for the short term.
pub fn allow_bind_by_move_patterns_with_guards(self) -> bool {
self.features().bind_by_move_pattern_guards && self.use_mir_borrowck()
}
/// If true, we should use a naive AST walk to determine if match
/// guard could perform bad mutations (or mutable-borrows).
pub fn check_for_mutation_in_guard_via_ast_walk(self) -> bool {
// If someone requests the feature, then be a little more
// careful and ensure that MIR-borrowck is enabled (which can
// happen via edition selection, via `feature(nll)`, or via an
// appropriate `-Z` flag) before disabling the mutation check.
if self.allow_bind_by_move_patterns_with_guards() {
return false;
}
return true;
}
/// If true, we should use the AST-based borrowck (we may *also* use
/// the MIR-based borrowck).
pub fn use_ast_borrowck(self) -> bool {
self.borrowck_mode().use_ast()
}
/// If true, we should use the MIR-based borrowck (we may *also* use
/// the AST-based borrowck).
pub fn use_mir_borrowck(self) -> bool {
self.borrowck_mode().use_mir()
}
/// If true, we should use the MIR-based borrow check, but also
/// fall back on the AST borrow check if the MIR-based one errors.
pub fn migrate_borrowck(self) -> bool {
self.borrowck_mode().migrate()
}
/// If true, make MIR codegen for `match` emit a temp that holds a
/// borrow of the input to the match expression.
pub fn generate_borrow_of_any_match_input(&self) -> bool {
self.emit_read_for_match()
}
/// If true, make MIR codegen for `match` emit FakeRead
/// statements (which simulate the maximal effect of executing the
/// patterns in a match arm).
pub fn emit_read_for_match(&self) -> bool {
self.use_mir_borrowck() && !self.sess.opts.debugging_opts.nll_dont_emit_read_for_match
}
/// If true, pattern variables for use in guards on match arms
/// will be bound as references to the data, and occurrences of
/// those variables in the guard expression will implicitly
/// dereference those bindings. (See rust-lang/rust#27282.)
pub fn all_pat_vars_are_implicit_refs_within_guards(self) -> bool {
self.borrowck_mode().use_mir()
}
/// If true, we should enable two-phase borrows checks. This is
/// done with either: `-Ztwo-phase-borrows`, `#![feature(nll)]`,
/// or by opting into an edition after 2015.
pub fn two_phase_borrows(self) -> bool {
self.sess.rust_2018() || self.features().nll ||
self.sess.opts.debugging_opts.two_phase_borrows
}
/// What mode(s) of borrowck should we run? AST? MIR? both?
/// (Also considers the `#![feature(nll)]` setting.)
pub fn borrowck_mode(&self) -> BorrowckMode {
// Here are the main constraints we need to deal with:
//
// 1. An opts.borrowck_mode of `BorrowckMode::Ast` is
// synonymous with no `-Z borrowck=...` flag at all.
// (This is arguably a historical accident.)
//
// 2. `BorrowckMode::Migrate` is the limited migration to
// NLL that we are deploying with the 2018 edition.
//
// 3. We want to allow developers on the Nightly channel
// to opt back into the "hard error" mode for NLL,
// (which they can do via specifying `#![feature(nll)]`
// explicitly in their crate).
//
// So, this precedence list is how pnkfelix chose to work with
// the above constraints:
//
// * `#![feature(nll)]` *always* means use NLL with hard
// errors. (To simplify the code here, it now even overrides
// a user's attempt to specify `-Z borrowck=compare`, which
// we arguably do not need anymore and should remove.)
//
// * Otherwise, if no `-Z borrowck=...` flag was given (or
// if `borrowck=ast` was specified), then use the default
// as required by the edition.
//
// * Otherwise, use the behavior requested via `-Z borrowck=...`
if self.features().nll { return BorrowckMode::Mir; }
match self.sess.opts.borrowck_mode {
mode @ BorrowckMode::Mir |
mode @ BorrowckMode::Compare |
mode @ BorrowckMode::Migrate => mode,
BorrowckMode::Ast => match self.sess.edition() {
Edition::Edition2015 => BorrowckMode::Ast,
Edition::Edition2018 => BorrowckMode::Migrate,
},
}
}
#[inline]
pub fn local_crate_exports_generics(self) -> bool {
debug_assert!(self.sess.opts.share_generics());
self.sess.crate_types.borrow().iter().any(|crate_type| {
match crate_type {
CrateType::Executable |
CrateType::Staticlib |
CrateType::ProcMacro |
CrateType::Cdylib => false,
CrateType::Rlib |
CrateType::Dylib => true,
}
})
}
// This method returns the DefId and the BoundRegion corresponding to the given region.
pub fn is_suitable_region(&self, region: Region<'tcx>) -> Option<FreeRegionInfo> {
let (suitable_region_binding_scope, bound_region) = match *region {
ty::ReFree(ref free_region) => (free_region.scope, free_region.bound_region),
ty::ReEarlyBound(ref ebr) => (
self.parent_def_id(ebr.def_id).unwrap(),
ty::BoundRegion::BrNamed(ebr.def_id, ebr.name),
),
_ => return None, // not a free region
};
let node_id = self.hir()
.as_local_node_id(suitable_region_binding_scope)
.unwrap();
let is_impl_item = match self.hir().find(node_id) {
Some(Node::Item(..)) | Some(Node::TraitItem(..)) => false,
Some(Node::ImplItem(..)) => {
self.is_bound_region_in_impl_item(suitable_region_binding_scope)
}
_ => return None,
};
return Some(FreeRegionInfo {
def_id: suitable_region_binding_scope,
boundregion: bound_region,
            is_impl_item,
});
}
pub fn return_type_impl_trait(
&self,
scope_def_id: DefId,
) -> Option<Ty<'tcx>> {
// HACK: `type_of_def_id()` will fail on these (#55796), so return None
let node_id = self.hir().as_local_node_id(scope_def_id).unwrap();
match self.hir().get(node_id) {
Node::Item(item) => {
match item.node {
ItemKind::Fn(..) => { /* type_of_def_id() will work */ }
_ => {
return None;
}
}
}
_ => { /* type_of_def_id() will work or panic */ }
}
let ret_ty = self.type_of(scope_def_id);
match ret_ty.sty {
ty::FnDef(_, _) => {
let sig = ret_ty.fn_sig(*self);
let output = self.erase_late_bound_regions(&sig.output());
if output.is_impl_trait() {
Some(output)
} else {
None
}
}
_ => None
}
}
// Here we check if the bound region is in Impl Item.
pub fn is_bound_region_in_impl_item(
&self,
suitable_region_binding_scope: DefId,
) -> bool {
let container_id = self.associated_item(suitable_region_binding_scope)
.container
.id();
if self.impl_trait_ref(container_id).is_some() {
// For now, we do not try to target impls of traits. This is
// because this message is going to suggest that the user
// change the fn signature, but they may not be free to do so,
// since the signature must match the trait.
//
// FIXME(#42706) -- in some cases, we could do better here.
return true;
}
false
}
}
impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
pub fn encode_metadata(self)
-> EncodedMetadata
{
self.cstore.encode_metadata(self)
}
}
impl<'gcx> GlobalCtxt<'gcx> {
/// Call the closure with a local `TyCtxt` using the given arena.
/// `interners` is a slot passed so we can create a CtxtInterners
/// with the same lifetime as `arena`.
pub fn enter_local<'tcx, F, R>(
&'gcx self,
arena: &'tcx SyncDroplessArena,
interners: &'tcx mut Option<CtxtInterners<'tcx>>,
f: F
) -> R
where
F: FnOnce(TyCtxt<'tcx, 'gcx, 'tcx>) -> R,
'gcx: 'tcx,
{
*interners = Some(CtxtInterners::new(&arena));
let tcx = TyCtxt {
gcx: self,
interners: interners.as_ref().unwrap(),
dummy: PhantomData,
};
ty::tls::with_related_context(tcx.global_tcx(), |icx| {
let new_icx = ty::tls::ImplicitCtxt {
tcx,
query: icx.query.clone(),
diagnostics: icx.diagnostics,
layout_depth: icx.layout_depth,
task_deps: icx.task_deps,
};
ty::tls::enter_context(&new_icx, |_| {
f(tcx)
})
})
}
}
/// A trait implemented for all X<'a> types which can be safely and
/// efficiently converted to X<'tcx> as long as they are part of the
/// provided TyCtxt<'tcx>.
/// This can be done, for example, for Ty<'tcx> or &'tcx Substs<'tcx>
/// by looking them up in their respective interners.
///
/// However, this is still not the best implementation as it does
/// need to compare the components, even for interned values.
/// It would be more efficient if TypedArena provided a way to
/// determine whether the address is in the allocated range.
///
/// None is returned if the value or one of the components is not part
/// of the provided context.
/// For Ty, None can be returned if either the type interner doesn't
/// contain the TyKind key or if the address of the interned
/// pointer differs. The latter case is possible if a primitive type,
/// e.g., `()` or `u8`, was interned in a different context.
pub trait Lift<'tcx>: fmt::Debug {
type Lifted: fmt::Debug + 'tcx;
fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Self::Lifted>;
}
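// Illustrative sketch (not from the original source): a value captured from
// an inference context can be brought back into scope for a given context
// with `tcx.lift(&value)` or `tcx.lift_to_global(&value)`; both return `None`
// when the value refers to data interned only in a shorter-lived arena.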
macro_rules! nop_lift {
($ty:ty => $lifted:ty) => {
impl<'a, 'tcx> Lift<'tcx> for $ty {
type Lifted = $lifted;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
// Also try in the global tcx if we're not that.
if !tcx.is_global() {
self.lift_to_tcx(tcx.global_tcx())
} else {
None
}
}
}
};
}
macro_rules! nop_list_lift {
($ty:ty => $lifted:ty) => {
impl<'a, 'tcx> Lift<'tcx> for &'a List<$ty> {
type Lifted = &'tcx List<$lifted>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
if self.is_empty() {
return Some(List::empty());
}
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
// Also try in the global tcx if we're not that.
if !tcx.is_global() {
self.lift_to_tcx(tcx.global_tcx())
} else {
None
}
}
}
};
}
nop_lift!{Ty<'a> => Ty<'tcx>}
nop_lift!{Region<'a> => Region<'tcx>}
nop_lift!{Goal<'a> => Goal<'tcx>}
nop_lift!{&'a LazyConst<'a> => &'tcx LazyConst<'tcx>}
nop_list_lift!{Goal<'a> => Goal<'tcx>}
nop_list_lift!{Clause<'a> => Clause<'tcx>}
nop_list_lift!{Ty<'a> => Ty<'tcx>}
nop_list_lift!{ExistentialPredicate<'a> => ExistentialPredicate<'tcx>}
nop_list_lift!{Predicate<'a> => Predicate<'tcx>}
nop_list_lift!{CanonicalVarInfo => CanonicalVarInfo}
nop_list_lift!{ProjectionKind<'a> => ProjectionKind<'tcx>}
// this is the impl for `&'a Substs<'a>`
nop_list_lift!{Kind<'a> => Kind<'tcx>}
impl<'a, 'tcx> Lift<'tcx> for &'a mir::interpret::Allocation {
type Lifted = &'tcx mir::interpret::Allocation;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
assert!(tcx.global_arenas.const_allocs.in_arena(*self as *const _));
Some(unsafe { mem::transmute(*self) })
}
}
pub mod tls {
use super::{GlobalCtxt, TyCtxt};
use std::fmt;
use std::mem;
use std::marker::PhantomData;
use std::ptr;
use syntax_pos;
use ty::query;
use errors::{Diagnostic, TRACK_DIAGNOSTICS};
use rustc_data_structures::OnDrop;
use rustc_data_structures::sync::{self, Lrc, Lock};
use rustc_data_structures::thin_vec::ThinVec;
use dep_graph::TaskDeps;
#[cfg(not(parallel_queries))]
use std::cell::Cell;
#[cfg(parallel_queries)]
use rayon_core;
/// This is the implicit state of rustc. It contains the current
/// TyCtxt and query. It is updated when creating a local interner or
/// executing a new query. Whenever there's a TyCtxt value available
/// you should also have access to an ImplicitCtxt through the functions
/// in this module.
#[derive(Clone)]
pub struct ImplicitCtxt<'a, 'gcx: 'tcx, 'tcx> {
/// The current TyCtxt. Initially created by `enter_global` and updated
/// by `enter_local` with a new local interner
pub tcx: TyCtxt<'tcx, 'gcx, 'tcx>,
/// The current query job, if any. This is updated by JobOwner::start in
/// ty::query::plumbing when executing a query
pub query: Option<Lrc<query::QueryJob<'gcx>>>,
/// Where to store diagnostics for the current query job, if any.
/// This is updated by JobOwner::start in ty::query::plumbing when executing a query
pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>,
/// Used to prevent layout from recursing too deeply.
pub layout_depth: usize,
/// The current dep graph task. This is used to add dependencies to queries
/// when executing them
pub task_deps: Option<&'a Lock<TaskDeps>>,
}
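    // Illustrative sketch (not from the original source): consumers rarely
    // name `ImplicitCtxt` directly; they go through the accessors defined
    // below, e.g.:
    //
    //     let name = ty::tls::with(|tcx| tcx.crate_name);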
/// Sets Rayon's thread local variable which is preserved for Rayon jobs
/// to `value` during the call to `f`. It is restored to its previous value after.
/// This is used to set the pointer to the new ImplicitCtxt.
#[cfg(parallel_queries)]
#[inline]
fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
rayon_core::tlv::with(value, f)
}
/// Gets Rayon's thread local variable which is preserved for Rayon jobs.
/// This is used to get the pointer to the current ImplicitCtxt.
#[cfg(parallel_queries)]
#[inline]
fn get_tlv() -> usize {
rayon_core::tlv::get()
}
/// A thread local variable which stores a pointer to the current ImplicitCtxt
#[cfg(not(parallel_queries))]
thread_local!(static TLV: Cell<usize> = Cell::new(0));
/// Sets TLV to `value` during the call to `f`.
/// It is restored to its previous value after.
/// This is used to set the pointer to the new ImplicitCtxt.
#[cfg(not(parallel_queries))]
#[inline]
fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
let old = get_tlv();
let _reset = OnDrop(move || TLV.with(|tlv| tlv.set(old)));
TLV.with(|tlv| tlv.set(value));
f()
}
/// This is used to get the pointer to the current ImplicitCtxt.
#[cfg(not(parallel_queries))]
fn get_tlv() -> usize {
TLV.with(|tlv| tlv.get())
}
/// This is a callback from libsyntax as it cannot access the implicit state
/// in librustc otherwise
fn span_debug(span: syntax_pos::Span, f: &mut fmt::Formatter<'_>) -> fmt::Result {
with_opt(|tcx| {
if let Some(tcx) = tcx {
write!(f, "{}", tcx.sess.source_map().span_to_string(span))
} else {
syntax_pos::default_span_debug(span, f)
}
})
}
/// This is a callback from libsyntax as it cannot access the implicit state
    /// in librustc otherwise. It is invoked when diagnostic messages are
/// emitted and stores them in the current query, if there is one.
fn track_diagnostic(diagnostic: &Diagnostic) {
with_context_opt(|icx| {
if let Some(icx) = icx {
if let Some(ref diagnostics) = icx.diagnostics {
let mut diagnostics = diagnostics.lock();
diagnostics.extend(Some(diagnostic.clone()));
}
}
})
}
/// Sets up the callbacks from libsyntax on the current thread
pub fn with_thread_locals<F, R>(f: F) -> R
where F: FnOnce() -> R
{
syntax_pos::SPAN_DEBUG.with(|span_dbg| {
let original_span_debug = span_dbg.get();
span_dbg.set(span_debug);
let _on_drop = OnDrop(move || {
span_dbg.set(original_span_debug);
});
TRACK_DIAGNOSTICS.with(|current| {
let original = current.get();
current.set(track_diagnostic);
let _on_drop = OnDrop(move || {
current.set(original);
});
f()
})
})
}
/// Sets `context` as the new current ImplicitCtxt for the duration of the function `f`
#[inline]
pub fn enter_context<'a, 'gcx: 'tcx, 'tcx, F, R>(context: &ImplicitCtxt<'a, 'gcx, 'tcx>,
f: F) -> R
where F: FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R
{
set_tlv(context as *const _ as usize, || {
f(&context)
})
}
/// Enters GlobalCtxt by setting up libsyntax callbacks and
    /// creating an initial TyCtxt and ImplicitCtxt.
    /// This happens once per rustc session, and TyCtxts only exist
    /// inside the `f` function.
pub fn enter_global<'gcx, F, R>(gcx: &'gcx GlobalCtxt<'gcx>, f: F) -> R
where F: FnOnce(TyCtxt<'gcx, 'gcx, 'gcx>) -> R
{
with_thread_locals(|| {
// Update GCX_PTR to indicate there's a GlobalCtxt available
GCX_PTR.with(|lock| {
*lock.lock() = gcx as *const _ as usize;
});
// Set GCX_PTR back to 0 when we exit
let _on_drop = OnDrop(move || {
GCX_PTR.with(|lock| *lock.lock() = 0);
});
let tcx = TyCtxt {
gcx,
interners: &gcx.global_interners,
dummy: PhantomData,
};
let icx = ImplicitCtxt {
tcx,
query: None,
diagnostics: None,
layout_depth: 0,
task_deps: None,
};
enter_context(&icx, |_| {
f(tcx)
})
})
}
/// Stores a pointer to the GlobalCtxt if one is available.
/// This is used to access the GlobalCtxt in the deadlock handler
/// given to Rayon.
scoped_thread_local!(pub static GCX_PTR: Lock<usize>);
/// Creates a TyCtxt and ImplicitCtxt based on the GCX_PTR thread local.
/// This is used in the deadlock handler.
pub unsafe fn with_global<F, R>(f: F) -> R
where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R
{
let gcx = GCX_PTR.with(|lock| *lock.lock());
assert!(gcx != 0);
let gcx = &*(gcx as *const GlobalCtxt<'_>);
let tcx = TyCtxt {
gcx,
interners: &gcx.global_interners,
dummy: PhantomData,
};
let icx = ImplicitCtxt {
query: None,
diagnostics: None,
tcx,
layout_depth: 0,
task_deps: None,
};
enter_context(&icx, |_| f(tcx))
}
/// Allows access to the current ImplicitCtxt in a closure if one is available
#[inline]
pub fn with_context_opt<F, R>(f: F) -> R
where F: for<'a, 'gcx, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'gcx, 'tcx>>) -> R
{
let context = get_tlv();
if context == 0 {
f(None)
} else {
// We could get a ImplicitCtxt pointer from another thread.
// Ensure that ImplicitCtxt is Sync
sync::assert_sync::<ImplicitCtxt<'_, '_, '_>>();
unsafe { f(Some(&*(context as *const ImplicitCtxt<'_, '_, '_>))) }
}
}
/// Allows access to the current ImplicitCtxt.
/// Panics if there is no ImplicitCtxt available
#[inline]
pub fn with_context<F, R>(f: F) -> R
where F: for<'a, 'gcx, 'tcx> FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R
{
with_context_opt(|opt_context| f(opt_context.expect("no ImplicitCtxt stored in tls")))
}
/// Allows access to the current ImplicitCtxt whose tcx field has the same global
/// interner as the tcx argument passed in. This means the closure is given an ImplicitCtxt
/// with the same 'gcx lifetime as the TyCtxt passed in.
/// This will panic if you pass it a TyCtxt which has a different global interner from
/// the current ImplicitCtxt's tcx field.
#[inline]
pub fn with_related_context<'a, 'gcx, 'tcx1, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx1>, f: F) -> R
where F: for<'b, 'tcx2> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx2>) -> R
{
with_context(|context| {
unsafe {
assert!(ptr::eq(context.tcx.gcx, tcx.gcx));
let context: &ImplicitCtxt<'_, '_, '_> = mem::transmute(context);
f(context)
}
})
}
/// Allows access to the current ImplicitCtxt whose tcx field has the same global
/// interner and local interner as the tcx argument passed in. This means the closure
/// is given an ImplicitCtxt with the same 'tcx and 'gcx lifetimes as the TyCtxt passed in.
/// This will panic if you pass it a TyCtxt which has a different global interner or
/// a different local interner from the current ImplicitCtxt's tcx field.
#[inline]
pub fn with_fully_related_context<'a, 'gcx, 'tcx, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx>, f: F) -> R
where F: for<'b> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx>) -> R
{
with_context(|context| {
unsafe {
assert!(ptr::eq(context.tcx.gcx, tcx.gcx));
assert!(ptr::eq(context.tcx.interners, tcx.interners));
let context: &ImplicitCtxt<'_, '_, '_> = mem::transmute(context);
f(context)
}
})
}
/// Allows access to the TyCtxt in the current ImplicitCtxt.
/// Panics if there is no ImplicitCtxt available
#[inline]
pub fn with<F, R>(f: F) -> R
where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R
{
with_context(|context| f(context.tcx))
}
/// Allows access to the TyCtxt in the current ImplicitCtxt.
/// The closure is passed None if there is no ImplicitCtxt available
#[inline]
pub fn with_opt<F, R>(f: F) -> R
where F: for<'a, 'gcx, 'tcx> FnOnce(Option<TyCtxt<'a, 'gcx, 'tcx>>) -> R
{
with_context_opt(|opt_context| f(opt_context.map(|context| context.tcx)))
}
}
macro_rules! sty_debug_print {
($ctxt: expr, $($variant: ident),*) => {{
// curious inner module to allow variant names to be used as
// variable names.
#[allow(non_snake_case)]
mod inner {
use ty::{self, TyCtxt};
use ty::context::Interned;
#[derive(Copy, Clone)]
struct DebugStat {
total: usize,
region_infer: usize,
ty_infer: usize,
both_infer: usize,
}
pub fn go(tcx: TyCtxt<'_, '_, '_>) {
let mut total = DebugStat {
total: 0,
region_infer: 0, ty_infer: 0, both_infer: 0,
};
$(let mut $variant = total;)*
for &Interned(t) in tcx.interners.type_.borrow().keys() {
let variant = match t.sty {
ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
ty::Float(..) | ty::Str | ty::Never => continue,
ty::Error => /* unimportant */ continue,
$(ty::$variant(..) => &mut $variant,)*
};
let region = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER);
variant.total += 1;
total.total += 1;
if region { total.region_infer += 1; variant.region_infer += 1 }
if ty { total.ty_infer += 1; variant.ty_infer += 1 }
if region && ty { total.both_infer += 1; variant.both_infer += 1 }
}
println!("Ty interner total ty region both");
$(println!(" {:18}: {uses:6} {usespc:4.1}%, \
{ty:4.1}% {region:5.1}% {both:4.1}%",
stringify!($variant),
uses = $variant.total,
usespc = $variant.total as f64 * 100.0 / total.total as f64,
ty = $variant.ty_infer as f64 * 100.0 / total.total as f64,
region = $variant.region_infer as f64 * 100.0 / total.total as f64,
both = $variant.both_infer as f64 * 100.0 / total.total as f64);
)*
println!(" total {uses:6} \
{ty:4.1}% {region:5.1}% {both:4.1}%",
uses = total.total,
ty = total.ty_infer as f64 * 100.0 / total.total as f64,
region = total.region_infer as f64 * 100.0 / total.total as f64,
both = total.both_infer as f64 * 100.0 / total.total as f64)
}
}
inner::go($ctxt)
}}
}
impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
pub fn print_debug_stats(self) {
sty_debug_print!(
self,
Adt, Array, Slice, RawPtr, Ref, FnDef, FnPtr, Placeholder,
Generator, GeneratorWitness, Dynamic, Closure, Tuple, Bound,
Param, Infer, UnnormalizedProjection, Projection, Opaque, Foreign);
println!("Substs interner: #{}", self.interners.substs.borrow().len());
println!("Region interner: #{}", self.interners.region.borrow().len());
println!("Stability interner: #{}", self.stability_interner.borrow().len());
println!("Allocation interner: #{}", self.allocation_interner.borrow().len());
println!("Layout interner: #{}", self.layout_interner.borrow().len());
}
}
/// An entry in an interner.
struct Interned<'tcx, T: 'tcx+?Sized>(&'tcx T);
impl<'tcx, T: 'tcx+?Sized> Clone for Interned<'tcx, T> {
fn clone(&self) -> Self {
Interned(self.0)
}
}
impl<'tcx, T: 'tcx+?Sized> Copy for Interned<'tcx, T> {}
// N.B., an `Interned<Ty>` compares and hashes as a sty.
impl<'tcx> PartialEq for Interned<'tcx, TyS<'tcx>> {
fn eq(&self, other: &Interned<'tcx, TyS<'tcx>>) -> bool {
self.0.sty == other.0.sty
}
}
impl<'tcx> Eq for Interned<'tcx, TyS<'tcx>> {}
impl<'tcx> Hash for Interned<'tcx, TyS<'tcx>> {
fn hash<H: Hasher>(&self, s: &mut H) {
self.0.sty.hash(s)
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<TyKind<'lcx>> for Interned<'tcx, TyS<'tcx>> {
fn borrow<'a>(&'a self) -> &'a TyKind<'lcx> {
&self.0.sty
}
}
// N.B., an `Interned<List<T>>` compares and hashes as its elements.
impl<'tcx, T: PartialEq> PartialEq for Interned<'tcx, List<T>> {
fn eq(&self, other: &Interned<'tcx, List<T>>) -> bool {
self.0[..] == other.0[..]
}
}
impl<'tcx, T: Eq> Eq for Interned<'tcx, List<T>> {}
impl<'tcx, T: Hash> Hash for Interned<'tcx, List<T>> {
fn hash<H: Hasher>(&self, s: &mut H) {
self.0[..].hash(s)
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[Ty<'lcx>]> for Interned<'tcx, List<Ty<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Ty<'lcx>] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[CanonicalVarInfo]> for Interned<'tcx, List<CanonicalVarInfo>> {
fn borrow<'a>(&'a self) -> &'a [CanonicalVarInfo] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[Kind<'lcx>]> for Interned<'tcx, Substs<'tcx>> {
fn borrow<'a>(&'a self) -> &'a [Kind<'lcx>] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[ProjectionKind<'lcx>]>
for Interned<'tcx, List<ProjectionKind<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [ProjectionKind<'lcx>] {
&self.0[..]
}
}
impl<'tcx> Borrow<RegionKind> for Interned<'tcx, RegionKind> {
fn borrow<'a>(&'a self) -> &'a RegionKind {
&self.0
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<GoalKind<'lcx>> for Interned<'tcx, GoalKind<'tcx>> {
fn borrow<'a>(&'a self) -> &'a GoalKind<'lcx> {
&self.0
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[ExistentialPredicate<'lcx>]>
for Interned<'tcx, List<ExistentialPredicate<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [ExistentialPredicate<'lcx>] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[Predicate<'lcx>]>
for Interned<'tcx, List<Predicate<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Predicate<'lcx>] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<Const<'lcx>> for Interned<'tcx, Const<'tcx>> {
fn borrow<'a>(&'a self) -> &'a Const<'lcx> {
&self.0
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[Clause<'lcx>]>
for Interned<'tcx, List<Clause<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Clause<'lcx>] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[Goal<'lcx>]>
for Interned<'tcx, List<Goal<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Goal<'lcx>] {
&self.0[..]
}
}
macro_rules! intern_method {
($lt_tcx:tt, $name:ident: $method:ident($alloc:ty,
$alloc_method:expr,
$alloc_to_key:expr,
$keep_in_local_tcx:expr) -> $ty:ty) => {
impl<'a, 'gcx, $lt_tcx> TyCtxt<'a, 'gcx, $lt_tcx> {
pub fn $method(self, v: $alloc) -> &$lt_tcx $ty {
let key = ($alloc_to_key)(&v);
// HACK(eddyb) Depend on flags being accurate to
// determine that all contents are in the global tcx.
// See comments on Lift for why we can't use that.
if ($keep_in_local_tcx)(&v) {
self.interners.$name.borrow_mut().intern_ref(key, || {
// Make sure we don't end up with inference
// types/regions in the global tcx.
if self.is_global() {
bug!("Attempted to intern `{:?}` which contains \
inference types/regions in the global type context",
v);
}
Interned($alloc_method(&self.interners.arena, v))
}).0
} else {
self.global_interners.$name.borrow_mut().intern_ref(key, || {
// This transmutes $alloc<'tcx> to $alloc<'gcx>
let v = unsafe {
mem::transmute(v)
};
let i: &$lt_tcx $ty = $alloc_method(&self.global_interners.arena, v);
// Cast to 'gcx
let i = unsafe { mem::transmute(i) };
Interned(i)
}).0
}
}
}
}
}
macro_rules! direct_interners {
($lt_tcx:tt, $($name:ident: $method:ident($keep_in_local_tcx:expr) -> $ty:ty),+) => {
$(impl<$lt_tcx> PartialEq for Interned<$lt_tcx, $ty> {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl<$lt_tcx> Eq for Interned<$lt_tcx, $ty> {}
impl<$lt_tcx> Hash for Interned<$lt_tcx, $ty> {
fn hash<H: Hasher>(&self, s: &mut H) {
self.0.hash(s)
}
}
intern_method!(
$lt_tcx,
$name: $method($ty,
|a: &$lt_tcx SyncDroplessArena, v| -> &$lt_tcx $ty { a.alloc(v) },
|x| x,
$keep_in_local_tcx) -> $ty);)+
}
}
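// `keep_local` reports whether a value mentions inference types or regions
// (see `KEEP_IN_LOCAL_TCX`); such values must stay in the thread-local arena
// rather than being interned globally.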
pub fn keep_local<'tcx, T: ty::TypeFoldable<'tcx>>(x: &T) -> bool {
x.has_type_flags(ty::TypeFlags::KEEP_IN_LOCAL_TCX)
}
direct_interners!('tcx,
region: mk_region(|r: &RegionKind| r.keep_in_local_tcx()) -> RegionKind,
goal: mk_goal(|c: &GoalKind<'_>| keep_local(c)) -> GoalKind<'tcx>
);
macro_rules! slice_interners {
($($field:ident: $method:ident($ty:ident)),+) => (
$(intern_method!( 'tcx, $field: $method(
&[$ty<'tcx>],
|a, v| List::from_arena(a, v),
Deref::deref,
|xs: &[$ty<'_>]| xs.iter().any(keep_local)) -> List<$ty<'tcx>>);)+
)
}
slice_interners!(
existential_predicates: _intern_existential_predicates(ExistentialPredicate),
predicates: _intern_predicates(Predicate),
type_list: _intern_type_list(Ty),
substs: _intern_substs(Kind),
clauses: _intern_clauses(Clause),
goal_list: _intern_goals(Goal),
projs: _intern_projs(ProjectionKind)
);
// This isn't a perfect fit: CanonicalVarInfo slices are always
// allocated in the global arena, so this `intern_method!` macro is
// overly general. But we just return false for the code that checks
// whether they belong in the thread-local arena, so no harm done, and
// seems better than open-coding the rest.
intern_method! {
'tcx,
canonical_var_infos: _intern_canonical_var_infos(
&[CanonicalVarInfo],
|a, v| List::from_arena(a, v),
Deref::deref,
|_xs: &[CanonicalVarInfo]| -> bool { false }
) -> List<CanonicalVarInfo>
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
/// Given a `fn` type, returns an equivalent `unsafe fn` type;
/// that is, a `fn` type that is equivalent in every way for being
/// unsafe.
pub fn safe_to_unsafe_fn_ty(self, sig: PolyFnSig<'tcx>) -> Ty<'tcx> {
assert_eq!(sig.unsafety(), hir::Unsafety::Normal);
self.mk_fn_ptr(sig.map_bound(|sig| ty::FnSig {
unsafety: hir::Unsafety::Unsafe,
..sig
}))
}
/// Given a closure signature `sig`, returns an equivalent `fn`
/// type with the same signature. Detuples and so forth -- so
/// e.g., if we have a sig with `Fn<(u32, i32)>` then you would get
/// a `fn(u32, i32)`.
pub fn coerce_closure_fn_ty(self, sig: PolyFnSig<'tcx>) -> Ty<'tcx> {
let converted_sig = sig.map_bound(|s| {
let params_iter = match s.inputs()[0].sty {
ty::Tuple(params) => {
params.into_iter().cloned()
}
_ => bug!(),
};
self.mk_fn_sig(
params_iter,
s.output(),
s.variadic,
hir::Unsafety::Normal,
abi::Abi::Rust,
)
});
self.mk_fn_ptr(converted_sig)
}
#[inline]
pub fn mk_ty(&self, st: TyKind<'tcx>) -> Ty<'tcx> {
CtxtInterners::intern_ty(&self.interners, &self.global_interners, st)
}
pub fn mk_mach_int(self, tm: ast::IntTy) -> Ty<'tcx> {
match tm {
ast::IntTy::Isize => self.types.isize,
ast::IntTy::I8 => self.types.i8,
ast::IntTy::I16 => self.types.i16,
ast::IntTy::I32 => self.types.i32,
ast::IntTy::I64 => self.types.i64,
ast::IntTy::I128 => self.types.i128,
}
}
pub fn mk_mach_uint(self, tm: ast::UintTy) -> Ty<'tcx> {
match tm {
ast::UintTy::Usize => self.types.usize,
ast::UintTy::U8 => self.types.u8,
ast::UintTy::U16 => self.types.u16,
ast::UintTy::U32 => self.types.u32,
ast::UintTy::U64 => self.types.u64,
ast::UintTy::U128 => self.types.u128,
}
}
pub fn mk_mach_float(self, tm: ast::FloatTy) -> Ty<'tcx> {
match tm {
ast::FloatTy::F32 => self.types.f32,
ast::FloatTy::F64 => self.types.f64,
}
}
#[inline]
pub fn mk_str(self) -> Ty<'tcx> {
self.mk_ty(Str)
}
#[inline]
pub fn mk_static_str(self) -> Ty<'tcx> {
self.mk_imm_ref(self.types.re_static, self.mk_str())
}
#[inline]
pub fn mk_adt(self, def: &'tcx AdtDef, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
// take a copy of substs so that we own the vectors inside
self.mk_ty(Adt(def, substs))
}
#[inline]
pub fn mk_foreign(self, def_id: DefId) -> Ty<'tcx> {
self.mk_ty(Foreign(def_id))
}
pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> {
let def_id = self.require_lang_item(lang_items::OwnedBoxLangItem);
let adt_def = self.adt_def(def_id);
let substs = Substs::for_item(self, def_id, |param, substs| {
match param.kind {
GenericParamDefKind::Lifetime => bug!(),
GenericParamDefKind::Type { has_default, .. } => {
if param.index == 0 {
ty.into()
} else {
assert!(has_default);
self.type_of(param.def_id).subst(self, substs).into()
}
}
}
});
self.mk_ty(Adt(adt_def, substs))
}
#[inline]
pub fn mk_ptr(self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
self.mk_ty(RawPtr(tm))
}
#[inline]
pub fn mk_ref(self, r: Region<'tcx>, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
self.mk_ty(Ref(r, tm.ty, tm.mutbl))
}
#[inline]
pub fn mk_mut_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ref(r, TypeAndMut { ty, mutbl: hir::MutMutable })
}
#[inline]
pub fn mk_imm_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ref(r, TypeAndMut { ty, mutbl: hir::MutImmutable })
}
#[inline]
pub fn mk_mut_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ptr(TypeAndMut { ty, mutbl: hir::MutMutable })
}
#[inline]
pub fn mk_imm_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ptr(TypeAndMut { ty, mutbl: hir::MutImmutable })
}
#[inline]
pub fn mk_nil_ptr(self) -> Ty<'tcx> {
self.mk_imm_ptr(self.mk_unit())
}
#[inline]
pub fn mk_array(self, ty: Ty<'tcx>, n: u64) -> Ty<'tcx> {
self.mk_ty(Array(ty, self.intern_lazy_const(
ty::LazyConst::Evaluated(ty::Const::from_usize(self.global_tcx(), n))
)))
}
#[inline]
pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> {
self.mk_ty(Slice(ty))
}
#[inline]
pub fn intern_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> {
self.mk_ty(Tuple(self.intern_type_list(ts)))
}
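    // Note: `intern_tup` takes an already-built slice, while `mk_tup` below
    // accepts any iterator, including iterators of `Result`s (see
    // `InternIteratorElement`).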
pub fn mk_tup<I: InternAs<[Ty<'tcx>], Ty<'tcx>>>(self, iter: I) -> I::Output {
iter.intern_with(|ts| self.mk_ty(Tuple(self.intern_type_list(ts))))
}
#[inline]
pub fn mk_unit(self) -> Ty<'tcx> {
self.types.unit
}
#[inline]
pub fn mk_diverging_default(self) -> Ty<'tcx> {
if self.features().never_type {
self.types.never
} else {
self.intern_tup(&[])
}
}
#[inline]
pub fn mk_bool(self) -> Ty<'tcx> {
self.mk_ty(Bool)
}
#[inline]
pub fn mk_fn_def(self, def_id: DefId,
substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
self.mk_ty(FnDef(def_id, substs))
}
#[inline]
pub fn mk_fn_ptr(self, fty: PolyFnSig<'tcx>) -> Ty<'tcx> {
self.mk_ty(FnPtr(fty))
}
#[inline]
pub fn mk_dynamic(
self,
obj: ty::Binder<&'tcx List<ExistentialPredicate<'tcx>>>,
reg: ty::Region<'tcx>
) -> Ty<'tcx> {
self.mk_ty(Dynamic(obj, reg))
}
#[inline]
pub fn mk_projection(self,
item_def_id: DefId,
substs: &'tcx Substs<'tcx>)
-> Ty<'tcx> {
self.mk_ty(Projection(ProjectionTy {
item_def_id,
substs,
}))
}
#[inline]
pub fn mk_closure(self, closure_id: DefId, closure_substs: ClosureSubsts<'tcx>)
-> Ty<'tcx> {
self.mk_ty(Closure(closure_id, closure_substs))
}
#[inline]
pub fn mk_generator(self,
id: DefId,
generator_substs: GeneratorSubsts<'tcx>,
movability: hir::GeneratorMovability)
-> Ty<'tcx> {
self.mk_ty(Generator(id, generator_substs, movability))
}
#[inline]
pub fn mk_generator_witness(self, types: ty::Binder<&'tcx List<Ty<'tcx>>>) -> Ty<'tcx> {
self.mk_ty(GeneratorWitness(types))
}
#[inline]
pub fn mk_var(self, v: TyVid) -> Ty<'tcx> {
self.mk_infer(TyVar(v))
}
#[inline]
pub fn mk_int_var(self, v: IntVid) -> Ty<'tcx> {
self.mk_infer(IntVar(v))
}
#[inline]
pub fn mk_float_var(self, v: FloatVid) -> Ty<'tcx> {
self.mk_infer(FloatVar(v))
}
#[inline]
pub fn mk_infer(self, it: InferTy) -> Ty<'tcx> {
self.mk_ty(Infer(it))
}
#[inline]
pub fn mk_ty_param(self,
index: u32,
name: InternedString) -> Ty<'tcx> {
        self.mk_ty(Param(ParamTy { idx: index, name }))
}
#[inline]
pub fn mk_self_type(self) -> Ty<'tcx> {
self.mk_ty_param(0, keywords::SelfUpper.name().as_interned_str())
}
pub fn mk_param_from_def(self, param: &ty::GenericParamDef) -> Kind<'tcx> {
match param.kind {
GenericParamDefKind::Lifetime => {
self.mk_region(ty::ReEarlyBound(param.to_early_bound_region_data())).into()
}
GenericParamDefKind::Type {..} => self.mk_ty_param(param.index, param.name).into(),
}
}
#[inline]
pub fn mk_opaque(self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
self.mk_ty(Opaque(def_id, substs))
}
pub fn intern_existential_predicates(self, eps: &[ExistentialPredicate<'tcx>])
-> &'tcx List<ExistentialPredicate<'tcx>> {
assert!(!eps.is_empty());
assert!(eps.windows(2).all(|w| w[0].stable_cmp(self, &w[1]) != Ordering::Greater));
self._intern_existential_predicates(eps)
}
pub fn intern_predicates(self, preds: &[Predicate<'tcx>])
-> &'tcx List<Predicate<'tcx>> {
// FIXME consider asking the input slice to be sorted to avoid
// re-interning permutations, in which case that would be asserted
// here.
        if preds.is_empty() {
// The macro-generated method below asserts we don't intern an empty slice.
List::empty()
} else {
self._intern_predicates(preds)
}
}
pub fn intern_type_list(self, ts: &[Ty<'tcx>]) -> &'tcx List<Ty<'tcx>> {
        if ts.is_empty() {
List::empty()
} else {
self._intern_type_list(ts)
}
}
pub fn intern_substs(self, ts: &[Kind<'tcx>]) -> &'tcx List<Kind<'tcx>> {
        if ts.is_empty() {
List::empty()
} else {
self._intern_substs(ts)
}
}
pub fn intern_projs(self, ps: &[ProjectionKind<'tcx>]) -> &'tcx List<ProjectionKind<'tcx>> {
        if ps.is_empty() {
List::empty()
} else {
self._intern_projs(ps)
}
}
pub fn intern_canonical_var_infos(self, ts: &[CanonicalVarInfo]) -> CanonicalVarInfos<'gcx> {
        if ts.is_empty() {
List::empty()
} else {
self.global_tcx()._intern_canonical_var_infos(ts)
}
}
pub fn intern_clauses(self, ts: &[Clause<'tcx>]) -> Clauses<'tcx> {
        if ts.is_empty() {
List::empty()
} else {
self._intern_clauses(ts)
}
}
pub fn intern_goals(self, ts: &[Goal<'tcx>]) -> Goals<'tcx> {
        if ts.is_empty() {
List::empty()
} else {
self._intern_goals(ts)
}
}
pub fn mk_fn_sig<I>(self,
inputs: I,
output: I::Item,
variadic: bool,
unsafety: hir::Unsafety,
abi: abi::Abi)
-> <I::Item as InternIteratorElement<Ty<'tcx>, ty::FnSig<'tcx>>>::Output
where I: Iterator,
I::Item: InternIteratorElement<Ty<'tcx>, ty::FnSig<'tcx>>
{
inputs.chain(iter::once(output)).intern_with(|xs| ty::FnSig {
inputs_and_output: self.intern_type_list(xs),
variadic, unsafety, abi
})
}
pub fn mk_existential_predicates<I: InternAs<[ExistentialPredicate<'tcx>],
&'tcx List<ExistentialPredicate<'tcx>>>>(self, iter: I)
-> I::Output {
iter.intern_with(|xs| self.intern_existential_predicates(xs))
}
pub fn mk_predicates<I: InternAs<[Predicate<'tcx>],
&'tcx List<Predicate<'tcx>>>>(self, iter: I)
-> I::Output {
iter.intern_with(|xs| self.intern_predicates(xs))
}
pub fn mk_type_list<I: InternAs<[Ty<'tcx>],
&'tcx List<Ty<'tcx>>>>(self, iter: I) -> I::Output {
iter.intern_with(|xs| self.intern_type_list(xs))
}
pub fn mk_substs<I: InternAs<[Kind<'tcx>],
&'tcx List<Kind<'tcx>>>>(self, iter: I) -> I::Output {
iter.intern_with(|xs| self.intern_substs(xs))
}
pub fn mk_substs_trait(self,
self_ty: Ty<'tcx>,
rest: &[Kind<'tcx>])
-> &'tcx Substs<'tcx>
{
self.mk_substs(iter::once(self_ty.into()).chain(rest.iter().cloned()))
}
pub fn mk_clauses<I: InternAs<[Clause<'tcx>], Clauses<'tcx>>>(self, iter: I) -> I::Output {
iter.intern_with(|xs| self.intern_clauses(xs))
}
pub fn mk_goals<I: InternAs<[Goal<'tcx>], Goals<'tcx>>>(self, iter: I) -> I::Output {
iter.intern_with(|xs| self.intern_goals(xs))
}
pub fn lint_hir<S: Into<MultiSpan>>(self,
lint: &'static Lint,
hir_id: HirId,
span: S,
msg: &str) {
self.struct_span_lint_hir(lint, hir_id, span.into(), msg).emit()
}
pub fn lint_node<S: Into<MultiSpan>>(self,
lint: &'static Lint,
id: NodeId,
span: S,
msg: &str) {
self.struct_span_lint_node(lint, id, span.into(), msg).emit()
}
pub fn lint_hir_note<S: Into<MultiSpan>>(self,
lint: &'static Lint,
hir_id: HirId,
span: S,
msg: &str,
note: &str) {
let mut err = self.struct_span_lint_hir(lint, hir_id, span.into(), msg);
err.note(note);
err.emit()
}
pub fn lint_node_note<S: Into<MultiSpan>>(self,
lint: &'static Lint,
id: NodeId,
span: S,
msg: &str,
note: &str) {
let mut err = self.struct_span_lint_node(lint, id, span.into(), msg);
err.note(note);
err.emit()
}
pub fn lint_level_at_node(self, lint: &'static Lint, mut id: NodeId)
-> (lint::Level, lint::LintSource)
{
// Right now we insert a `with_ignore` node in the dep graph here to
// ignore the fact that `lint_levels` below depends on the entire crate.
// For now this'll prevent false positives of recompiling too much when
// anything changes.
//
// Once red/green incremental compilation lands we should be able to
// remove this because while the crate changes often the lint level map
// will change rarely.
self.dep_graph.with_ignore(|| {
let sets = self.lint_levels(LOCAL_CRATE);
loop {
let hir_id = self.hir().definitions().node_to_hir_id(id);
if let Some(pair) = sets.level_and_source(lint, hir_id, self.sess) {
return pair
}
let next = self.hir().get_parent_node(id);
if next == id {
bug!("lint traversal reached the root of the crate");
}
id = next;
}
})
}
pub fn struct_span_lint_hir<S: Into<MultiSpan>>(self,
lint: &'static Lint,
hir_id: HirId,
span: S,
msg: &str)
-> DiagnosticBuilder<'tcx>
{
let node_id = self.hir().hir_to_node_id(hir_id);
let (level, src) = self.lint_level_at_node(lint, node_id);
lint::struct_lint_level(self.sess, lint, level, src, Some(span.into()), msg)
}
pub fn struct_span_lint_node<S: Into<MultiSpan>>(self,
lint: &'static Lint,
id: NodeId,
span: S,
msg: &str)
-> DiagnosticBuilder<'tcx>
{
let (level, src) = self.lint_level_at_node(lint, id);
lint::struct_lint_level(self.sess, lint, level, src, Some(span.into()), msg)
}
pub fn struct_lint_node(self, lint: &'static Lint, id: NodeId, msg: &str)
-> DiagnosticBuilder<'tcx>
{
let (level, src) = self.lint_level_at_node(lint, id);
lint::struct_lint_level(self.sess, lint, level, src, None, msg)
}
pub fn in_scope_traits(self, id: HirId) -> Option<Lrc<StableVec<TraitCandidate>>> {
self.in_scope_traits_map(id.owner)
.and_then(|map| map.get(&id.local_id).cloned())
}
pub fn named_region(self, id: HirId) -> Option<resolve_lifetime::Region> {
self.named_region_map(id.owner)
.and_then(|map| map.get(&id.local_id).cloned())
}
pub fn is_late_bound(self, id: HirId) -> bool {
self.is_late_bound_map(id.owner)
.map(|set| set.contains(&id.local_id))
.unwrap_or(false)
}
pub fn object_lifetime_defaults(self, id: HirId)
-> Option<Lrc<Vec<ObjectLifetimeDefault>>>
{
self.object_lifetime_defaults_map(id.owner)
.and_then(|map| map.get(&id.local_id).cloned())
}
}
pub trait InternAs<T: ?Sized, R> {
type Output;
fn intern_with<F>(self, f: F) -> Self::Output
where F: FnOnce(&T) -> R;
}
impl<I, T, R, E> InternAs<[T], R> for I
where E: InternIteratorElement<T, R>,
I: Iterator<Item=E> {
type Output = E::Output;
fn intern_with<F>(self, f: F) -> Self::Output
where F: FnOnce(&[T]) -> R {
E::intern_with(self, f)
}
}
pub trait InternIteratorElement<T, R>: Sized {
type Output;
fn intern_with<I: Iterator<Item=Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output;
}
impl<T, R> InternIteratorElement<T, R> for T {
type Output = R;
fn intern_with<I: Iterator<Item=Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output {
f(&iter.collect::<SmallVec<[_; 8]>>())
}
}
impl<'a, T, R> InternIteratorElement<T, R> for &'a T
where T: Clone + 'a
{
type Output = R;
fn intern_with<I: Iterator<Item=Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output {
f(&iter.cloned().collect::<SmallVec<[_; 8]>>())
}
}
impl<T, R, E> InternIteratorElement<T, R> for Result<T, E> {
type Output = Result<R, E>;
fn intern_with<I: Iterator<Item=Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output {
Ok(f(&iter.collect::<Result<SmallVec<[_; 8]>, _>>()?))
}
}
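// Illustrative sketch (helper names hypothetical): because
// `InternIteratorElement` is implemented for `Result<T, E>`, an iterator of
// fallible items can be interned directly and the error propagated:
//
//     // each `lower(t)` returns Result<Ty<'tcx>, ErrorReported>
//     let tys = tcx.mk_type_list(hir_tys.iter().map(|t| lower(t)))?;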
pub fn provide(providers: &mut ty::query::Providers<'_>) {
providers.in_scope_traits_map = |tcx, id| tcx.gcx.trait_map.get(&id).cloned();
providers.module_exports = |tcx, id| tcx.gcx.export_map.get(&id).cloned();
providers.crate_name = |tcx, id| {
assert_eq!(id, LOCAL_CRATE);
tcx.crate_name
};
providers.get_lib_features = |tcx, id| {
assert_eq!(id, LOCAL_CRATE);
Lrc::new(middle::lib_features::collect(tcx))
};
providers.get_lang_items = |tcx, id| {
assert_eq!(id, LOCAL_CRATE);
Lrc::new(middle::lang_items::collect(tcx))
};
providers.freevars = |tcx, id| tcx.gcx.freevars.get(&id).cloned();
providers.maybe_unused_trait_import = |tcx, id| {
tcx.maybe_unused_trait_imports.contains(&id)
};
providers.maybe_unused_extern_crates = |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
Lrc::new(tcx.maybe_unused_extern_crates.clone())
};
providers.stability_index = |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
Lrc::new(stability::Index::new(tcx))
};
providers.lookup_stability = |tcx, id| {
assert_eq!(id.krate, LOCAL_CRATE);
let id = tcx.hir().definitions().def_index_to_hir_id(id.index);
tcx.stability().local_stability(id)
};
providers.lookup_deprecation_entry = |tcx, id| {
assert_eq!(id.krate, LOCAL_CRATE);
let id = tcx.hir().definitions().def_index_to_hir_id(id.index);
tcx.stability().local_deprecation_entry(id)
};
providers.extern_mod_stmt_cnum = |tcx, id| {
let id = tcx.hir().as_local_node_id(id).unwrap();
tcx.cstore.extern_mod_stmt_cnum_untracked(id)
};
providers.all_crate_nums = |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
Lrc::new(tcx.cstore.crates_untracked())
};
providers.postorder_cnums = |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
Lrc::new(tcx.cstore.postorder_cnums_untracked())
};
providers.output_filenames = |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
tcx.output_filenames.clone()
};
providers.features_query = |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
Lrc::new(tcx.sess.features_untracked().clone())
};
providers.is_panic_runtime = |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
attr::contains_name(tcx.hir().krate_attrs(), "panic_runtime")
};
providers.is_compiler_builtins = |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
attr::contains_name(tcx.hir().krate_attrs(), "compiler_builtins")
};
}
| 36.8685 | 100 | 0.566043 |
219066ac401587536398f0116ec7bccf1ad1439b | 8,684 | // Copyright 2015 The tiny-http Contributors
// Copyright 2015 The rust-chunked-transfer Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::Result as IoResult;
use std::io::Read;
use std::io::Error as IoError;
use std::io::ErrorKind;
use std::fmt;
use std::error::Error;
/// Reads HTTP chunks and sends back real data.
///
/// # Example
///
/// ```
/// use chunked_transfer::Decoder;
/// use std::io::Read;
///
/// let encoded = b"3\r\nhel\r\nb\r\nlo world!!!\r\n0\r\n\r\n";
/// let mut decoded = String::new();
///
/// let mut decoder = Decoder::new(encoded as &[u8]);
/// decoder.read_to_string(&mut decoded).unwrap();
///
/// assert_eq!(decoded, "hello world!!!");
/// ```
pub struct Decoder<R> {
// where the chunks come from
source: R,
// remaining size of the chunk being read
// none if we are not in a chunk
remaining_chunks_size: Option<usize>,
}
impl<R> Decoder<R> where R: Read {
pub fn new(source: R) -> Decoder<R> {
Decoder {
source: source,
remaining_chunks_size: None,
}
}
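    // Note: a chunk header has the shape `chunk-size [; chunk-ext] CRLF`, with
    // the size given in hexadecimal; the parser below reads hex digits until it
    // meets either the CR or the start of an (ignored) extension.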
fn read_chunk_size(&mut self) -> IoResult<usize> {
let mut chunk_size = Vec::new();
let mut has_ext = false;
loop {
let byte = match self.source.by_ref().bytes().next() {
Some(b) => try!(b),
None => return Err(IoError::new(ErrorKind::InvalidInput, DecoderError)),
};
if byte == b'\r' {
break;
}
if byte == b';' {
has_ext = true;
break;
}
chunk_size.push(byte);
}
// Ignore extensions for now
if has_ext {
loop {
let byte = match self.source.by_ref().bytes().next() {
Some(b) => try!(b),
None => return Err(IoError::new(ErrorKind::InvalidInput, DecoderError)),
};
if byte == b'\r' {
break;
}
}
}
try!(self.read_line_feed());
let chunk_size = match String::from_utf8(chunk_size) {
Ok(c) => c,
Err(_) => return Err(IoError::new(ErrorKind::InvalidInput, DecoderError))
};
let chunk_size = match usize::from_str_radix(chunk_size.trim(), 16) {
Ok(c) => c,
Err(_) => return Err(IoError::new(ErrorKind::InvalidInput, DecoderError))
};
Ok(chunk_size)
}
fn read_carriage_return(&mut self) -> IoResult<()> {
match self.source.by_ref().bytes().next() {
Some(Ok(b'\r')) => Ok(()),
_ => Err(IoError::new(ErrorKind::InvalidInput, DecoderError)),
}
}
fn read_line_feed(&mut self) -> IoResult<()> {
match self.source.by_ref().bytes().next() {
Some(Ok(b'\n')) => Ok(()),
_ => Err(IoError::new(ErrorKind::InvalidInput, DecoderError)),
}
}
}
impl<R> Read for Decoder<R> where R: Read {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
let remaining_chunks_size = match self.remaining_chunks_size {
Some(c) => c,
None => {
// first possibility: we are not in a chunk, so we'll attempt to determine
                // the chunk size
let chunk_size = try!(self.read_chunk_size());
// if the chunk size is 0, we are at EOF
if chunk_size == 0 {
try!(self.read_carriage_return());
try!(self.read_line_feed());
return Ok(0);
}
                // now that we know the current chunk size, call ourselves recursively
self.remaining_chunks_size = Some(chunk_size);
return self.read(buf);
}
};
// second possibility: we continue reading from a chunk
if buf.len() < remaining_chunks_size {
let read = try!(self.source.read(buf));
self.remaining_chunks_size = Some(remaining_chunks_size - read);
return Ok(read);
}
// third possibility: the read request goes further than the current chunk
// we simply read until the end of the chunk and return
assert!(buf.len() >= remaining_chunks_size);
let buf = &mut buf[.. remaining_chunks_size];
let read = try!(self.source.read(buf));
self.remaining_chunks_size = if read == remaining_chunks_size {
try!(self.read_carriage_return());
try!(self.read_line_feed());
None
} else {
Some(remaining_chunks_size - read)
};
return Ok(read);
}
}
#[derive(Debug, Copy, Clone)]
struct DecoderError;
impl fmt::Display for DecoderError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "Error while decoding chunks")
}
}
impl Error for DecoderError {
fn description(&self) -> &str {
"Error while decoding chunks"
}
}
#[cfg(test)]
mod test {
use super::Decoder;
use std::io;
use std::io::Read;
    /// This unit test is taken from Hyper
/// https://github.com/hyperium/hyper
/// Copyright (c) 2014 Sean McArthur
#[test]
fn test_read_chunk_size() {
fn read(s: &str, expected: usize) {
let mut decoded = Decoder::new(s.as_bytes());
let actual = decoded.read_chunk_size().unwrap();
assert_eq!(expected, actual);
}
fn read_err(s: &str) {
let mut decoded = Decoder::new(s.as_bytes());
let err_kind = decoded.read_chunk_size().unwrap_err().kind();
assert_eq!(err_kind, io::ErrorKind::InvalidInput);
}
read("1\r\n", 1);
read("01\r\n", 1);
read("0\r\n", 0);
read("00\r\n", 0);
read("A\r\n", 10);
read("a\r\n", 10);
read("Ff\r\n", 255);
read("Ff \r\n", 255);
// Missing LF or CRLF
read_err("F\rF");
read_err("F");
// Invalid hex digit
read_err("X\r\n");
read_err("1X\r\n");
read_err("-\r\n");
read_err("-1\r\n");
// Acceptable (if not fully valid) extensions do not influence the size
read("1;extension\r\n", 1);
read("a;ext name=value\r\n", 10);
read("1;extension;extension2\r\n", 1);
read("1;;; ;\r\n", 1);
read("2; extension...\r\n", 2);
read("3 ; extension=123\r\n", 3);
read("3 ;\r\n", 3);
read("3 ; \r\n", 3);
// Invalid extensions cause an error
read_err("1 invalid extension\r\n");
read_err("1 A\r\n");
read_err("1;no CRLF");
}
#[test]
fn test_valid_chunk_decode() {
let source = io::Cursor::new("3\r\nhel\r\nb\r\nlo world!!!\r\n0\r\n\r\n".to_string().into_bytes());
let mut decoded = Decoder::new(source);
let mut string = String::new();
decoded.read_to_string(&mut string).unwrap();
assert_eq!(string, "hello world!!!");
}
#[test]
fn test_decode_zero_length() {
let mut decoder = Decoder::new(b"0\r\n\r\n" as &[u8]);
let mut decoded = String::new();
decoder.read_to_string(&mut decoded).unwrap();
assert_eq!(decoded, "");
}
#[test]
fn test_decode_invalid_chunk_length() {
let mut decoder = Decoder::new(b"m\r\n\r\n" as &[u8]);
let mut decoded = String::new();
assert!(decoder.read_to_string(&mut decoded).is_err());
}
#[test]
fn invalid_input1() {
let source = io::Cursor::new("2\r\nhel\r\nb\r\nlo world!!!\r\n0\r\n".to_string().into_bytes());
let mut decoded = Decoder::new(source);
let mut string = String::new();
        assert!(decoded.read_to_string(&mut string).is_err());
}
#[test]
fn invalid_input2() {
let source = io::Cursor::new("3\rhel\r\nb\r\nlo world!!!\r\n0\r\n".to_string().into_bytes());
let mut decoded = Decoder::new(source);
let mut string = String::new();
        assert!(decoded.read_to_string(&mut string).is_err());
}
}
| 30.152778 | 107 | 0.546062 |
acc1061751cb96bc4e6d71023ab7c71ddc26f670 | 2,350 | #![allow(unused_macros)]
macro_rules! opt_warn {
($option:expr, $message:expr) => {
match $option {
Some(result) => result,
None => {
warn!($message);
continue;
},
}
}
}
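// Illustrative usage sketch (hypothetical context, not part of the original
// file): inside a loop, `opt_warn!` unwraps an `Option`, logging the message
// and skipping the current iteration on `None`:
//
//     for entry in entries {
//         let value = opt_warn!(entry.value, "entry has no value; skipping");
//         // ... use `value` ...
//     }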
macro_rules! rs_warn {
($result:expr, $message: expr) => {
match $result {
Ok(result) => result,
Err(error) => {
use std::error::Error;
warn!("{}", error.description());
continue;
},
}
}
}
macro_rules! opt_error {
($option:expr, $message:expr) => {
match $option {
Some(result) => result,
None => {
error!($message);
continue;
},
}
}
}
macro_rules! rs_error {
($result:expr) => {
match $result {
Ok(result) => result,
Err(error) => {
use std::error::Error;
error!("{}", error.description());
continue;
},
}
}
}
macro_rules! opt_ret_warn {
($option:expr, $message:expr) => {
match $option {
Some(result) => result,
None => {
warn!($message);
return None;
},
}
}
}
macro_rules! rs_ret_warn {
($result:expr, $message: expr) => {
match $result {
Ok(result) => result,
Err(error) => {
use std::error::Error;
warn!("{}", error.description());
return None;
},
}
}
}
macro_rules! opt_ret_error {
($option:expr, $message:expr) => {
match $option {
Some(result) => result,
None => {
error!($message);
return None;
},
}
}
}
macro_rules! rs_ret_error {
($result:expr) => {
match $result {
Ok(result) => result,
Err(error) => {
use std::error::Error;
error!("{}", error.description());
return None;
},
}
}
}
macro_rules! debug {
($fmt:expr) => (if cfg!(debug_assertions) {println!($fmt)});
($fmt:expr, $($arg:tt)*) => (if cfg!(debug_assertions) {println!($fmt, $($arg)*)});
}
| 21.962617 | 87 | 0.405106 |
91e6f5b4a8cc7a4f617d29b2401af860fc592529 | 21,886 | //! Locally Optimal Block Preconditioned Conjugate Gradient
//!
//! This module implements the Locally Optimal Block Preconditioned Conjugate Gradient (LOBPCG)
//! algorithm, which can be used as a solver for large symmetric positive definite eigenproblems.
use crate::error::{LinalgError, Result};
use crate::{cholesky::*, close_l2, eigh::*, norm::*, triangular::*};
use cauchy::Scalar;
use lax::Lapack;
use ndarray::prelude::*;
use ndarray::{Data, OwnedRepr, ScalarOperand};
use num_traits::{Float, NumCast};
/// Find largest or smallest eigenvalues
#[derive(Debug, Clone)]
pub enum Order {
Largest,
Smallest,
}
/// The result of the eigensolver
///
/// In the best case the eigensolver has converged with a result better than the given threshold,
/// then a `LobpcgResult::Ok` gives the eigenvalues, eigenvectors and norms. If an error occurred
/// during the process, it is returned in `LobpcgResult::Err`, but the best result is still returned,
/// as it could be usable. If there is no result at all, then `LobpcgResult::NoResult` is returned.
/// This happens if the algorithm fails at an early stage, for example if the matrix `A` is not SPD.
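///
/// A minimal sketch of consuming the result (illustrative addition, not part
/// of the original API docs):
///
/// ```ignore
/// match result {
///     LobpcgResult::Ok(vals, vecs, norms) => { /* converged */ }
///     LobpcgResult::Err(vals, vecs, norms, err) => { /* usable best effort; inspect err */ }
///     LobpcgResult::NoResult(err) => { /* solver failed early */ }
/// }
/// ```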
#[derive(Debug)]
pub enum LobpcgResult<A> {
Ok(Array1<A>, Array2<A>, Vec<A>),
Err(Array1<A>, Array2<A>, Vec<A>, LinalgError),
NoResult(LinalgError),
}
/// Solve full eigenvalue problem, sort by `order` and truncate to `size`
fn sorted_eig<S: Data<Elem = A>, A: Scalar + Lapack>(
a: ArrayBase<S, Ix2>,
b: Option<ArrayBase<S, Ix2>>,
size: usize,
order: &Order,
) -> Result<(Array1<A>, Array2<A>)> {
let n = a.len_of(Axis(0));
let (vals, vecs) = match b {
Some(b) => (a, b).eigh(UPLO::Upper).map(|x| (x.0, (x.1).0))?,
_ => a.eigh(UPLO::Upper)?,
};
Ok(match order {
Order::Largest => (
vals.slice_move(s![n-size..; -1]).mapv(Scalar::from_real),
vecs.slice_move(s![.., n-size..; -1]),
),
Order::Smallest => (
vals.slice_move(s![..size]).mapv(Scalar::from_real),
vecs.slice_move(s![.., ..size]),
),
})
}
/// Masks the columns of a matrix with the given `mask`
fn ndarray_mask<A: Scalar>(matrix: ArrayView2<A>, mask: &[bool]) -> Array2<A> {
assert_eq!(mask.len(), matrix.ncols());
let indices = (0..mask.len())
.zip(mask.iter())
.filter(|(_, b)| **b)
.map(|(a, _)| a)
.collect::<Vec<usize>>();
matrix.select(Axis(1), &indices)
}
/// Applies constraints, ensuring that a matrix is orthogonal to them
///
/// This function takes a matrix `v` and a constraint matrix `y` and orthogonalizes `v` to `y`.
fn apply_constraints<A: Scalar + Lapack>(
mut v: ArrayViewMut<A, Ix2>,
cholesky_yy: &CholeskyFactorized<OwnedRepr<A>>,
y: ArrayView2<A>,
) {
let gram_yv = y.t().dot(&v);
let u = gram_yv
.gencolumns()
.into_iter()
.map(|x| {
let res = cholesky_yy.solvec(&x).unwrap();
res.to_vec()
})
.flatten()
.collect::<Vec<A>>();
let rows = gram_yv.len_of(Axis(0));
let u = Array2::from_shape_vec((rows, u.len() / rows), u).unwrap();
v -= &(y.dot(&u));
}
/// Orthonormalize `V` with Cholesky factorization
///
/// This also returns the matrix `R` of the `QR` problem
fn orthonormalize<T: Scalar + Lapack>(v: Array2<T>) -> Result<(Array2<T>, Array2<T>)> {
let gram_vv = v.t().dot(&v);
let gram_vv_fac = gram_vv.cholesky(UPLO::Lower)?;
close_l2(
&gram_vv,
&gram_vv_fac.dot(&gram_vv_fac.t()),
NumCast::from(1e-5).unwrap(),
);
let v_t = v.reversed_axes();
let u = gram_vv_fac
.solve_triangular(UPLO::Lower, Diag::NonUnit, &v_t)?
.reversed_axes();
Ok((u, gram_vv_fac))
}
/// Eigenvalue solver for large symmetric positive definite (SPD) eigenproblems
///
/// # Arguments
/// * `a` - An operator defining the problem, usually a sparse (sometimes also dense) matrix
/// multiplication. Also called the "stiffness matrix".
/// * `x` - Initial approximation of the k eigenvectors. If `a` has shape=(n,n), then `x` should
/// have shape=(n,k).
/// * `m` - Preconditioner to `a`, by default the identity matrix. Should approximate the inverse
/// of `a`.
/// * `y` - Constraints of (n,size_y), iterations are performed in the orthogonal complement of the
/// column-space of `y`. It must be full rank.
/// * `tol` - The tolerance value defines at which point the solver stops the optimization. The approximation
/// of an eigenvalue stops when the l2-norm of the residual is below this threshold.
/// * `maxiter` - The maximal number of iterations
/// * `order` - Whether to solve for the largest or lowest eigenvalues
///
/// The function returns a `LobpcgResult` with the eigenvalue/eigenvector and achieved residual norm
/// for it. All iterations are tracked and the optimal solution is returned. In case of an error the
/// `LobpcgResult::Err` variant additionally carries the error. This can happen when
/// the precision of the matrix is too low (switch then from `f32` to `f64` for example).
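///
/// # Example
///
/// A minimal invocation sketch, mirroring this module's own tests below; `a` is
/// assumed to be a dense SPD `Array2<f64>` with `n` rows and `x` a random
/// `(n, num)` initial guess:
///
/// ```ignore
/// let result = lobpcg(|y| a.dot(&y), x, |_| {}, None, 1e-5, n * 2, Order::Largest);
/// ```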
pub fn lobpcg<
A: Float + Scalar + Lapack + ScalarOperand + PartialOrd + Default,
F: Fn(ArrayView2<A>) -> Array2<A>,
G: Fn(ArrayViewMut2<A>),
>(
a: F,
mut x: Array2<A>,
m: G,
y: Option<Array2<A>>,
tol: f32,
maxiter: usize,
order: Order,
) -> LobpcgResult<A> {
    // the initial approximation should be at most square
// n is the dimensionality of the problem
let (n, size_x) = (x.nrows(), x.ncols());
assert!(size_x <= n);
/*let size_y = match y {
Some(ref y) => y.ncols(),
_ => 0,
};
if (n - size_y) < 5 * size_x {
panic!("Please use a different approach, the LOBPCG method only supports the calculation of a couple of eigenvectors!");
}*/
    // cap the number of iterations
let mut iter = usize::min(n * 10, maxiter);
let tol = NumCast::from(tol).unwrap();
// calculate cholesky factorization of YY' and apply constraints to initial guess
let cholesky_yy = y.as_ref().map(|y| {
let cholesky_yy = y.t().dot(y).factorizec(UPLO::Lower).unwrap();
apply_constraints(x.view_mut(), &cholesky_yy, y.view());
cholesky_yy
});
// orthonormalize the initial guess
let (x, _) = match orthonormalize(x) {
Ok(x) => x,
Err(err) => return LobpcgResult::NoResult(err),
};
// calculate AX and XAX for Rayleigh quotient
let ax = a(x.view());
let xax = x.t().dot(&ax);
// perform eigenvalue decomposition of XAX
let (mut lambda, eig_block) = match sorted_eig(xax.view(), None, size_x, &order) {
Ok(x) => x,
Err(err) => return LobpcgResult::NoResult(err),
};
    // initialize the approximation of the eigenvectors
let mut x = x.dot(&eig_block);
let mut ax = ax.dot(&eig_block);
// track residual below threshold
let mut activemask = vec![true; size_x];
// track residuals and best result
let mut residual_norms_history = Vec::new();
let mut best_result = None;
let mut previous_block_size = size_x;
let mut ident: Array2<A> = Array2::eye(size_x);
let ident0: Array2<A> = Array2::eye(size_x);
let two: A = NumCast::from(2.0).unwrap();
let mut previous_p_ap: Option<(Array2<A>, Array2<A>)> = None;
let mut explicit_gram_flag = true;
let final_norm = loop {
// calculate residual
let lambda_diag = Array2::from_diag(&lambda);
let lambda_x = x.dot(&lambda_diag);
// calculate residual AX - lambdaX
let r = &ax - &lambda_x;
// calculate L2 norm of error for every eigenvalue
let residual_norms = r
.gencolumns()
.into_iter()
.map(|x| x.norm())
.collect::<Vec<A::Real>>();
residual_norms_history.push(residual_norms.clone());
// compare best result and update if we improved
let sum_rnorm: A::Real = residual_norms.iter().cloned().sum();
if best_result
.as_ref()
.map(|x: &(_, _, Vec<A::Real>)| x.2.iter().cloned().sum::<A::Real>() > sum_rnorm)
.unwrap_or(true)
{
best_result = Some((lambda.clone(), x.clone(), residual_norms.clone()));
}
// disable eigenvalues which are below the tolerance threshold
activemask = residual_norms
.iter()
.zip(activemask.iter())
.map(|(x, a)| *x > tol && *a)
.collect();
// resize identity block if necessary
let current_block_size = activemask.iter().filter(|x| **x).count();
if current_block_size != previous_block_size {
previous_block_size = current_block_size;
ident = Array2::eye(current_block_size);
}
        // if we are below the threshold for all eigenvalues or exceeded the number of iterations,
// abort
if current_block_size == 0 || iter == 0 {
break Ok(residual_norms);
}
// select active eigenvalues, apply pre-conditioner, orthogonalize to Y and orthonormalize
let mut active_block_r = ndarray_mask(r.view(), &activemask);
// apply preconditioner
m(active_block_r.view_mut());
// apply constraints to the preconditioned residuals
if let (Some(ref y), Some(ref cholesky_yy)) = (&y, &cholesky_yy) {
apply_constraints(active_block_r.view_mut(), cholesky_yy, y.view());
}
// orthogonalize the preconditioned residual to x
active_block_r -= &x.dot(&x.t().dot(&active_block_r));
let (r, _) = match orthonormalize(active_block_r) {
Ok(x) => x,
Err(err) => break Err(err),
};
let ar = a(r.view());
// check whether `A` is of type `f32` or `f64`
let max_rnorm_float = if A::epsilon() > NumCast::from(1e-8).unwrap() {
NumCast::from(1.0).unwrap()
} else {
NumCast::from(1.0e-8).unwrap()
};
// if we are once below the max_rnorm, enable explicit gram flag
let max_norm = residual_norms
.into_iter()
.fold(A::Real::neg_infinity(), A::Real::max);
explicit_gram_flag = max_norm <= max_rnorm_float || explicit_gram_flag;
// perform the Rayleigh Ritz procedure
let xar = x.t().dot(&ar);
let mut rar = r.t().dot(&ar);
        // for small residuals calculate covariance matrices explicitly, otherwise approximate
// them such that X is orthogonal and uncorrelated to the residual R and use eigenvalues of
// previous decomposition
let (xax, xx, rr, xr) = if explicit_gram_flag {
rar = (&rar + &rar.t()) / two;
let xax = x.t().dot(&ax);
(
(&xax + &xax.t()) / two,
x.t().dot(&x),
r.t().dot(&r),
x.t().dot(&r),
)
} else {
(
lambda_diag,
ident0.clone(),
ident.clone(),
Array2::zeros((size_x, current_block_size)),
)
};
// mask and orthonormalize P and AP
let mut p_ap = previous_p_ap
.as_ref()
.and_then(|(p, ap)| {
let active_p = ndarray_mask(p.view(), &activemask);
let active_ap = ndarray_mask(ap.view(), &activemask);
orthonormalize(active_p).map(|x| (active_ap, x)).ok()
})
.and_then(|(active_ap, (active_p, p_r))| {
// orthonormalize AP with R^{-1} of A
let active_ap = active_ap.reversed_axes();
p_r.solve_triangular(UPLO::Lower, Diag::NonUnit, &active_ap)
.map(|active_ap| (active_p, active_ap.reversed_axes()))
.ok()
});
// compute symmetric gram matrices and calculate solution of eigenproblem
//
// first try to compute the eigenvalue decomposition of the span{R, X, P},
// if this fails (or the algorithm was restarted), then just use span{R, X}
let result = p_ap
.as_ref()
.ok_or(LinalgError::Lapack(
lax::error::Error::LapackComputationalFailure { return_code: 1 },
))
.and_then(|(active_p, active_ap)| {
let xap = x.t().dot(active_ap);
let rap = r.t().dot(active_ap);
let pap = active_p.t().dot(active_ap);
let xp = x.t().dot(active_p);
let rp = r.t().dot(active_p);
let (pap, pp) = if explicit_gram_flag {
((&pap + &pap.t()) / two, active_p.t().dot(active_p))
} else {
(pap, ident.clone())
};
sorted_eig(
stack![
Axis(0),
stack![Axis(1), xax, xar, xap],
stack![Axis(1), xar.t(), rar, rap],
stack![Axis(1), xap.t(), rap.t(), pap]
],
Some(stack![
Axis(0),
stack![Axis(1), xx, xr, xp],
stack![Axis(1), xr.t(), rr, rp],
stack![Axis(1), xp.t(), rp.t(), pp]
]),
size_x,
&order,
)
})
.or_else(|_| {
p_ap = None;
sorted_eig(
stack![
Axis(0),
stack![Axis(1), xax, xar],
stack![Axis(1), xar.t(), rar]
],
Some(stack![
Axis(0),
stack![Axis(1), xx, xr],
stack![Axis(1), xr.t(), rr]
]),
size_x,
&order,
)
});
// update eigenvalues and eigenvectors (lambda is also used in the next iteration)
let eig_vecs;
match result {
Ok((x, y)) => {
lambda = x;
eig_vecs = y;
}
Err(x) => break Err(x),
}
// approximate eigenvector X and conjugate vectors P with solution of eigenproblem
let (p, ap, tau) = if let Some((active_p, active_ap)) = p_ap {
            // tau holds the eigenvector coefficients for the basis X
let tau = eig_vecs.slice(s![..size_x, ..]);
            // alpha holds the eigenvector coefficients for the basis R
let alpha = eig_vecs.slice(s![size_x..size_x + current_block_size, ..]);
            // gamma holds the eigenvector coefficients for the basis P
let gamma = eig_vecs.slice(s![size_x + current_block_size.., ..]);
// update AP and P in span{R, P} as linear combination
let updated_p = r.dot(&alpha) + active_p.dot(&gamma);
let updated_ap = ar.dot(&alpha) + active_ap.dot(&gamma);
(updated_p, updated_ap, tau)
} else {
            // tau holds the eigenvector coefficients for the basis X
let tau = eig_vecs.slice(s![..size_x, ..]);
            // alpha holds the eigenvector coefficients for the basis R
let alpha = eig_vecs.slice(s![size_x.., ..]);
// update AP and P as linear combination of the residual matrix R
let updated_p = r.dot(&alpha);
let updated_ap = ar.dot(&alpha);
(updated_p, updated_ap, tau)
};
// update approximation of X as linear combinations of span{X, P, R}
x = x.dot(&tau) + &p;
ax = ax.dot(&tau) + ≈
previous_p_ap = Some((p, ap));
iter -= 1;
};
// retrieve best result and convert norm into `A`
let (vals, vecs, rnorm) = best_result.unwrap();
let rnorm = rnorm.into_iter().map(Scalar::from_real).collect();
match final_norm {
Ok(_) => LobpcgResult::Ok(vals, vecs, rnorm),
Err(err) => LobpcgResult::Err(vals, vecs, rnorm, err),
}
}
#[cfg(test)]
mod tests {
use super::lobpcg;
use super::ndarray_mask;
use super::orthonormalize;
use super::sorted_eig;
use super::LobpcgResult;
use super::Order;
use crate::close_l2;
use crate::generate;
use crate::qr::*;
use ndarray::prelude::*;
/// Test the `sorted_eigen` function
#[test]
fn test_sorted_eigen() {
let matrix: Array2<f64> = generate::random((10, 10)) * 10.0;
let matrix = matrix.t().dot(&matrix);
// return all eigenvectors with largest first
let (vals, vecs) = sorted_eig(matrix.view(), None, 10, &Order::Largest).unwrap();
// calculate V * A * V' and compare to original matrix
let diag = Array2::from_diag(&vals);
let rec = (vecs.dot(&diag)).dot(&vecs.t());
close_l2(&matrix, &rec, 1e-5);
}
/// Test the masking function
#[test]
fn test_masking() {
let matrix: Array2<f64> = generate::random((10, 5)) * 10.0;
let masked_matrix = ndarray_mask(matrix.view(), &[true, true, false, true, false]);
close_l2(
&masked_matrix.slice(s![.., 2]),
&matrix.slice(s![.., 3]),
1e-12,
);
}
/// Test orthonormalization of a random matrix
#[test]
fn test_orthonormalize() {
let matrix: Array2<f64> = generate::random((10, 10)) * 10.0;
let (n, l) = orthonormalize(matrix.clone()).unwrap();
// check for orthogonality
let identity = n.dot(&n.t());
close_l2(&identity, &Array2::eye(10), 1e-2);
// compare returned factorization with QR decomposition
let (_, r) = matrix.qr().unwrap();
close_l2(&r.mapv(|x| x.abs()), &l.t().mapv(|x| x.abs()), 1e-2);
}
fn assert_symmetric(a: &Array2<f64>) {
close_l2(a, &a.t(), 1e-5);
}
fn check_eigenvalues(a: &Array2<f64>, order: Order, num: usize, ground_truth_eigvals: &[f64]) {
assert_symmetric(a);
let n = a.len_of(Axis(0));
let x: Array2<f64> = generate::random((n, num));
let result = lobpcg(|y| a.dot(&y), x, |_| {}, None, 1e-5, n * 2, order);
match result {
LobpcgResult::Ok(vals, _, r_norms) | LobpcgResult::Err(vals, _, r_norms, _) => {
// check convergence
for (i, norm) in r_norms.into_iter().enumerate() {
if norm > 1e-5 {
println!("==== Assertion Failed ====");
println!("The {}th eigenvalue estimation did not converge!", i);
panic!("Too large deviation of residual norm: {} > 0.01", norm);
}
}
// check correct order of eigenvalues
if ground_truth_eigvals.len() == num {
close_l2(
&Array1::from(ground_truth_eigvals.to_vec()),
&vals,
num as f64 * 5e-4,
)
}
}
LobpcgResult::NoResult(err) => panic!("Did not converge: {:?}", err),
}
}
/// Test the eigensolver with a identity matrix problem and a random initial solution
#[test]
fn test_eigsolver_diag() {
let diag = arr1(&[
1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20.,
]);
let a = Array2::from_diag(&diag);
check_eigenvalues(&a, Order::Largest, 3, &[20., 19., 18.]);
check_eigenvalues(&a, Order::Smallest, 3, &[1., 2., 3.]);
}
/// Test the eigensolver with matrix of constructed eigenvalues
#[test]
fn test_eigsolver_constructed() {
let n = 50;
let tmp = generate::random((n, n));
//let (v, _) = tmp.qr_square().unwrap();
let (v, _) = orthonormalize(tmp).unwrap();
// set eigenvalues in decreasing order
let t = Array2::from_diag(&Array1::linspace(n as f64, -(n as f64), n));
let a = v.dot(&t.dot(&v.t()));
// find five largest eigenvalues
check_eigenvalues(&a, Order::Largest, 5, &[50.0, 48.0, 46.0, 44.0, 42.0]);
check_eigenvalues(&a, Order::Smallest, 5, &[-50.0, -48.0, -46.0, -44.0, -42.0]);
}
#[test]
fn test_eigsolver_constrained() {
let diag = arr1(&[1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]);
let a = Array2::from_diag(&diag);
let x: Array2<f64> = generate::random((10, 1));
let y: Array2<f64> = arr2(&[
[1.0, 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 1.0, 0., 0., 0., 0., 0., 0., 0., 0.],
])
.reversed_axes();
let result = lobpcg(
|y| a.dot(&y),
x,
|_| {},
Some(y),
1e-10,
50,
Order::Smallest,
);
match result {
LobpcgResult::Ok(vals, vecs, r_norms) | LobpcgResult::Err(vals, vecs, r_norms, _) => {
// check convergence
for (i, norm) in r_norms.into_iter().enumerate() {
if norm > 0.01 {
println!("==== Assertion Failed ====");
println!("The {}th eigenvalue estimation did not converge!", i);
panic!("Too large deviation of residual norm: {} > 0.01", norm);
}
}
// should be the third eigenvalue
close_l2(&vals, &Array1::from(vec![3.0]), 1e-10);
close_l2(
&vecs.column(0).mapv(|x| x.abs()),
&arr1(&[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
1e-5,
);
}
LobpcgResult::NoResult(err) => panic!("Did not converge: {:?}", err),
}
}
}
| 35.7031 | 128 | 0.534588 |
64477f72b149632a99b8d58faaa18322e5d5e8d3 | 2,491 | use ckb_db::batch::{Batch, Col, Operation};
use ckb_db::kvdb::{KeyValueDB, Result};
use ckb_util::RwLock;
use fnv::FnvHashMap;
use lru_cache::LruCache;
use std::ops::Range;
type CacheTable = FnvHashMap<Col, LruCache<Vec<u8>, Vec<u8>>>;
pub type CacheCols = (u32, usize);
pub struct CacheDB<T>
where
T: KeyValueDB,
{
db: T,
cache: RwLock<CacheTable>,
}
impl<T> CacheDB<T>
where
T: KeyValueDB,
{
pub fn new(db: T, cols: &[CacheCols]) -> Self {
let mut table = FnvHashMap::with_capacity_and_hasher(cols.len(), Default::default());
for (idx, capacity) in cols {
table.insert(Some(*idx), LruCache::new(*capacity, false));
}
CacheDB {
db,
cache: RwLock::new(table),
}
}
}
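// Illustrative construction sketch (the column indices and capacities are
// hypothetical): `CacheDB::new(db, &[(0, 4096), (1, 128)])` caches column 0
// with a 4096-entry LRU and column 1 with a 128-entry LRU.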
impl<T> KeyValueDB for CacheDB<T>
where
T: KeyValueDB,
{
fn cols(&self) -> u32 {
self.db.cols()
}
fn write(&self, batch: Batch) -> Result<()> {
let mut cache_guard = self.cache.write();
batch.operations.iter().for_each(|op| match op {
Operation::Insert { col, key, value } => {
if let Some(lru) = cache_guard.get_mut(&col) {
lru.insert(key.clone(), value.clone());
}
}
Operation::Delete { col, key } => {
if let Some(lru) = cache_guard.get_mut(&col) {
lru.remove(key);
}
}
});
self.db.write(batch)
}
fn read(&self, col: Col, key: &[u8]) -> Result<Option<Vec<u8>>> {
let cache_guard = self.cache.read();
if let Some(value) = cache_guard
.get(&col)
.and_then(|cache| cache.get(key))
.cloned()
{
return Ok(Some(value));
}
self.db.read(col, key)
}
fn len(&self, col: Col, key: &[u8]) -> Result<Option<usize>> {
let cache_guard = self.cache.read();
if let Some(value) = cache_guard.get(&col).and_then(|cache| cache.get(key)) {
return Ok(Some(value.len()));
}
self.db.len(col, key)
}
fn partial_read(&self, col: Col, key: &[u8], range: &Range<usize>) -> Result<Option<Vec<u8>>> {
let cache_guard = self.cache.read();
if let Some(data) = cache_guard.get(&col).and_then(|cache| cache.get(key)) {
return Ok(data.get(range.start..range.end).map(|slice| slice.to_vec()));
}
self.db.partial_read(col, key, range)
}
}
| 28.306818 | 99 | 0.533521 |
f9c91dd7580322bca0bc38aa6b550ca02998defc | 7,616 | use chrono::Utc;
use futures::StreamExt;
use serde::de::DeserializeOwned;
use serde_json::json;
use svc_agent::{
mqtt::{IncomingEventProperties, IncomingRequestProperties, IncomingResponseProperties},
AgentId,
};
use uuid::Uuid;
use crate::app::{
endpoint::{EventHandler, RequestHandler, ResponseHandler},
error::Error as AppError,
message_handler::MessageStream,
API_VERSION,
};
use self::{
agent::TestAgent,
context::TestContext,
outgoing_envelope::{
OutgoingEnvelope, OutgoingEnvelopeProperties, OutgoingEventProperties,
OutgoingRequestProperties, OutgoingResponseProperties,
},
};
///////////////////////////////////////////////////////////////////////////////
pub const SVC_AUDIENCE: &'static str = "dev.svc.example.org";
pub const USR_AUDIENCE: &'static str = "dev.usr.example.org";
pub async fn handle_request<H: RequestHandler>(
context: &mut TestContext,
agent: &TestAgent,
payload: H::Payload,
) -> Result<Vec<OutgoingEnvelope>, AppError> {
let reqp = build_reqp(agent.agent_id(), "ignore");
let messages = H::handle(context, payload, &reqp).await?;
Ok(parse_messages(messages).await)
}
pub async fn handle_response<H: ResponseHandler>(
context: &mut TestContext,
agent: &TestAgent,
payload: H::Payload,
corr_data: &H::CorrelationData,
) -> Result<Vec<OutgoingEnvelope>, AppError> {
let respp = build_respp(agent.agent_id());
let messages = H::handle(context, payload, &respp, corr_data).await?;
Ok(parse_messages(messages).await)
}
pub async fn handle_event<H: EventHandler>(
context: &mut TestContext,
agent: &TestAgent,
payload: H::Payload,
) -> Result<Vec<OutgoingEnvelope>, AppError> {
let evp = build_evp(agent.agent_id(), "ignore");
let messages = H::handle(context, payload, &evp).await?;
Ok(parse_messages(messages).await)
}
async fn parse_messages(mut messages: MessageStream) -> Vec<OutgoingEnvelope> {
let mut parsed_messages = vec![];
while let Some(message) = messages.next().await {
let dump = message
.into_dump(TestAgent::new("alpha", "conference", SVC_AUDIENCE).address())
.expect("Failed to dump outgoing message");
let mut parsed_message = serde_json::from_str::<OutgoingEnvelope>(dump.payload())
.expect("Failed to parse dumped message");
parsed_message.set_topic(dump.topic());
parsed_messages.push(parsed_message);
}
parsed_messages
}
pub fn find_event<P>(messages: &[OutgoingEnvelope]) -> (P, &OutgoingEventProperties, &str)
where
P: DeserializeOwned,
{
for message in messages {
if let OutgoingEnvelopeProperties::Event(evp) = message.properties() {
return (message.payload::<P>(), evp, message.topic());
}
}
panic!("Event not found");
}
pub fn find_event_by_predicate<P, F>(
messages: &[OutgoingEnvelope],
f: F,
) -> Option<(P, &OutgoingEventProperties, &str)>
where
P: DeserializeOwned,
F: Fn(&OutgoingEventProperties, P, &str) -> bool,
{
for message in messages {
if let OutgoingEnvelopeProperties::Event(evp) = message.properties() {
if f(evp, message.payload::<P>(), message.topic()) {
return Some((message.payload::<P>(), evp, message.topic()));
}
}
}
return None;
}
pub fn find_response<P>(messages: &[OutgoingEnvelope]) -> (P, &OutgoingResponseProperties, &str)
where
P: DeserializeOwned,
{
for message in messages {
if let OutgoingEnvelopeProperties::Response(respp) = message.properties() {
return (message.payload::<P>(), respp, message.topic());
}
}
panic!("Response not found");
}
pub fn find_request<P>(messages: &[OutgoingEnvelope]) -> (P, &OutgoingRequestProperties, &str)
where
P: DeserializeOwned,
{
for message in messages {
if let OutgoingEnvelopeProperties::Request(reqp) = message.properties() {
return (message.payload::<P>(), reqp, message.topic());
}
}
panic!("Request not found");
}
pub fn build_reqp(agent_id: &AgentId, method: &str) -> IncomingRequestProperties {
let now = Utc::now().timestamp_millis().to_string();
let reqp_json = json!({
"type": "request",
"correlation_data": "123456789",
"agent_id": agent_id,
"connection_mode": "default",
"connection_version": "v2",
"method": method,
"response_topic": format!(
"agents/{}/api/{}/in/conference.{}",
agent_id, API_VERSION, SVC_AUDIENCE
),
"broker_agent_id": format!("alpha.mqtt-gateway.{}", SVC_AUDIENCE),
"broker_timestamp": now,
"broker_processing_timestamp": now,
"broker_initial_processing_timestamp": now,
"tracking_id": format!("{}.{}.{}", Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4()),
"session_tracking_label": format!(
"{}.{} {}.{}",
Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4()
),
});
serde_json::from_value::<IncomingRequestProperties>(reqp_json).expect("Failed to parse reqp")
}
pub fn build_respp(agent_id: &AgentId) -> IncomingResponseProperties {
let now = Utc::now().timestamp_millis().to_string();
let respp_json = json!({
"type": "response",
"status": "200",
"correlation_data": "ignore",
"agent_id": agent_id,
"connection_mode": "default",
"connection_version": "v2",
"broker_agent_id": format!("alpha.mqtt-gateway.{}", SVC_AUDIENCE),
"broker_timestamp": now,
"broker_processing_timestamp": now,
"broker_initial_processing_timestamp": now,
"tracking_id": format!("{}.{}.{}", Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4()),
"session_tracking_label": format!(
"{}.{} {}.{}",
Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4()
),
});
serde_json::from_value::<IncomingResponseProperties>(respp_json).expect("Failed to parse respp")
}
pub fn build_evp(agent_id: &AgentId, label: &str) -> IncomingEventProperties {
let now = Utc::now().timestamp_millis().to_string();
let evp_json = json!({
"type": "event",
"label": label,
"agent_id": agent_id,
"connection_mode": "default",
"connection_version": "v2",
"broker_agent_id": format!("alpha.mqtt-gateway.{}", SVC_AUDIENCE),
"broker_timestamp": now,
"broker_processing_timestamp": now,
"broker_initial_processing_timestamp": now,
"tracking_id": format!("{}.{}.{}", Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4()),
"session_tracking_label": format!(
"{}.{} {}.{}",
Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4()
),
});
serde_json::from_value::<IncomingEventProperties>(evp_json).expect("Failed to parse evp")
}
///////////////////////////////////////////////////////////////////////////////
pub mod prelude {
#[allow(unused_imports)]
pub use crate::app::context::GlobalContext;
#[allow(unused_imports)]
pub use super::{
agent::TestAgent, authz::TestAuthz, build_evp, build_reqp, build_respp,
context::TestContext, db::TestDb, factory, find_event, find_request, find_response,
handle_event, handle_request, handle_response, shared_helpers, SVC_AUDIENCE, USR_AUDIENCE,
};
}
pub mod agent;
pub mod authz;
pub mod context;
pub mod db;
pub mod factory;
pub mod outgoing_envelope;
pub mod shared_helpers;
pub mod test_deps;
| 32.271186 | 100 | 0.624606 |
e98d8ef2ffd0938d96312fb83a6b50f48b732238 | 13,182 | // Copyright (c) Microsoft. All rights reserved.
use edgelet_core::{Module, ModuleRegistry, ModuleRuntime, ModuleStatus};
use edgelet_http::route::{BoxFuture, Handler, Parameters};
use failure::ResultExt;
use futures::{future, Future, Stream};
use http::header::{CONTENT_LENGTH, CONTENT_TYPE};
use http::{Request, Response, StatusCode};
use hyper::{Body, Error as HyperError};
use management::models::*;
use serde::de::DeserializeOwned;
use serde::Serialize;
use serde_json;
use url::form_urlencoded::parse as parse_query;
use super::{spec_to_core, spec_to_details};
use error::{Error, ErrorKind};
use IntoResponse;
pub struct UpdateModule<M>
where
M: 'static + ModuleRuntime + Clone,
<M::Module as Module>::Config: DeserializeOwned + Serialize,
{
runtime: M,
}
impl<M> UpdateModule<M>
where
M: 'static + ModuleRuntime + Clone,
<M::Module as Module>::Config: DeserializeOwned + Serialize,
{
pub fn new(runtime: M) -> Self {
UpdateModule { runtime }
}
}
impl<M> Handler<Parameters> for UpdateModule<M>
where
M: 'static + ModuleRuntime + Clone,
<M::Module as Module>::Config: DeserializeOwned + Serialize,
M::Error: IntoResponse,
<M::ModuleRegistry as ModuleRegistry>::Error: IntoResponse,
{
fn handle(
&self,
req: Request<Body>,
_params: Parameters,
) -> BoxFuture<Response<Body>, HyperError> {
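        // Update flow: read the optional `start` query flag, decode the module
        // spec from the body, then remove the existing module, pull the new
        // image, recreate the module and (if requested) start it.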
let runtime = self.runtime.clone();
let start: bool = req
.uri()
.query()
.and_then(|query| {
parse_query(query.as_bytes())
.find(|&(ref key, _)| key == "start")
.and_then(|(_, v)| if v != "false" { Some(()) } else { None })
.map(|_| true)
})
.unwrap_or_else(|| false);
let response = req
.into_body()
.concat2()
.and_then(move |b| {
serde_json::from_slice::<ModuleSpec>(&b)
.context(ErrorKind::BadBody)
.map_err(From::from)
.and_then(|spec| {
spec_to_core::<M>(&spec)
.context(ErrorKind::BadBody)
.map_err(Error::from)
.map(|core_spec| (core_spec, spec))
})
.map(move |(core_spec, spec)| {
let name = core_spec.name().to_string();
if start {
info!("Updating and starting module {}", name);
} else {
info!("Updating module {}", name);
}
let created = runtime
.remove(&name)
.and_then(move |_| {
debug!("Removed existing module {}", name);
runtime
.registry()
.pull(core_spec.config())
.and_then(move |_| {
debug!("Successfully pulled new image for module {}", name);
runtime.create(core_spec).and_then(move |_| {
debug!("Created module {}", name);
if start {
info!("Starting module {}", name);
future::Either::A(
runtime
.start(&name)
.map(|_| ModuleStatus::Running),
)
} else {
future::Either::B(future::ok(ModuleStatus::Stopped))
}.map(
move |status| {
let details = spec_to_details(&spec, &status);
serde_json::to_string(&details)
.context(ErrorKind::Serde)
.map(|b| {
Response::builder()
.status(StatusCode::OK)
.header(
CONTENT_TYPE,
"application/json",
)
.header(
CONTENT_LENGTH,
b.len().to_string().as_str(),
)
.body(b.into())
.unwrap_or_else(|e| {
e.into_response()
})
})
.unwrap_or_else(|e| e.into_response())
},
)
})
})
})
.or_else(|e| future::ok(e.into_response()));
future::Either::A(created)
})
.unwrap_or_else(|e| future::Either::B(future::ok(e.into_response())))
})
.or_else(|e| future::ok(e.into_response()));
Box::new(response)
}
}
#[cfg(test)]
mod tests {
use chrono::prelude::*;
use edgelet_core::{ModuleRuntimeState, ModuleStatus};
use edgelet_http::route::Parameters;
use edgelet_test_utils::module::*;
use management::models::{Config, ErrorResponse};
use server::module::tests::Error;
use super::*;
lazy_static! {
static ref RUNTIME: TestRuntime<Error> = {
let state = ModuleRuntimeState::default()
.with_status(ModuleStatus::Running)
.with_exit_code(Some(0))
.with_status_description(Some("description".to_string()))
.with_started_at(Some(Utc.ymd(2018, 4, 13).and_hms_milli(14, 20, 0, 1)))
.with_finished_at(Some(Utc.ymd(2018, 4, 13).and_hms_milli(15, 20, 0, 1)))
.with_image_id(Some("image-id".to_string()));
let config = TestConfig::new("microsoft/test-image".to_string());
let module = TestModule::new("test-module".to_string(), config, Ok(state));
TestRuntime::new(Ok(module))
};
}
#[test]
fn success() {
let handler = UpdateModule::new(RUNTIME.clone());
let config = Config::new(json!({"image":"microsoft/test-image"}));
let spec = ModuleSpec::new("test-module".to_string(), "docker".to_string(), config);
let request = Request::put("http://localhost/modules/test-module")
.body(serde_json::to_string(&spec).unwrap().into())
.unwrap();
// act
let response = handler.handle(request, Parameters::new()).wait().unwrap();
// assert
assert_eq!(StatusCode::OK, response.status());
assert_eq!("160", *response.headers().get(CONTENT_LENGTH).unwrap());
assert_eq!(
"application/json",
*response.headers().get(CONTENT_TYPE).unwrap()
);
response
.into_body()
.concat2()
.and_then(|b| {
let details: ModuleDetails = serde_json::from_slice(&b).unwrap();
assert_eq!("test-module", details.name());
assert_eq!("docker", details.type_());
assert_eq!(
"microsoft/test-image",
details.config().settings().get("image").unwrap()
);
assert_eq!("stopped", details.status().runtime_status().status());
assert_eq!(160, b.len());
Ok(())
})
.wait()
.unwrap();
}
#[test]
fn success_start() {
let handler = UpdateModule::new(RUNTIME.clone());
let config = Config::new(json!({"image":"microsoft/test-image"}));
let spec = ModuleSpec::new("test-module".to_string(), "docker".to_string(), config);
let request = Request::put("http://localhost/modules/test-module?start")
.body(serde_json::to_string(&spec).unwrap().into())
.unwrap();
// act
let response = handler.handle(request, Parameters::new()).wait().unwrap();
// assert
assert_eq!(StatusCode::OK, response.status());
assert_eq!("160", *response.headers().get(CONTENT_LENGTH).unwrap());
assert_eq!(
"application/json",
*response.headers().get(CONTENT_TYPE).unwrap()
);
response
.into_body()
.concat2()
.and_then(|b| {
let details: ModuleDetails = serde_json::from_slice(&b).unwrap();
assert_eq!("test-module", details.name());
assert_eq!("docker", details.type_());
assert_eq!(
"microsoft/test-image",
details.config().settings().get("image").unwrap()
);
assert_eq!("running", details.status().runtime_status().status());
assert_eq!(160, b.len());
Ok(())
})
.wait()
.unwrap();
}
#[test]
fn bad_body() {
let handler = UpdateModule::new(RUNTIME.clone());
let body = "invalid";
let request = Request::put("http://localhost/modules/test-module")
.body(body.into())
.unwrap();
// act
let response = handler.handle(request, Parameters::new()).wait().unwrap();
// assert
assert_eq!(StatusCode::BAD_REQUEST, response.status());
response
.into_body()
.concat2()
.and_then(|b| {
let error_response: ErrorResponse = serde_json::from_slice(&b).unwrap();
let expected = "Bad body\n\tcaused by: expected value at line 1 column 1";
assert_eq!(expected, error_response.message());
Ok(())
})
.wait()
.unwrap();
}
#[test]
fn runtime_error() {
let runtime = TestRuntime::new(Err(Error::General));
let handler = UpdateModule::new(runtime);
let config = Config::new(json!({"image":"microsoft/test-image"}));
let spec = ModuleSpec::new("test-module".to_string(), "docker".to_string(), config);
let request = Request::put("http://localhost/modules/test-module")
.body(serde_json::to_string(&spec).unwrap().into())
.unwrap();
// act
let response = handler.handle(request, Parameters::new()).wait().unwrap();
// assert
assert_eq!(StatusCode::INTERNAL_SERVER_ERROR, response.status());
response
.into_body()
.concat2()
.and_then(|b| {
let error: ErrorResponse = serde_json::from_slice(&b).unwrap();
assert_eq!("General error", error.message());
Ok(())
})
.wait()
.unwrap();
}
#[test]
fn bad_settings() {
let runtime = TestRuntime::new(Err(Error::General));
let handler = UpdateModule::new(runtime);
let config = Config::new(json!({}));
let spec = ModuleSpec::new("test-module".to_string(), "docker".to_string(), config);
let request = Request::put("http://localhost/modules/test-module")
.body(serde_json::to_string(&spec).unwrap().into())
.unwrap();
// act
let response = handler.handle(request, Parameters::new()).wait().unwrap();
// assert
assert_eq!(StatusCode::BAD_REQUEST, response.status());
response
.into_body()
.concat2()
.and_then(|b| {
let error: ErrorResponse = serde_json::from_slice(&b).unwrap();
assert_eq!(
"Bad body\n\tcaused by: Serde error\n\tcaused by: missing field `image`",
error.message()
);
Ok(())
})
.wait()
.unwrap();
}
}
| 40.189024 | 100 | 0.442346 |
67ad53925ff36be42004ca67fa9edcd2f3d54fde | 3,538 | use crate::onig::{Regex, RegexOptions, Syntax};
use crate::yaml_rust::Yaml;
use crate::UserAgentParserError;
#[derive(Debug)]
pub struct DeviceRegex {
pub(crate) regex: Regex,
pub(crate) device_replacement: Option<String>,
pub(crate) brand_replacement: Option<String>,
pub(crate) model_replacement: Option<String>,
}
impl DeviceRegex {
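    /// Builds device regexes from a YAML sequence of mappings. Each entry is
    /// expected to carry a `regex` key plus the optional `device_replacement`,
    /// `brand_replacement`, `model_replacement` and `regex_flag` keys (this
    /// summary simply restates the keys read below).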
pub fn from_yaml(yaml: &Yaml) -> Result<Vec<DeviceRegex>, UserAgentParserError> {
let yamls = yaml.as_vec().ok_or(UserAgentParserError::IncorrectSource)?;
let yamls_len = yamls.len();
if yamls_len == 0 {
Err(UserAgentParserError::IncorrectSource)
} else {
let mut device_regexes = Vec::with_capacity(yamls_len);
let yaml_regex = Yaml::String("regex".to_string());
let yaml_device_replacement = Yaml::String("device_replacement".to_string());
let yaml_brand_replacement = Yaml::String("brand_replacement".to_string());
let yaml_model_replacement = Yaml::String("model_replacement".to_string());
let yaml_regex_flag = Yaml::String("regex_flag".to_string());
for yaml in yamls {
let yaml = yaml.as_hash().ok_or(UserAgentParserError::IncorrectSource)?;
let device_replacement = match yaml.get(&yaml_device_replacement) {
Some(yaml) => {
yaml.as_str()
.map(|s| Some(s.to_string()))
.ok_or(UserAgentParserError::IncorrectSource)?
}
None => None,
};
let brand_replacement = match yaml.get(&yaml_brand_replacement) {
Some(yaml) => {
yaml.as_str()
.map(|s| Some(s.to_string()))
.ok_or(UserAgentParserError::IncorrectSource)?
}
None => None,
};
let model_replacement = match yaml.get(&yaml_model_replacement) {
Some(yaml) => {
yaml.as_str()
.map(|s| Some(s.to_string()))
.ok_or(UserAgentParserError::IncorrectSource)?
}
None => None,
};
let regex_options = if let Some(yaml) = yaml.get(&yaml_regex_flag) {
let regex_flag = yaml.as_str().ok_or(UserAgentParserError::IncorrectSource)?;
if regex_flag == "i" {
RegexOptions::REGEX_OPTION_IGNORECASE
} else {
RegexOptions::REGEX_OPTION_NONE
}
} else {
RegexOptions::REGEX_OPTION_NONE
};
let regex = Regex::with_options(
yaml.get(&yaml_regex)
.ok_or(UserAgentParserError::IncorrectSource)?
.as_str()
.ok_or(UserAgentParserError::IncorrectSource)?,
regex_options,
Syntax::default(),
)?;
let device_regex = DeviceRegex {
regex,
device_replacement,
brand_replacement,
model_replacement,
};
device_regexes.push(device_regex);
}
Ok(device_regexes)
}
}
}
| 37.242105 | 97 | 0.499435 |
e97006015871b64cd8dc55c0453bc9e7f315bec8 | 814 | use std::cell::RefCell;
use std::rc::Rc;
pub type KeyboardShared = Rc<RefCell<Keyboard>>;
pub struct Keyboard {
lines: [u8; 10],
active_line: usize,
}
impl Keyboard {
pub fn new_shared() -> KeyboardShared {
let keyboard = Keyboard {
lines: [0xff; 10],
active_line: 0,
};
Rc::new(RefCell::new(keyboard))
}
pub fn reset_all(&mut self) {
self.lines = [0xff; 10];
}
pub fn set_key(&mut self, line: usize, bit: u8) {
self.lines[line] &= !(1 << bit);
}
pub fn set_active_line(&mut self, line: usize) {
self.active_line = line;
}
pub fn scan_active_line(&self) -> u8 {
if self.active_line < 10 {
self.lines[self.active_line]
} else {
0xff
}
}
}
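// Illustrative usage sketch (line/bit values are hypothetical):
//
//     let kb = Keyboard::new_shared();
//     kb.borrow_mut().set_active_line(2);
//     kb.borrow_mut().set_key(2, 5);              // press key at line 2, bit 5
//     let state = kb.borrow().scan_active_line(); // bit 5 now reads 0 (active low)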
| 19.853659 | 53 | 0.530713 |
6282d378f41da1af93ee32842459d52fc8a1ffd7 | 5,029 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
#![allow(non_camel_case_types, non_upper_case_globals, non_snake_case)]
#![allow(
clippy::approx_constant,
clippy::type_complexity,
clippy::unreadable_literal,
clippy::upper_case_acronyms
)]
#![cfg_attr(feature = "dox", feature(doc_cfg))]
#[allow(unused_imports)]
use libc::{
c_char, c_double, c_float, c_int, c_long, c_short, c_uchar, c_uint, c_ulong, c_ushort, c_void,
intptr_t, size_t, ssize_t, uintptr_t, FILE,
};
#[allow(unused_imports)]
use glib::{gboolean, gconstpointer, gpointer, GType};
// Enums
pub type ALSAHwdepIfaceType = c_int;
pub const ALSAHWDEP_IFACE_TYPE_OPL2: ALSAHwdepIfaceType = 0;
pub const ALSAHWDEP_IFACE_TYPE_OPL3: ALSAHwdepIfaceType = 1;
pub const ALSAHWDEP_IFACE_TYPE_OPL4: ALSAHwdepIfaceType = 2;
pub const ALSAHWDEP_IFACE_TYPE_SB16CSP: ALSAHwdepIfaceType = 3;
pub const ALSAHWDEP_IFACE_TYPE_EMU10K1: ALSAHwdepIfaceType = 4;
pub const ALSAHWDEP_IFACE_TYPE_YSS225: ALSAHwdepIfaceType = 5;
pub const ALSAHWDEP_IFACE_TYPE_ICS2115: ALSAHwdepIfaceType = 6;
pub const ALSAHWDEP_IFACE_TYPE_SSCAPE: ALSAHwdepIfaceType = 7;
pub const ALSAHWDEP_IFACE_TYPE_VX: ALSAHwdepIfaceType = 8;
pub const ALSAHWDEP_IFACE_TYPE_MIXART: ALSAHwdepIfaceType = 9;
pub const ALSAHWDEP_IFACE_TYPE_USX2Y: ALSAHwdepIfaceType = 10;
pub const ALSAHWDEP_IFACE_TYPE_EMUX_WAVETABLE: ALSAHwdepIfaceType = 11;
pub const ALSAHWDEP_IFACE_TYPE_BLUETOOTH: ALSAHwdepIfaceType = 12;
pub const ALSAHWDEP_IFACE_TYPE_USX2Y_PCM: ALSAHwdepIfaceType = 13;
pub const ALSAHWDEP_IFACE_TYPE_PCXHR: ALSAHwdepIfaceType = 14;
pub const ALSAHWDEP_IFACE_TYPE_SB_RC: ALSAHwdepIfaceType = 15;
pub const ALSAHWDEP_IFACE_TYPE_HDA: ALSAHwdepIfaceType = 16;
pub const ALSAHWDEP_IFACE_TYPE_USB_STREAM: ALSAHwdepIfaceType = 17;
pub const ALSAHWDEP_IFACE_TYPE_FW_DICE: ALSAHwdepIfaceType = 18;
pub const ALSAHWDEP_IFACE_TYPE_FW_FIREWORKS: ALSAHwdepIfaceType = 19;
pub const ALSAHWDEP_IFACE_TYPE_FW_BEBOB: ALSAHwdepIfaceType = 20;
pub const ALSAHWDEP_IFACE_TYPE_FW_OXFW: ALSAHwdepIfaceType = 21;
pub const ALSAHWDEP_IFACE_TYPE_FW_DIGI00X: ALSAHwdepIfaceType = 22;
pub const ALSAHWDEP_IFACE_TYPE_FW_TASCAM: ALSAHwdepIfaceType = 23;
pub const ALSAHWDEP_IFACE_TYPE_LINE6: ALSAHwdepIfaceType = 24;
pub const ALSAHWDEP_IFACE_TYPE_FW_MOTU: ALSAHwdepIfaceType = 25;
pub const ALSAHWDEP_IFACE_TYPE_FW_FIREFACE: ALSAHwdepIfaceType = 26;
// Records
#[derive(Copy, Clone)]
#[repr(C)]
pub struct ALSAHwdepDeviceInfoClass {
pub parent_class: gobject::GObjectClass,
}
impl ::std::fmt::Debug for ALSAHwdepDeviceInfoClass {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("ALSAHwdepDeviceInfoClass @ {:p}", self))
.field("parent_class", &self.parent_class)
.finish()
}
}
#[repr(C)]
pub struct _ALSAHwdepDeviceInfoPrivate {
_data: [u8; 0],
_marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
}
pub type ALSAHwdepDeviceInfoPrivate = *mut _ALSAHwdepDeviceInfoPrivate;
// Classes
#[derive(Copy, Clone)]
#[repr(C)]
pub struct ALSAHwdepDeviceInfo {
pub parent_instance: gobject::GObject,
pub priv_: *mut ALSAHwdepDeviceInfoPrivate,
}
impl ::std::fmt::Debug for ALSAHwdepDeviceInfo {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct(&format!("ALSAHwdepDeviceInfo @ {:p}", self))
.field("parent_instance", &self.parent_instance)
.field("priv_", &self.priv_)
.finish()
}
}
#[link(name = "alsahwdep")]
extern "C" {
//=========================================================================
// ALSAHwdepIfaceType
//=========================================================================
pub fn alsahwdep_iface_type_get_type() -> GType;
//=========================================================================
// ALSAHwdepDeviceInfo
//=========================================================================
pub fn alsahwdep_device_info_get_type() -> GType;
//=========================================================================
// Other functions
//=========================================================================
pub fn alsahwdep_get_device_id_list(
card_id: c_uint,
entries: *mut *mut c_uint,
entry_count: *mut size_t,
error: *mut *mut glib::GError,
);
pub fn alsahwdep_get_device_info(
card_id: c_uint,
device_id: c_uint,
device_info: *mut *mut ALSAHwdepDeviceInfo,
error: *mut *mut glib::GError,
);
pub fn alsahwdep_get_hwdep_devnode(
card_id: c_uint,
device_id: c_uint,
devnode: *mut *mut c_char,
error: *mut *mut glib::GError,
);
pub fn alsahwdep_get_hwdep_sysname(
card_id: c_uint,
device_id: c_uint,
sysname: *mut *mut c_char,
error: *mut *mut glib::GError,
);
}
| 36.977941 | 98 | 0.662557 |
9b37833b9dc5b6900e3d5b16b8d35090a7bb2ff0 | 1,222 | use anyhow::Result;
use futures::prelude::*;
use kv2::{CommandRequest, MemTable, Service, ServiceInner};
use prost::Message;
use tokio::net::TcpListener;
use tokio_util::codec::{Framed, LengthDelimitedCodec};
use tracing::info;
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let service: Service = ServiceInner::new(MemTable::new()).into();
let addr = "127.0.0.1:9527";
let listener = TcpListener::bind(addr).await?;
info!("Start listening on {}", addr);
loop {
let (stream, addr) = listener.accept().await?;
info!("Client {:?} connected", addr);
let svc = service.clone();
tokio::spawn(async move {
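            // Wrap the raw TCP stream in a length-delimited codec: each
            // request and response travels as one length-prefixed frame.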
let mut stream = Framed::new(stream, LengthDelimitedCodec::new());
while let Some(Ok(mut buf)) = stream.next().await {
let cmd = CommandRequest::decode(&buf[..]).unwrap();
info!("Got a new command: {:?}", cmd);
let res = svc.execute(cmd);
buf.clear();
res.encode(&mut buf).unwrap();
stream.send(buf.freeze()).await.unwrap();
}
info!("Client {:?} disconnected", addr);
});
}
}
| 35.941176 | 78 | 0.56383 |
1653d3bdba16e746803d9511d06332d0f719469f | 24,125 | use crate::interface::parse_cfgspecs;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{emitter::HumanReadableErrorType, registry, ColorConfig};
use rustc_session::config::InstrumentCoverage;
use rustc_session::config::Strip;
use rustc_session::config::{build_configuration, build_session_options, to_crate_config};
use rustc_session::config::{rustc_optgroups, ErrorOutputType, ExternLocation, Options, Passes};
use rustc_session::config::{CFGuard, ExternEntry, LinkerPluginLto, LtoCli, SwitchWithOptPath};
use rustc_session::config::{
Externs, OutputType, OutputTypes, SanitizerSet, SymbolManglingVersion, WasiExecModel,
};
use rustc_session::lint::Level;
use rustc_session::search_paths::SearchPath;
use rustc_session::utils::{CanonicalizedPath, NativeLibKind};
use rustc_session::{build_session, getopts, DiagnosticOutput, Session};
use rustc_span::edition::{Edition, DEFAULT_EDITION};
use rustc_span::symbol::sym;
use rustc_span::SourceFileHashAlgorithm;
use rustc_target::spec::{CodeModel, LinkerFlavor, MergeFunctions, PanicStrategy};
use rustc_target::spec::{RelocModel, RelroLevel, SplitDebuginfo, TlsModel};
use std::collections::{BTreeMap, BTreeSet};
use std::iter::FromIterator;
use std::num::NonZeroUsize;
use std::path::{Path, PathBuf};
type CfgSpecs = FxHashSet<(String, Option<String>)>;
fn build_session_options_and_crate_config(matches: getopts::Matches) -> (Options, CfgSpecs) {
let sessopts = build_session_options(&matches);
let cfg = parse_cfgspecs(matches.opt_strs("cfg"));
(sessopts, cfg)
}
fn mk_session(matches: getopts::Matches) -> (Session, CfgSpecs) {
let registry = registry::Registry::new(&[]);
let (sessopts, cfg) = build_session_options_and_crate_config(matches);
let sess = build_session(
sessopts,
None,
registry,
DiagnosticOutput::Default,
Default::default(),
None,
None,
);
(sess, cfg)
}
fn new_public_extern_entry<S, I>(locations: I) -> ExternEntry
where
S: Into<String>,
I: IntoIterator<Item = S>,
{
let locations: BTreeSet<CanonicalizedPath> =
locations.into_iter().map(|s| CanonicalizedPath::new(Path::new(&s.into()))).collect();
ExternEntry {
location: ExternLocation::ExactPaths(locations),
is_private_dep: false,
add_prelude: true,
}
}
fn optgroups() -> getopts::Options {
let mut opts = getopts::Options::new();
for group in rustc_optgroups() {
(group.apply)(&mut opts);
}
return opts;
}
fn mk_map<K: Ord, V>(entries: Vec<(K, V)>) -> BTreeMap<K, V> {
BTreeMap::from_iter(entries.into_iter())
}
// When the user supplies --test we should implicitly supply --cfg test
#[test]
fn test_switch_implies_cfg_test() {
rustc_span::with_default_session_globals(|| {
let matches = optgroups().parse(&["--test".to_string()]).unwrap();
let (sess, cfg) = mk_session(matches);
let cfg = build_configuration(&sess, to_crate_config(cfg));
assert!(cfg.contains(&(sym::test, None)));
});
}
// When the user supplies --test and --cfg test, don't implicitly add another --cfg test
#[test]
fn test_switch_implies_cfg_test_unless_cfg_test() {
rustc_span::with_default_session_globals(|| {
let matches = optgroups().parse(&["--test".to_string(), "--cfg=test".to_string()]).unwrap();
let (sess, cfg) = mk_session(matches);
let cfg = build_configuration(&sess, to_crate_config(cfg));
let mut test_items = cfg.iter().filter(|&&(name, _)| name == sym::test);
assert!(test_items.next().is_some());
assert!(test_items.next().is_none());
});
}
#[test]
fn test_can_print_warnings() {
rustc_span::with_default_session_globals(|| {
let matches = optgroups().parse(&["-Awarnings".to_string()]).unwrap();
let (sess, _) = mk_session(matches);
assert!(!sess.diagnostic().can_emit_warnings());
});
rustc_span::with_default_session_globals(|| {
let matches =
optgroups().parse(&["-Awarnings".to_string(), "-Dwarnings".to_string()]).unwrap();
let (sess, _) = mk_session(matches);
assert!(sess.diagnostic().can_emit_warnings());
});
rustc_span::with_default_session_globals(|| {
let matches = optgroups().parse(&["-Adead_code".to_string()]).unwrap();
let (sess, _) = mk_session(matches);
assert!(sess.diagnostic().can_emit_warnings());
});
}
#[test]
fn test_output_types_tracking_hash_different_paths() {
let mut v1 = Options::default();
let mut v2 = Options::default();
let mut v3 = Options::default();
v1.output_types = OutputTypes::new(&[(OutputType::Exe, Some(PathBuf::from("./some/thing")))]);
v2.output_types = OutputTypes::new(&[(OutputType::Exe, Some(PathBuf::from("/some/thing")))]);
v3.output_types = OutputTypes::new(&[(OutputType::Exe, None)]);
assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash());
assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash());
assert!(v2.dep_tracking_hash() != v3.dep_tracking_hash());
// Check clone
assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash());
assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash());
assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash());
}
#[test]
fn test_output_types_tracking_hash_different_construction_order() {
let mut v1 = Options::default();
let mut v2 = Options::default();
v1.output_types = OutputTypes::new(&[
(OutputType::Exe, Some(PathBuf::from("./some/thing"))),
(OutputType::Bitcode, Some(PathBuf::from("./some/thing.bc"))),
]);
v2.output_types = OutputTypes::new(&[
(OutputType::Bitcode, Some(PathBuf::from("./some/thing.bc"))),
(OutputType::Exe, Some(PathBuf::from("./some/thing"))),
]);
assert_eq!(v1.dep_tracking_hash(), v2.dep_tracking_hash());
// Check clone
assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash());
}
#[test]
fn test_externs_tracking_hash_different_construction_order() {
let mut v1 = Options::default();
let mut v2 = Options::default();
let mut v3 = Options::default();
v1.externs = Externs::new(mk_map(vec![
(String::from("a"), new_public_extern_entry(vec!["b", "c"])),
(String::from("d"), new_public_extern_entry(vec!["e", "f"])),
]));
v2.externs = Externs::new(mk_map(vec![
(String::from("d"), new_public_extern_entry(vec!["e", "f"])),
(String::from("a"), new_public_extern_entry(vec!["b", "c"])),
]));
v3.externs = Externs::new(mk_map(vec![
(String::from("a"), new_public_extern_entry(vec!["b", "c"])),
(String::from("d"), new_public_extern_entry(vec!["f", "e"])),
]));
assert_eq!(v1.dep_tracking_hash(), v2.dep_tracking_hash());
assert_eq!(v1.dep_tracking_hash(), v3.dep_tracking_hash());
assert_eq!(v2.dep_tracking_hash(), v3.dep_tracking_hash());
// Check clone
assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash());
assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash());
assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash());
}
#[test]
fn test_lints_tracking_hash_different_values() {
let mut v1 = Options::default();
let mut v2 = Options::default();
let mut v3 = Options::default();
v1.lint_opts = vec![
(String::from("a"), Level::Allow),
(String::from("b"), Level::Warn),
(String::from("c"), Level::Deny),
(String::from("d"), Level::Forbid),
];
v2.lint_opts = vec![
(String::from("a"), Level::Allow),
(String::from("b"), Level::Warn),
(String::from("X"), Level::Deny),
(String::from("d"), Level::Forbid),
];
v3.lint_opts = vec![
(String::from("a"), Level::Allow),
(String::from("b"), Level::Warn),
(String::from("c"), Level::Forbid),
(String::from("d"), Level::Deny),
];
assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash());
assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash());
assert!(v2.dep_tracking_hash() != v3.dep_tracking_hash());
// Check clone
assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash());
assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash());
assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash());
}
#[test]
fn test_lints_tracking_hash_different_construction_order() {
let mut v1 = Options::default();
let mut v2 = Options::default();
v1.lint_opts = vec![
(String::from("a"), Level::Allow),
(String::from("b"), Level::Warn),
(String::from("c"), Level::Deny),
(String::from("d"), Level::Forbid),
];
v2.lint_opts = vec![
(String::from("a"), Level::Allow),
(String::from("c"), Level::Deny),
(String::from("b"), Level::Warn),
(String::from("d"), Level::Forbid),
];
assert_eq!(v1.dep_tracking_hash(), v2.dep_tracking_hash());
// Check clone
assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash());
assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash());
}
#[test]
fn test_search_paths_tracking_hash_different_order() {
let mut v1 = Options::default();
let mut v2 = Options::default();
let mut v3 = Options::default();
let mut v4 = Options::default();
const JSON: ErrorOutputType = ErrorOutputType::Json {
pretty: false,
json_rendered: HumanReadableErrorType::Default(ColorConfig::Never),
};
// Reference
v1.search_paths.push(SearchPath::from_cli_opt("native=abc", JSON));
v1.search_paths.push(SearchPath::from_cli_opt("crate=def", JSON));
v1.search_paths.push(SearchPath::from_cli_opt("dependency=ghi", JSON));
v1.search_paths.push(SearchPath::from_cli_opt("framework=jkl", JSON));
v1.search_paths.push(SearchPath::from_cli_opt("all=mno", JSON));
v2.search_paths.push(SearchPath::from_cli_opt("native=abc", JSON));
v2.search_paths.push(SearchPath::from_cli_opt("dependency=ghi", JSON));
v2.search_paths.push(SearchPath::from_cli_opt("crate=def", JSON));
v2.search_paths.push(SearchPath::from_cli_opt("framework=jkl", JSON));
v2.search_paths.push(SearchPath::from_cli_opt("all=mno", JSON));
v3.search_paths.push(SearchPath::from_cli_opt("crate=def", JSON));
v3.search_paths.push(SearchPath::from_cli_opt("framework=jkl", JSON));
v3.search_paths.push(SearchPath::from_cli_opt("native=abc", JSON));
v3.search_paths.push(SearchPath::from_cli_opt("dependency=ghi", JSON));
v3.search_paths.push(SearchPath::from_cli_opt("all=mno", JSON));
v4.search_paths.push(SearchPath::from_cli_opt("all=mno", JSON));
v4.search_paths.push(SearchPath::from_cli_opt("native=abc", JSON));
v4.search_paths.push(SearchPath::from_cli_opt("crate=def", JSON));
v4.search_paths.push(SearchPath::from_cli_opt("dependency=ghi", JSON));
v4.search_paths.push(SearchPath::from_cli_opt("framework=jkl", JSON));
assert!(v1.dep_tracking_hash() == v2.dep_tracking_hash());
assert!(v1.dep_tracking_hash() == v3.dep_tracking_hash());
assert!(v1.dep_tracking_hash() == v4.dep_tracking_hash());
// Check clone
assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash());
assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash());
assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash());
assert_eq!(v4.dep_tracking_hash(), v4.clone().dep_tracking_hash());
}
#[test]
fn test_native_libs_tracking_hash_different_values() {
let mut v1 = Options::default();
let mut v2 = Options::default();
let mut v3 = Options::default();
let mut v4 = Options::default();
// Reference
v1.libs = vec![
(String::from("a"), None, NativeLibKind::StaticBundle),
(String::from("b"), None, NativeLibKind::Framework),
(String::from("c"), None, NativeLibKind::Unspecified),
];
// Change label
v2.libs = vec![
(String::from("a"), None, NativeLibKind::StaticBundle),
(String::from("X"), None, NativeLibKind::Framework),
(String::from("c"), None, NativeLibKind::Unspecified),
];
// Change kind
v3.libs = vec![
(String::from("a"), None, NativeLibKind::StaticBundle),
(String::from("b"), None, NativeLibKind::StaticBundle),
(String::from("c"), None, NativeLibKind::Unspecified),
];
// Change new-name
v4.libs = vec![
(String::from("a"), None, NativeLibKind::StaticBundle),
(String::from("b"), Some(String::from("X")), NativeLibKind::Framework),
(String::from("c"), None, NativeLibKind::Unspecified),
];
assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash());
assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash());
assert!(v1.dep_tracking_hash() != v4.dep_tracking_hash());
// Check clone
assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash());
assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash());
assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash());
assert_eq!(v4.dep_tracking_hash(), v4.clone().dep_tracking_hash());
}
#[test]
fn test_native_libs_tracking_hash_different_order() {
let mut v1 = Options::default();
let mut v2 = Options::default();
let mut v3 = Options::default();
// Reference
v1.libs = vec![
(String::from("a"), None, NativeLibKind::StaticBundle),
(String::from("b"), None, NativeLibKind::Framework),
(String::from("c"), None, NativeLibKind::Unspecified),
];
v2.libs = vec![
(String::from("b"), None, NativeLibKind::Framework),
(String::from("a"), None, NativeLibKind::StaticBundle),
(String::from("c"), None, NativeLibKind::Unspecified),
];
v3.libs = vec![
(String::from("c"), None, NativeLibKind::Unspecified),
(String::from("a"), None, NativeLibKind::StaticBundle),
(String::from("b"), None, NativeLibKind::Framework),
];
assert!(v1.dep_tracking_hash() == v2.dep_tracking_hash());
assert!(v1.dep_tracking_hash() == v3.dep_tracking_hash());
assert!(v2.dep_tracking_hash() == v3.dep_tracking_hash());
// Check clone
assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash());
assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash());
assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash());
}
#[test]
fn test_codegen_options_tracking_hash() {
let reference = Options::default();
let mut opts = Options::default();
macro_rules! untracked {
($name: ident, $non_default_value: expr) => {
opts.cg.$name = $non_default_value;
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
};
}
// Make sure that changing an [UNTRACKED] option leaves the hash unchanged.
// This list is in alphabetical order.
untracked!(ar, String::from("abc"));
untracked!(codegen_units, Some(42));
untracked!(default_linker_libraries, true);
untracked!(extra_filename, String::from("extra-filename"));
untracked!(incremental, Some(String::from("abc")));
// `link_arg` is omitted because it just forwards to `link_args`.
untracked!(link_args, vec![String::from("abc"), String::from("def")]);
untracked!(link_dead_code, Some(true));
untracked!(link_self_contained, Some(true));
untracked!(linker, Some(PathBuf::from("linker")));
untracked!(linker_flavor, Some(LinkerFlavor::Gcc));
untracked!(no_stack_check, true);
untracked!(remark, Passes::Some(vec![String::from("pass1"), String::from("pass2")]));
untracked!(rpath, true);
untracked!(save_temps, true);
macro_rules! tracked {
($name: ident, $non_default_value: expr) => {
opts = reference.clone();
opts.cg.$name = $non_default_value;
assert_ne!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
};
}
// Make sure that changing a [TRACKED] option changes the hash.
// This list is in alphabetical order.
tracked!(code_model, Some(CodeModel::Large));
tracked!(control_flow_guard, CFGuard::Checks);
tracked!(debug_assertions, Some(true));
tracked!(debuginfo, 0xdeadbeef);
tracked!(embed_bitcode, false);
tracked!(force_frame_pointers, Some(false));
tracked!(force_unwind_tables, Some(true));
tracked!(inline_threshold, Some(0xf007ba11));
tracked!(linker_plugin_lto, LinkerPluginLto::LinkerPluginAuto);
tracked!(llvm_args, vec![String::from("1"), String::from("2")]);
tracked!(lto, LtoCli::Fat);
tracked!(metadata, vec![String::from("A"), String::from("B")]);
tracked!(no_prepopulate_passes, true);
tracked!(no_redzone, Some(true));
tracked!(no_vectorize_loops, true);
tracked!(no_vectorize_slp, true);
tracked!(opt_level, "3".to_string());
tracked!(overflow_checks, Some(true));
tracked!(panic, Some(PanicStrategy::Abort));
tracked!(passes, vec![String::from("1"), String::from("2")]);
tracked!(prefer_dynamic, true);
tracked!(profile_generate, SwitchWithOptPath::Enabled(None));
tracked!(profile_use, Some(PathBuf::from("abc")));
tracked!(relocation_model, Some(RelocModel::Pic));
tracked!(soft_float, true);
tracked!(split_debuginfo, Some(SplitDebuginfo::Packed));
tracked!(target_cpu, Some(String::from("abc")));
tracked!(target_feature, String::from("all the features, all of them"));
}
#[test]
fn test_debugging_options_tracking_hash() {
let reference = Options::default();
let mut opts = Options::default();
macro_rules! untracked {
($name: ident, $non_default_value: expr) => {
opts.debugging_opts.$name = $non_default_value;
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
};
}
// Make sure that changing an [UNTRACKED] option leaves the hash unchanged.
// This list is in alphabetical order.
untracked!(ast_json, true);
untracked!(ast_json_noexpand, true);
untracked!(borrowck, String::from("other"));
untracked!(deduplicate_diagnostics, true);
untracked!(dep_tasks, true);
untracked!(dont_buffer_diagnostics, true);
untracked!(dump_dep_graph, true);
untracked!(dump_mir, Some(String::from("abc")));
untracked!(dump_mir_dataflow, true);
untracked!(dump_mir_dir, String::from("abc"));
untracked!(dump_mir_exclude_pass_number, true);
untracked!(dump_mir_graphviz, true);
untracked!(emit_future_incompat_report, true);
untracked!(emit_stack_sizes, true);
untracked!(hir_stats, true);
untracked!(identify_regions, true);
untracked!(incremental_ignore_spans, true);
untracked!(incremental_info, true);
untracked!(incremental_verify_ich, true);
untracked!(input_stats, true);
untracked!(keep_hygiene_data, true);
untracked!(link_native_libraries, false);
untracked!(llvm_time_trace, true);
untracked!(ls, true);
untracked!(macro_backtrace, true);
untracked!(meta_stats, true);
untracked!(nll_facts, true);
untracked!(no_analysis, true);
untracked!(no_interleave_lints, true);
untracked!(no_leak_check, true);
untracked!(no_parallel_llvm, true);
untracked!(parse_only, true);
untracked!(perf_stats, true);
// `pre_link_arg` is omitted because it just forwards to `pre_link_args`.
untracked!(pre_link_args, vec![String::from("abc"), String::from("def")]);
untracked!(print_link_args, true);
untracked!(print_llvm_passes, true);
untracked!(print_mono_items, Some(String::from("abc")));
untracked!(print_type_sizes, true);
untracked!(proc_macro_backtrace, true);
untracked!(query_dep_graph, true);
untracked!(query_stats, true);
untracked!(save_analysis, true);
untracked!(self_profile, SwitchWithOptPath::Enabled(None));
untracked!(self_profile_events, Some(vec![String::new()]));
untracked!(span_debug, true);
untracked!(span_free_formats, true);
untracked!(strip, Strip::None);
untracked!(terminal_width, Some(80));
untracked!(threads, 99);
untracked!(time, true);
untracked!(time_llvm_passes, true);
untracked!(time_passes, true);
untracked!(trace_macros, true);
untracked!(trim_diagnostic_paths, false);
untracked!(ui_testing, true);
untracked!(unpretty, Some("expanded".to_string()));
untracked!(unstable_options, true);
untracked!(validate_mir, true);
untracked!(verbose, true);
macro_rules! tracked {
($name: ident, $non_default_value: expr) => {
opts = reference.clone();
opts.debugging_opts.$name = $non_default_value;
assert_ne!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
};
}
// Make sure that changing a [TRACKED] option changes the hash.
// This list is in alphabetical order.
tracked!(allow_features, Some(vec![String::from("lang_items")]));
tracked!(always_encode_mir, true);
    tracked!(asm_comments, true);
    tracked!(assume_incomplete_release, true);
tracked!(binary_dep_depinfo, true);
tracked!(chalk, true);
tracked!(codegen_backend, Some("abc".to_string()));
tracked!(crate_attr, vec!["abc".to_string()]);
tracked!(debug_macros, true);
tracked!(dep_info_omit_d_target, true);
tracked!(dual_proc_macros, true);
tracked!(fewer_names, Some(true));
tracked!(force_overflow_checks, Some(true));
tracked!(force_unstable_if_unmarked, true);
tracked!(fuel, Some(("abc".to_string(), 99)));
tracked!(function_sections, Some(false));
tracked!(human_readable_cgu_names, true);
tracked!(inline_in_all_cgus, Some(true));
tracked!(inline_mir, Some(true));
    tracked!(inline_mir_hint_threshold, Some(123));
    tracked!(inline_mir_threshold, Some(123));
tracked!(instrument_coverage, Some(InstrumentCoverage::All));
tracked!(instrument_mcount, true);
tracked!(link_only, true);
tracked!(merge_functions, Some(MergeFunctions::Disabled));
tracked!(mir_emit_retag, true);
tracked!(mir_opt_level, Some(4));
tracked!(mutable_noalias, Some(true));
tracked!(new_llvm_pass_manager, true);
tracked!(no_codegen, true);
tracked!(no_generate_arange_section, true);
tracked!(no_link, true);
tracked!(no_profiler_runtime, true);
tracked!(osx_rpath_install_name, true);
tracked!(panic_abort_tests, true);
tracked!(plt, Some(true));
tracked!(polonius, true);
tracked!(precise_enum_drop_elaboration, false);
tracked!(print_fuel, Some("abc".to_string()));
tracked!(profile, true);
tracked!(profile_emit, Some(PathBuf::from("abc")));
tracked!(relax_elf_relocations, Some(true));
tracked!(relro_level, Some(RelroLevel::Full));
tracked!(report_delayed_bugs, true);
tracked!(sanitizer, SanitizerSet::ADDRESS);
tracked!(sanitizer_memory_track_origins, 2);
tracked!(sanitizer_recover, SanitizerSet::ADDRESS);
tracked!(saturating_float_casts, Some(true));
tracked!(share_generics, Some(true));
tracked!(show_span, Some(String::from("abc")));
tracked!(src_hash_algorithm, Some(SourceFileHashAlgorithm::Sha1));
tracked!(symbol_mangling_version, Some(SymbolManglingVersion::V0));
tracked!(teach, true);
tracked!(thinlto, Some(true));
tracked!(tune_cpu, Some(String::from("abc")));
tracked!(tls_model, Some(TlsModel::GeneralDynamic));
tracked!(trap_unreachable, Some(false));
tracked!(treat_err_as_bug, NonZeroUsize::new(1));
tracked!(unleash_the_miri_inside_of_you, true);
tracked!(use_ctors_section, Some(true));
tracked!(verify_llvm_ir, true);
tracked!(wasi_exec_model, Some(WasiExecModel::Reactor));
}
#[test]
fn test_edition_parsing() {
// test default edition
let options = Options::default();
assert!(options.edition == DEFAULT_EDITION);
let matches = optgroups().parse(&["--edition=2018".to_string()]).unwrap();
let (sessopts, _) = build_session_options_and_crate_config(matches);
assert!(sessopts.edition == Edition::Edition2018)
}
| 39.163961 | 100 | 0.669762 |
8f2d42ff41a5cdd7c4024af7cd72c9d83effbcd9 | 28,730 | use crate::slruntimefacade::{StarlightJsRuntimeFacade, StarlightJsRuntimeFacadeInner};
use hirofa_utils::auto_id_map::AutoIdMap;
use hirofa_utils::eventloop::EventLoop;
use hirofa_utils::js_utils::adapters::proxies::JsProxy;
use hirofa_utils::js_utils::adapters::{
JsPromiseAdapter, JsRealmAdapter, JsRuntimeAdapter, JsValueAdapter,
};
use hirofa_utils::js_utils::facades::JsValueType;
use hirofa_utils::js_utils::{JsError, Script};
use starlight::gc::cell::GcPointer;
use starlight::options::Options;
use starlight::prelude::{Internable, JsArray, JsString};
use starlight::vm::arguments::Arguments;
use starlight::vm::class::JsClass;
use starlight::vm::context::Context;
use starlight::vm::function::JsClosureFunction;
use starlight::vm::object::JsObject;
use starlight::vm::promise::JsPromise;
use starlight::vm::value::JsValue;
use starlight::vm::{PersistentRooted, VirtualMachineRef};
use starlight::Platform;
use std::cell::RefCell;
use std::collections::HashMap;
use std::ops::Deref;
use std::sync::Weak;
pub struct StarlightJsValueAdapter {
js_value: JsValue,
realm_id: Option<String>,
}
impl PartialEq for StarlightJsValueAdapter {
fn eq(&self, other: &Self) -> bool {
self.js_value == other.js_value
}
}
// todo: still not sure if I want to wrap JsValue or just impl JsValueAdapter for JsValue
// todo: just impl the adapter for JsValue; on clone create a PersistentRooted, and impl JsValueFacade for PersistentRooted as well
impl StarlightJsValueAdapter {
pub fn new(js_value: JsValue, realm_id: Option<String>) -> Self {
Self { js_value, realm_id }
}
pub fn null() -> Self {
Self::new(JsValue::encode_null_value(), None)
}
pub fn undefined() -> Self {
Self::new(JsValue::encode_undefined_value(), None)
}
}
impl Clone for StarlightJsValueAdapter {
fn clone(&self) -> Self {
Self {
realm_id: self.realm_id.clone(),
js_value: self.js_value.clone(),
}
}
}
thread_local! {
    static SL_RT: RefCell<Option<StarlightJsRuntimeAdapter>> = RefCell::new(None);
}
impl JsValueAdapter for StarlightJsValueAdapter {
type JsRuntimeAdapterType = StarlightJsRuntimeAdapter;
fn js_get_type(&self) -> JsValueType {
// todo figure out other types
if self.js_value.is_undefined() {
JsValueType::Undefined
} else if self.is_null() {
JsValueType::Null
} else if self.is_int32() {
JsValueType::I32
} else if self.is_number() {
JsValueType::F64
} else if self.is_callable() {
JsValueType::Function
} else if self.is_object() {
let jso = self.get_jsobject();
if jso.is_class(JsPromise::class()) {
JsValueType::Promise
} else if jso.is_class(JsArray::class()) {
JsValueType::Array
} else {
JsValueType::Object
}
} else if self.is_string() {
JsValueType::String
} else if self.is_symbol() {
todo!()
} else {
todo!()
}
}
fn js_type_of(&self) -> &'static str {
self.type_of()
}
fn js_to_bool(&self) -> bool {
self.to_boolean()
}
fn js_to_i32(&self) -> i32 {
self.get_int32()
}
fn js_to_f64(&self) -> f64 {
self.get_double()
}
fn js_to_string(&self) -> Result<String, JsError> {
assert!(self.js_get_type() == JsValueType::String);
StarlightJsRuntimeAdapter::do_with(|sl_rt| {
let realm = sl_rt
.js_get_realm(self.realm_id.as_ref().unwrap().as_str())
.unwrap();
self.to_string(realm.ctx).map_err(|e| {
JsError::new_string(e.to_string(realm.ctx).ok().expect("conversion failed"))
})
})
}
}
impl Deref for StarlightJsValueAdapter {
type Target = JsValue;
fn deref(&self) -> &Self::Target {
&self.js_value
}
}
pub struct StarlightJsRuntimeAdapter {
sl_rt: RefCell<VirtualMachineRef>,
realms: HashMap<String, StarlightJsRealmAdapter>,
rtf_inner_ref: Weak<StarlightJsRuntimeFacadeInner>,
}
pub struct StarlightJsPromiseAdapter {}
impl Clone for StarlightJsPromiseAdapter {
fn clone(&self) -> Self {
Self {}
}
}
impl JsPromiseAdapter<StarlightJsRealmAdapter> for StarlightJsPromiseAdapter {
fn js_promise_resolve(
&self,
_context: &StarlightJsRealmAdapter,
_resolution: &StarlightJsValueAdapter,
) -> Result<(), JsError> {
todo!()
}
fn js_promise_reject(
&self,
_context: &StarlightJsRealmAdapter,
_rejection: &StarlightJsValueAdapter,
) -> Result<(), JsError> {
todo!()
}
fn js_promise_get_value(
&self,
_realm: &StarlightJsRealmAdapter,
) -> <StarlightJsRealmAdapter as JsRealmAdapter>::JsValueAdapterType {
todo!()
}
}
impl JsRuntimeAdapter for StarlightJsRuntimeAdapter {
type JsRealmAdapterType = StarlightJsRealmAdapter;
type JsRuntimeFacadeType = StarlightJsRuntimeFacade;
fn js_load_module_script(&self, _ref_path: &str, _path: &str) -> Option<Script> {
todo!()
}
fn js_create_realm(&mut self, id: &str) -> Result<&Self::JsRealmAdapterType, JsError> {
let rt = &mut self.sl_rt.borrow_mut();
let ctx = Context::new(rt);
drop(rt);
let realm = StarlightJsRealmAdapter::new(id.to_string(), ctx);
self.realms.insert(id.to_string(), realm);
Ok(self.js_get_realm(id).unwrap())
}
fn js_get_realm(&self, id: &str) -> Option<&Self::JsRealmAdapterType> {
log::trace!("slrta.js_get_realm {}", id);
self.realms.get(id)
}
fn js_get_main_realm(&self) -> &Self::JsRealmAdapterType {
log::trace!("slrta.js_get_main_realm");
self.js_get_realm("__main__")
.expect("invalid state, main realm not yet created")
}
fn js_add_realm_init_hook<H>(&mut self, _hook: H) -> Result<(), JsError>
where
H: Fn(&Self, &Self::JsRealmAdapterType) -> Result<(), JsError> + 'static,
{
todo!()
}
}
impl StarlightJsRuntimeAdapter {
fn new(rtf_inner_ref: Weak<StarlightJsRuntimeFacadeInner>) -> Self {
let mut options = Options::default();
options.verbose_gc = true;
let sl_rt = Platform::new_runtime(options, None).with_async_scheduler(Box::new(|job| {
EventLoop::add_local_void(move || {
StarlightJsRuntimeAdapter::do_with(|sl_rt| {
let mut_rt = &mut *sl_rt.sl_rt.borrow_mut();
                // todo with_async_scheduler should pass a func with a ctx idx as first arg?
let ctx = mut_rt.context(0);
job(ctx);
})
})
}));
let mut ret = Self {
sl_rt: RefCell::new(sl_rt),
realms: Default::default(),
rtf_inner_ref,
};
ret.js_create_realm("__main__")
.ok()
.expect("create main realm failed");
ret
}
pub(crate) fn init(rtf_inner_ref: Weak<StarlightJsRuntimeFacadeInner>) {
SL_RT.with(|rc| {
let opt = &mut *rc.borrow_mut();
opt.replace(StarlightJsRuntimeAdapter::new(rtf_inner_ref));
});
}
pub fn do_with<C: FnOnce(&StarlightJsRuntimeAdapter) -> R, R>(consumer: C) -> R {
SL_RT.with(|rc| {
let rt = &*rc.borrow();
consumer(rt.as_ref().expect("do_with called before init"))
})
}
pub fn do_with_mut<C: FnOnce(&mut StarlightJsRuntimeAdapter) -> R, R>(consumer: C) -> R {
SL_RT.with(|rc| {
let rt = &mut *rc.borrow_mut();
consumer(rt.as_mut().expect("do_with called before init"))
})
}
}
pub struct StarlightJsRealmAdapter {
realm_name: String,
ctx: GcPointer<Context>,
object_cache: RefCell<AutoIdMap<PersistentRooted>>,
//promise_cache: RefCell<AutoIdMap<Box<dyn JsPromiseAdapter<Self>>>>,
}
impl StarlightJsRealmAdapter {
fn new(realm_name: String, ctx: GcPointer<Context>) -> Self {
Self {
realm_name,
ctx,
object_cache: RefCell::new(AutoIdMap::new_with_max_size(i32::MAX as usize)),
}
}
}
impl JsRealmAdapter for StarlightJsRealmAdapter {
type JsRuntimeAdapterType = StarlightJsRuntimeAdapter;
type JsValueAdapterType = StarlightJsValueAdapter;
fn js_get_realm_id(&self) -> &str {
self.realm_name.as_str()
}
fn js_get_runtime_facade_inner(&self) -> Weak<StarlightJsRuntimeFacadeInner> {
StarlightJsRuntimeAdapter::do_with(|rta| rta.rtf_inner_ref.clone())
}
fn js_get_script_or_module_name(&self) -> Result<String, JsError> {
todo!()
}
fn js_eval(&self, script: Script) -> Result<StarlightJsValueAdapter, JsError> {
log::trace!("> StarlightJsRealmAdapter.js_eval: {}", script.get_path());
self.ctx
.eval_internal(Some(script.get_path()), true, script.get_code(), true)
.map(|jsval| StarlightJsValueAdapter::new(jsval, Some(self.realm_name.clone())))
.map_err(|jsval| {
                JsError::new_str(jsval.to_string(self.ctx).ok().expect("error string conversion failed").as_str())
})
}
fn js_proxy_install(
&self,
_proxy: JsProxy<Self>,
_add_global_var: bool,
) -> Result<Self::JsValueAdapterType, JsError>
where
Self: Sized,
{
todo!()
}
fn js_proxy_instantiate(
&self,
_namespace: &[&str],
_class_name: &str,
_arguments: &[Self::JsValueAdapterType],
) -> Result<(usize, Self::JsValueAdapterType), JsError> {
todo!()
}
fn js_proxy_dispatch_event(
&self,
_namespace: &[&str],
_class_name: &str,
_proxy_instance_id: &usize,
_event_id: &str,
_event_obj: &Self::JsValueAdapterType,
) -> Result<bool, JsError> {
todo!()
}
fn js_proxy_dispatch_static_event(
&self,
_namespace: &[&str],
_class_name: &str,
_event_id: &str,
_event_obj: &Self::JsValueAdapterType,
) -> Result<bool, JsError> {
todo!()
}
fn js_install_function(
&self,
_namespace: &[&str],
_name: &str,
_js_function: fn(
&StarlightJsRuntimeAdapter,
&Self,
&StarlightJsValueAdapter,
&[StarlightJsValueAdapter],
) -> Result<StarlightJsValueAdapter, JsError>,
_arg_count: u32,
) -> Result<(), JsError> {
todo!()
/*
let rti = self.sl_rt.upgrade().expect("wtf");
let func_wrapper: JsAPI = move |rt: &mut Runtime, args: &Arguments| {
StarlightJsRuntimeAdapter::do_with(|sl_rt| {
// todo when sl supports Realms, get current realm from sl api
let realm = sl_rt.js_get_main_realm();
let this: StarlightJsValueAdapter = StarlightJsValueAdapter::new(args.this);
let args: Vec<StarlightJsValueAdapter> = args
.values
.iter()
.map(|arg| StarlightJsValueAdapter::new(*arg))
.collect();
let res = js_function(realm, &this, args.as_slice());
res.map(|v| v.0).map_err(|e| {
let vm = &mut *sl_rt.inner.sl_rt.borrow_mut();
let err_str = format!("{}", e);
JsValue::encode_object_value(JsString::new(vm, err_str.as_str()))
})
})
};
let ns = self.js_get_namespace(namespace)?;
let sl_rt = &mut *rti.sl_rt.borrow_mut();
let name_symbol = name.intern();
let m = starlight::vm::function::JsNativeFunction::new(
sl_rt,
name_symbol,
func_wrapper,
arg_count,
);
ns.0.get_jsobject()
.put(sl_rt, name_symbol, JsValue::new(m), true)
.map_err(|err| {
JsError::new_string(
err.to_string(sl_rt)
.ok()
.unwrap_or("could not get string from err".to_string()),
)
})?;
Ok(())
*/
}
fn js_install_closure<
F: Fn(
&StarlightJsRuntimeAdapter,
&Self,
&StarlightJsValueAdapter,
&[StarlightJsValueAdapter],
) -> Result<StarlightJsValueAdapter, JsError>
+ 'static,
>(
&self,
namespace: &[&str],
name: &str,
js_function: F,
arg_count: u32,
) -> Result<(), JsError> {
let realm_name = self.realm_name.clone();
let func_wrapper = move |_ctx: GcPointer<starlight::vm::context::Context>,
args: &Arguments| {
StarlightJsRuntimeAdapter::do_with(|sl_rt| {
let realm = sl_rt
.js_get_realm(realm_name.as_str())
.expect("realm get failed");
let this: StarlightJsValueAdapter =
StarlightJsValueAdapter::new(args.this, Some(realm_name.clone()));
let args: Vec<StarlightJsValueAdapter> = args
.values
.iter()
.map(|arg| StarlightJsValueAdapter::new(*arg, Some(realm_name.clone())))
.collect();
let res = js_function(sl_rt, realm, &this, args.as_slice());
res.map(|v| v.js_value).map_err(|e| {
let err_str = format!("{}", e);
JsValue::encode_object_value(JsString::new(realm.ctx, err_str.as_str()))
})
})
};
let ns = self.js_get_namespace(namespace)?;
let name_symbol = name.intern();
let m = starlight::vm::function::JsClosureFunction::new(
self.ctx,
name_symbol,
func_wrapper,
arg_count,
);
ns.js_value
.get_jsobject()
.put(self.ctx, name_symbol, JsValue::new(m), true)
.map_err(|err| {
JsError::new_string(
err.to_string(self.ctx)
.ok()
.unwrap_or("could not get string from err".to_string()),
)
})?;
Ok(())
}
fn js_eval_module(&self, _script: Script) -> Result<StarlightJsValueAdapter, JsError> {
todo!()
}
fn js_get_namespace(&self, namespace: &[&str]) -> Result<StarlightJsValueAdapter, JsError> {
let ctx = self.ctx;
let obj = ctx.global_object();
let mut obj_val = JsValue::new(obj);
for part in namespace {
let mut sub = obj_val
.get_jsobject()
.get(self.ctx, part.intern())
.map_err(|e| {
JsError::new_string(e.to_string(self.ctx).ok().expect("conversion failed"))
})?;
if sub.is_null() || sub.is_undefined() {
sub = JsValue::new(JsObject::new_empty(self.ctx));
obj_val
.get_jsobject()
.put(self.ctx, part.intern(), sub, false)
.map_err(|e| {
JsError::new_string(e.to_string(self.ctx).ok().expect("conversion failed"))
})?;
}
obj_val = sub;
}
Ok(StarlightJsValueAdapter::new(
obj_val,
Some(self.realm_name.clone()),
))
}
/// invoke a js function
/// # Example
/// ```rust
/// use starlight_runtime::slruntimefacade::StarlightRuntimeBuilder;
/// use hirofa_utils::js_utils::facades::{JsRuntimeBuilder, JsRuntimeFacade};
/// use hirofa_utils::js_utils::adapters::JsRealmAdapter;
/// use hirofa_utils::js_utils::{Script, JsError};
/// use starlight_runtime::slruntimeadapter::StarlightJsValueAdapter;
/// use starlight::prelude::JsValue;
/// let sl_rt = StarlightRuntimeBuilder::new().js_build();
/// sl_rt.js_loop_realm_sync(None, |rt, realm|{
/// realm.js_eval(Script::new("test_js_function_invoke.js", "globalThis.myns = {f1: function(a, b) {return a * b;}}")).ok().expect("script failed");
/// let args = vec![
/// StarlightJsValueAdapter::new(JsValue::encode_int32(7), None),
/// StarlightJsValueAdapter::new(JsValue::encode_int32(3), None)
/// ];
/// let res = realm.js_function_invoke_by_name(&["myns"], "f1", args.as_slice());
/// match res {
/// Ok(val) => {
/// assert_eq!(val.get_int32(), 21);
/// }
/// Err(err) => {
/// panic!("f1 failed: {}", err);
/// }
/// }
/// });
/// ```
fn js_function_invoke_by_name(
&self,
namespace: &[&str],
method_name: &str,
args: &[StarlightJsValueAdapter],
) -> Result<StarlightJsValueAdapter, JsError> {
let ns = self.js_get_namespace(namespace)?;
let method_val = ns
.get_jsobject()
.get(self.ctx, method_name.intern())
.map_err(|err_val| {
JsError::new_string(err_val.to_string(self.ctx).ok().expect("conversion failed"))
})?;
let this_val = *ns;
let mut method_obj = method_val.get_jsobject();
let method_func = method_obj.as_function_mut();
let mut args_vec: Vec<JsValue> = args.iter().map(|jsva| jsva.js_value).collect();
let mut arguments = Arguments::new(this_val, args_vec.as_mut_slice());
method_func
.call(self.ctx, &mut arguments, this_val)
.map(|jsval| StarlightJsValueAdapter::new(jsval, Some(self.realm_name.clone())))
.map_err(|err_val| {
JsError::new_string(err_val.to_string(self.ctx).ok().expect("conversion failed"))
})
}
fn js_function_invoke_member_by_name(
&self,
_this_obj: &StarlightJsValueAdapter,
_method_name: &str,
_args: &[StarlightJsValueAdapter],
) -> Result<StarlightJsValueAdapter, JsError> {
todo!()
}
fn js_function_invoke(
&self,
_this_obj: Option<&StarlightJsValueAdapter>,
_function_obj: &StarlightJsValueAdapter,
_args: &[StarlightJsValueAdapter],
) -> Result<StarlightJsValueAdapter, JsError> {
todo!()
}
fn js_function_create<
F: Fn(
&Self,
&StarlightJsValueAdapter,
&[StarlightJsValueAdapter],
) -> Result<StarlightJsValueAdapter, JsError>
+ 'static,
>(
&self,
name: &str,
js_function: F,
arg_count: u32,
) -> Result<StarlightJsValueAdapter, JsError> {
//
let realm_id = self.realm_name.clone();
let func = JsClosureFunction::new(
self.ctx,
name.intern(),
move |ctx, args| {
//
StarlightJsRuntimeAdapter::do_with(|sl_rta| {
let realm = sl_rta
.js_get_realm(realm_id.as_str())
.expect("invalid state");
let this = StarlightJsValueAdapter::new(args.this, Some(realm_id.clone()));
let args_vec: Vec<StarlightJsValueAdapter> = args
.values
.iter()
.map(|v| StarlightJsValueAdapter::new(*v, Some(realm_id.clone())))
.collect();
js_function(realm, &this, args_vec.as_slice())
.map(|res| res.js_value)
.map_err(|e| {
let err_val = realm
.js_string_create(format!("{}", e).as_str())
.ok()
.expect("string creation failed");
err_val.js_value
})
})
},
arg_count,
);
let val = JsValue::encode_object_value(func);
Ok(StarlightJsValueAdapter::new(
val,
Some(self.realm_name.clone()),
))
}
fn js_object_delete_property(
&self,
_object: &StarlightJsValueAdapter,
_property_name: &str,
) -> Result<(), JsError> {
todo!()
}
fn js_object_set_property(
&self,
_object: &StarlightJsValueAdapter,
_property_name: &str,
_property: &StarlightJsValueAdapter,
) -> Result<(), JsError> {
todo!()
}
fn js_object_get_property(
&self,
_object: &StarlightJsValueAdapter,
_property_name: &str,
) -> Result<StarlightJsValueAdapter, JsError> {
todo!()
}
fn js_object_create(&self) -> Result<StarlightJsValueAdapter, JsError> {
todo!()
}
fn js_object_construct(
&self,
_constructor: &StarlightJsValueAdapter,
_args: &[StarlightJsValueAdapter],
) -> Result<StarlightJsValueAdapter, JsError> {
todo!()
}
fn js_object_get_properties(
&self,
_object: &StarlightJsValueAdapter,
) -> Result<Vec<String>, JsError> {
todo!()
}
fn js_object_traverse<F, R>(
&self,
_object: &StarlightJsValueAdapter,
_visitor: F,
) -> Result<Vec<R>, JsError>
where
F: Fn(&str, &StarlightJsValueAdapter) -> Result<R, JsError>,
{
todo!()
}
fn js_object_traverse_mut<F>(
&self,
_object: &Self::JsValueAdapterType,
_visitor: F,
) -> Result<(), JsError>
where
F: FnMut(&str, &Self::JsValueAdapterType) -> Result<(), JsError>,
{
todo!()
}
fn js_array_get_element(
&self,
_array: &StarlightJsValueAdapter,
_index: u32,
) -> Result<StarlightJsValueAdapter, JsError> {
todo!()
}
fn js_array_set_element(
&self,
_array: &Self::JsValueAdapterType,
_index: u32,
_element: &Self::JsValueAdapterType,
) -> Result<(), JsError> {
todo!()
}
fn js_array_get_length(&self, _array: &StarlightJsValueAdapter) -> Result<u32, JsError> {
todo!()
}
fn js_array_create(&self) -> Result<StarlightJsValueAdapter, JsError> {
todo!()
}
fn js_array_traverse<F, R>(
&self,
_array: &StarlightJsValueAdapter,
_visitor: F,
) -> Result<Vec<R>, JsError>
where
F: Fn(u32, &StarlightJsValueAdapter) -> Result<R, JsError>,
{
todo!()
}
fn js_array_traverse_mut<F>(
&self,
_array: &Self::JsValueAdapterType,
_visitor: F,
) -> Result<(), JsError>
where
F: FnMut(u32, &Self::JsValueAdapterType) -> Result<(), JsError>,
{
todo!()
}
fn js_null_create(&self) -> Result<StarlightJsValueAdapter, JsError> {
Ok(StarlightJsValueAdapter::null())
}
fn js_undefined_create(&self) -> Result<StarlightJsValueAdapter, JsError> {
Ok(StarlightJsValueAdapter::undefined())
}
fn js_i32_create(&self, val: i32) -> Result<StarlightJsValueAdapter, JsError> {
Ok(StarlightJsValueAdapter::new(
JsValue::encode_int32(val),
None,
))
}
fn js_string_create(&self, val: &str) -> Result<StarlightJsValueAdapter, JsError> {
Ok(StarlightJsValueAdapter::new(
JsValue::encode_object_value(JsString::new(self.ctx, val)),
Some(self.realm_name.clone()),
))
}
fn js_boolean_create(&self, val: bool) -> Result<StarlightJsValueAdapter, JsError> {
Ok(StarlightJsValueAdapter::new(
JsValue::encode_bool_value(val),
None,
))
}
fn js_f64_create(&self, val: f64) -> Result<StarlightJsValueAdapter, JsError> {
Ok(StarlightJsValueAdapter::new(
JsValue::encode_f64_value(val),
None,
))
}
fn js_promise_create(&self) -> Result<Box<dyn JsPromiseAdapter<Self>>, JsError> {
todo!()
}
fn js_promise_add_reactions(
&self,
promise: &StarlightJsValueAdapter,
then: Option<StarlightJsValueAdapter>,
catch: Option<StarlightJsValueAdapter>,
finally: Option<StarlightJsValueAdapter>,
) -> Result<(), JsError> {
//
let val = promise.js_value;
let mut obj = val.get_jsobject();
let js_promise = obj.as_promise_mut();
let res: Result<JsValue, JsValue> = js_promise.then(
self.ctx,
val,
then.map(|jsva| jsva.js_value),
catch.map(|jsva| jsva.js_value),
finally.map(|jsva| jsva.js_value),
);
match res {
Ok(_) => Ok(()),
Err(err_val) => Err(JsError::new_string(
err_val.to_string(self.ctx).expect("string conv failed"),
)),
}
}
fn js_promise_cache_add(&self, _promise_ref: Box<dyn JsPromiseAdapter<Self>>) -> usize {
todo!()
}
fn js_promise_cache_consume(&self, _id: usize) -> Box<dyn JsPromiseAdapter<Self>> {
todo!()
}
fn js_cache_add(&self, object: &Self::JsValueAdapterType) -> i32 {
StarlightJsRuntimeAdapter::do_with(|sl_rta| {
let rt = &mut sl_rta.sl_rt.borrow_mut();
let pr = rt.add_persistent_root(object.js_value);
self.object_cache.borrow_mut().insert(pr) as i32
})
}
fn js_cache_dispose(&self, id: i32) {
let _ = self.object_cache.borrow_mut().remove(&(id as usize));
}
fn js_cache_with<C, R>(&self, id: i32, consumer: C) -> R
where
C: FnOnce(&StarlightJsValueAdapter) -> R,
{
let cloned_pr = {
let cache = &*self.object_cache.borrow();
let pr = cache.get(&(id as usize)).expect("no such obj in cache");
StarlightJsRuntimeAdapter::do_with(|sl_rta| {
let sl_rt = &mut *sl_rta.sl_rt.borrow_mut();
sl_rt.add_persistent_root(pr.get_value())
})
};
let va = StarlightJsValueAdapter::new(cloned_pr.get_value(), Some(self.realm_name.clone()));
consumer(&va)
}
fn js_cache_consume(&self, _id: i32) -> StarlightJsValueAdapter {
todo!()
}
fn js_instance_of(
&self,
_object: &StarlightJsValueAdapter,
_constructor: &StarlightJsValueAdapter,
) -> bool {
todo!()
}
fn js_json_stringify(
&self,
_object: &StarlightJsValueAdapter,
_opt_space: Option<&str>,
) -> Result<String, JsError> {
todo!()
}
fn js_json_parse(&self, _json_string: &str) -> Result<StarlightJsValueAdapter, JsError> {
todo!()
}
}
#[cfg(test)]
pub mod tests {
use crate::slruntimeadapter::{StarlightJsRuntimeAdapter, StarlightJsValueAdapter};
use crate::slruntimefacade::StarlightRuntimeBuilder;
use futures::executor::block_on;
use hirofa_utils::js_utils::adapters::{JsRealmAdapter, JsRuntimeAdapter, JsValueAdapter};
use hirofa_utils::js_utils::facades::values::JsValueFacade;
use hirofa_utils::js_utils::facades::{JsRuntimeBuilder, JsRuntimeFacade, JsValueType};
use hirofa_utils::js_utils::{JsError, Script};
#[test]
fn test_func() {
let rtf = StarlightRuntimeBuilder::new().js_build();
rtf.js_loop_realm_sync(None, |_rt, realm| {
realm
.js_install_closure(
&["com", "hirofa"],
"testFunc",
|_runtime, realm, _this, _args| {
                        realm.js_i32_create(123)
},
1,
)
.ok()
.expect("install failed");
let res = realm.js_eval(Script::new(
"test.js",
"return com.hirofa.testFunc(1, true, 'abc');",
));
match res {
Ok(val) => {
println!("typeof:{}", val.js_type_of());
assert!(val.js_get_type() == JsValueType::I32);
assert_eq!(val.js_to_i32(), 123);
}
Err(e) => {
panic!("err: {}", e);
}
}
});
}
}
| 31.194354 | 156 | 0.556213 |
569043e02292df2c5ea099df511c12a75c13e780 | 6,679 | use num_complex::Complex;
use num_traits::{Float, One, Zero};
use rand::distributions::{uniform::SampleUniform, Distribution, Uniform};
use rand::{rngs::StdRng, SeedableRng};
use crate::{algorithm::Dft, Direction, FftNum, Length};
use crate::{Fft, FftDirection};
/// The seed for the random number generator used to generate
/// random signals. It's defined here so that we have deterministic
/// tests
const RNG_SEED: [u8; 32] = [
1, 9, 1, 0, 1, 1, 4, 3, 1, 4, 9, 8, 4, 1, 4, 8, 2, 8, 1, 2, 2, 2, 6, 1, 2, 3, 4, 5, 6, 7, 8, 9,
];
pub fn random_signal<T: FftNum + SampleUniform>(length: usize) -> Vec<Complex<T>> {
let mut sig = Vec::with_capacity(length);
    let uniform_dist: Uniform<T> = Uniform::new(T::zero(), T::from_f32(10.0).unwrap());
    let mut rng: StdRng = SeedableRng::from_seed(RNG_SEED);
    for _ in 0..length {
        sig.push(Complex {
            re: uniform_dist.sample(&mut rng),
            im: uniform_dist.sample(&mut rng),
});
}
    sig
}
pub fn compare_vectors<T: FftNum + Float>(vec1: &[Complex<T>], vec2: &[Complex<T>]) -> bool {
assert_eq!(vec1.len(), vec2.len());
let mut error = T::zero();
for (&a, &b) in vec1.iter().zip(vec2.iter()) {
error = error + (a - b).norm();
}
    (error.to_f64().unwrap() / vec1.len() as f64) < 0.1f64
}
#[allow(unused)]
fn transpose_diagnostic<T: FftNum + Float>(expected: &[Complex<T>], actual: &[Complex<T>]) {
for (i, (&e, &a)) in expected.iter().zip(actual.iter()).enumerate() {
if (e - a).norm().to_f32().unwrap() > 0.01 {
if let Some(found_index) = expected
.iter()
.position(|&ev| (ev - a).norm().to_f32().unwrap() < 0.01)
{
println!("{} incorrectly contained {}", i, found_index);
} else {
println!("{} X", i);
}
}
}
}
pub fn check_fft_algorithm<T: FftNum + Float + SampleUniform>(
fft: &dyn Fft<T>,
len: usize,
direction: FftDirection,
) {
assert_eq!(
fft.len(),
len,
"Algorithm reported incorrect size. Expected {}, got {}",
len,
fft.len()
);
assert_eq!(
fft.fft_direction(),
direction,
"Algorithm reported incorrect FFT direction"
);
    // process several back-to-back FFTs in one buffer to exercise chunked processing
    let n = 3;
    // compute the expected output with a naive DFT in the requested direction
    let dft = Dft::new(len, direction);
let dirty_scratch_value = Complex::one() * T::from_i32(100).unwrap();
// set up buffers
let reference_input = random_signal(len * n);
let mut expected_output = reference_input.clone();
let mut dft_scratch = vec![Zero::zero(); dft.get_inplace_scratch_len()];
dft.process_with_scratch(&mut expected_output, &mut dft_scratch);
// test process()
{
let mut buffer = reference_input.clone();
fft.process(&mut buffer);
assert!(
compare_vectors(&expected_output, &buffer),
"process() failed, length = {}, direction = {}, result = {:?}, expected = {:?}",
len,
direction,
buffer,
expected_output
);
}
// test process_with_scratch()
{
let mut buffer = reference_input.clone();
let mut scratch = vec![Zero::zero(); fft.get_inplace_scratch_len()];
fft.process_with_scratch(&mut buffer, &mut scratch);
assert!(
compare_vectors(&expected_output, &buffer),
"process_with_scratch() failed, length = {}, direction = {}",
len,
direction
);
// make sure this algorithm works correctly with dirty scratch
if scratch.len() > 0 {
for item in scratch.iter_mut() {
*item = dirty_scratch_value;
}
buffer.copy_from_slice(&reference_input);
fft.process_with_scratch(&mut buffer, &mut scratch);
assert!(compare_vectors(&expected_output, &buffer), "process_with_scratch() failed the 'dirty scratch' test, length = {}, direction = {}", len, direction);
}
}
// test process_outofplace_with_scratch()
{
let mut input = reference_input.clone();
let mut scratch = vec![Zero::zero(); fft.get_outofplace_scratch_len()];
let mut output = expected_output.clone();
fft.process_outofplace_with_scratch(&mut input, &mut output, &mut scratch);
assert!(
compare_vectors(&expected_output, &output),
"process_outofplace_with_scratch() failed, length = {}, direction = {}",
len,
direction
);
// make sure this algorithm works correctly with dirty scratch
if scratch.len() > 0 {
for item in scratch.iter_mut() {
*item = dirty_scratch_value;
}
input.copy_from_slice(&reference_input);
fft.process_outofplace_with_scratch(&mut input, &mut output, &mut scratch);
assert!(
compare_vectors(&expected_output, &output),
"process_outofplace_with_scratch() failed the 'dirty scratch' test, length = {}, direction = {}",
len,
direction
);
}
}
}
// A fake FFT algorithm that requests much more scratch than it needs. You can use this as an inner FFT to other algorithms to test their scratch-supplying logic
#[derive(Debug)]
pub struct BigScratchAlgorithm {
pub len: usize,
pub inplace_scratch: usize,
pub outofplace_scratch: usize,
pub direction: FftDirection,
}
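// For example, a wrapper test could construct one directly and hand it to the
// algorithm under test as its inner FFT (a sketch only; the sizes below are
// arbitrary and the wrapping step depends on the algorithm being tested):
//
//     let inner = std::sync::Arc::new(BigScratchAlgorithm {
//         len: 100,
//         inplace_scratch: 1_000_000,
//         outofplace_scratch: 2_000_000,
//         direction: FftDirection::Forward,
//     });
//     // pass `inner` to the outer algorithm's constructor, then assert that the
//     // outer algorithm reports (and supplies) at least this much scratch.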
impl<T: FftNum> Fft<T> for BigScratchAlgorithm {
fn process_with_scratch(&self, _buffer: &mut [Complex<T>], scratch: &mut [Complex<T>]) {
assert!(
scratch.len() >= self.inplace_scratch,
"Not enough inplace scratch provided, self={:?}, provided scratch={}",
&self,
scratch.len()
);
}
fn process_outofplace_with_scratch(
&self,
_input: &mut [Complex<T>],
_output: &mut [Complex<T>],
scratch: &mut [Complex<T>],
) {
assert!(
scratch.len() >= self.outofplace_scratch,
"Not enough OOP scratch provided, self={:?}, provided scratch={}",
&self,
scratch.len()
);
}
fn get_inplace_scratch_len(&self) -> usize {
self.inplace_scratch
}
fn get_outofplace_scratch_len(&self) -> usize {
self.outofplace_scratch
}
}
impl Length for BigScratchAlgorithm {
fn len(&self) -> usize {
self.len
}
}
impl Direction for BigScratchAlgorithm {
fn fft_direction(&self) -> FftDirection {
self.direction
}
}
| 31.504717 | 167 | 0.576134 |
5dee498d6b17202cfd8ffdf8d1fe5607397d17d4 | 664 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn f() {
let x = 10; let mut y = 11;
if true { match x { _ => { y = x; } } } else { }
}
fn main() {
let x = 10;
let mut y = 11;
if true { while false { y = x; } } else { }
}
| 28.869565 | 68 | 0.644578 |
56eb5f874cd618fc5d582e7947e92b0596a96e31 | 564 | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
const x: () = {
return; //~ ERROR return statement outside of function body
};
fn main() {}
| 35.25 | 68 | 0.721631 |
2fcd06330bc4ece15e0abf7d5159114dc0c62e2e | 85,836 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[derive(Debug)]
pub(crate) struct Handle<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
client: aws_smithy_client::Client<C, M, R>,
conf: crate::Config,
}
/// An ergonomic service client for `Synthetics`.
///
/// This client allows ergonomic access to a `Synthetics`-shaped service.
/// Each method corresponds to an endpoint defined in the service's Smithy model,
/// and the request and response shapes are auto-generated from that same model.
///
/// # Using a Client
///
/// Once you have a client set up, you can access the service's endpoints
/// by calling the appropriate method on [`Client`]. Each such method
/// returns a request builder for that endpoint, with methods for setting
/// the various fields of the request. Once your request is complete, use
/// the `send` method to send the request. `send` returns a future, which
/// you then have to `.await` to get the service's response.
///
/// [builder pattern]: https://rust-lang.github.io/api-guidelines/type-safety.html#c-builder
/// [SigV4-signed requests]: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
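///
/// As a minimal sketch (the `client` value is assumed to have been built
/// beforehand, for example via [`Client::with_config`] or the `From` impl
/// below, and the operation choice is arbitrary):
///
/// ```no_run
/// # async fn sketch(client: crate::client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// let canaries = client.describe_canaries().send().await?;
/// # let _ = canaries;
/// # Ok(())
/// # }
/// ```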
#[derive(std::fmt::Debug)]
pub struct Client<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<Handle<C, M, R>>,
}
impl<C, M, R> std::clone::Clone for Client<C, M, R> {
fn clone(&self) -> Self {
Self {
handle: self.handle.clone(),
}
}
}
#[doc(inline)]
pub use aws_smithy_client::Builder;
impl<C, M, R> From<aws_smithy_client::Client<C, M, R>> for Client<C, M, R> {
fn from(client: aws_smithy_client::Client<C, M, R>) -> Self {
Self::with_config(client, crate::Config::builder().build())
}
}
impl<C, M, R> Client<C, M, R> {
/// Creates a client with the given service configuration.
pub fn with_config(client: aws_smithy_client::Client<C, M, R>, conf: crate::Config) -> Self {
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
/// Returns the client's configuration.
pub fn conf(&self) -> &crate::Config {
&self.handle.conf
}
}
impl<C, M, R> Client<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Constructs a fluent builder for the `CreateCanary` operation.
///
/// See [`CreateCanary`](crate::client::fluent_builders::CreateCanary) for more information about the
/// operation and its arguments.
pub fn create_canary(&self) -> fluent_builders::CreateCanary<C, M, R> {
fluent_builders::CreateCanary::new(self.handle.clone())
}
/// Constructs a fluent builder for the `DeleteCanary` operation.
///
/// See [`DeleteCanary`](crate::client::fluent_builders::DeleteCanary) for more information about the
/// operation and its arguments.
pub fn delete_canary(&self) -> fluent_builders::DeleteCanary<C, M, R> {
fluent_builders::DeleteCanary::new(self.handle.clone())
}
/// Constructs a fluent builder for the `DescribeCanaries` operation.
///
/// See [`DescribeCanaries`](crate::client::fluent_builders::DescribeCanaries) for more information about the
/// operation and its arguments.
pub fn describe_canaries(&self) -> fluent_builders::DescribeCanaries<C, M, R> {
fluent_builders::DescribeCanaries::new(self.handle.clone())
}
/// Constructs a fluent builder for the `DescribeCanariesLastRun` operation.
///
/// See [`DescribeCanariesLastRun`](crate::client::fluent_builders::DescribeCanariesLastRun) for more information about the
/// operation and its arguments.
pub fn describe_canaries_last_run(&self) -> fluent_builders::DescribeCanariesLastRun<C, M, R> {
fluent_builders::DescribeCanariesLastRun::new(self.handle.clone())
}
/// Constructs a fluent builder for the `DescribeRuntimeVersions` operation.
///
/// See [`DescribeRuntimeVersions`](crate::client::fluent_builders::DescribeRuntimeVersions) for more information about the
/// operation and its arguments.
pub fn describe_runtime_versions(&self) -> fluent_builders::DescribeRuntimeVersions<C, M, R> {
fluent_builders::DescribeRuntimeVersions::new(self.handle.clone())
}
/// Constructs a fluent builder for the `GetCanary` operation.
///
/// See [`GetCanary`](crate::client::fluent_builders::GetCanary) for more information about the
/// operation and its arguments.
pub fn get_canary(&self) -> fluent_builders::GetCanary<C, M, R> {
fluent_builders::GetCanary::new(self.handle.clone())
}
/// Constructs a fluent builder for the `GetCanaryRuns` operation.
///
/// See [`GetCanaryRuns`](crate::client::fluent_builders::GetCanaryRuns) for more information about the
/// operation and its arguments.
pub fn get_canary_runs(&self) -> fluent_builders::GetCanaryRuns<C, M, R> {
fluent_builders::GetCanaryRuns::new(self.handle.clone())
}
/// Constructs a fluent builder for the `ListTagsForResource` operation.
///
/// See [`ListTagsForResource`](crate::client::fluent_builders::ListTagsForResource) for more information about the
/// operation and its arguments.
pub fn list_tags_for_resource(&self) -> fluent_builders::ListTagsForResource<C, M, R> {
fluent_builders::ListTagsForResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the `StartCanary` operation.
///
/// See [`StartCanary`](crate::client::fluent_builders::StartCanary) for more information about the
/// operation and its arguments.
pub fn start_canary(&self) -> fluent_builders::StartCanary<C, M, R> {
fluent_builders::StartCanary::new(self.handle.clone())
}
/// Constructs a fluent builder for the `StopCanary` operation.
///
/// See [`StopCanary`](crate::client::fluent_builders::StopCanary) for more information about the
/// operation and its arguments.
pub fn stop_canary(&self) -> fluent_builders::StopCanary<C, M, R> {
fluent_builders::StopCanary::new(self.handle.clone())
}
/// Constructs a fluent builder for the `TagResource` operation.
///
/// See [`TagResource`](crate::client::fluent_builders::TagResource) for more information about the
/// operation and its arguments.
pub fn tag_resource(&self) -> fluent_builders::TagResource<C, M, R> {
fluent_builders::TagResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the `UntagResource` operation.
///
/// See [`UntagResource`](crate::client::fluent_builders::UntagResource) for more information about the
/// operation and its arguments.
pub fn untag_resource(&self) -> fluent_builders::UntagResource<C, M, R> {
fluent_builders::UntagResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the `UpdateCanary` operation.
///
/// See [`UpdateCanary`](crate::client::fluent_builders::UpdateCanary) for more information about the
/// operation and its arguments.
pub fn update_canary(&self) -> fluent_builders::UpdateCanary<C, M, R> {
fluent_builders::UpdateCanary::new(self.handle.clone())
}
}
pub mod fluent_builders {
//!
//! Utilities to ergonomically construct a request to the service.
//!
//! Fluent builders are created through the [`Client`](crate::client::Client) by calling
//! one if its operation methods. After parameters are set using the builder methods,
//! the `send` method can be called to initiate the request.
//!
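    //! As a minimal sketch of that flow (assuming an already-constructed
    //! [`Client`](crate::client::Client); the canary name is a placeholder and a
    //! real request needs more settings than shown here):
    //!
    //! ```no_run
    //! # async fn sketch(
    //! #     client: crate::client::Client,
    //! # ) -> Result<(), Box<dyn std::error::Error>> {
    //! let resp = client
    //!     .create_canary()
    //!     .name("my-canary")
    //!     .send()
    //!     .await?;
    //! # let _ = resp;
    //! # Ok(())
    //! # }
    //! ```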
/// Fluent builder constructing a request to `CreateCanary`.
///
/// <p>Creates a canary. Canaries are scripts that monitor your endpoints and APIs from the
/// outside-in. Canaries help you check the availability and latency of your web services and
/// troubleshoot anomalies by investigating load time data, screenshots of the UI, logs, and
/// metrics. You can set up a canary to run continuously or just once. </p>
/// <p>Do not use <code>CreateCanary</code> to modify an existing canary. Use <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_UpdateCanary.html">UpdateCanary</a> instead.</p>
/// <p>To create canaries, you must have the <code>CloudWatchSyntheticsFullAccess</code> policy.
/// If you are creating a new IAM role for the canary, you also need the
    /// <code>iam:CreateRole</code>, <code>iam:CreatePolicy</code> and
/// <code>iam:AttachRolePolicy</code> permissions. For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Roles">Necessary
/// Roles and Permissions</a>.</p>
/// <p>Do not include secrets or proprietary information in your canary names. The canary name
/// makes up part of the Amazon Resource Name (ARN) for the canary, and the ARN is included in
/// outbound calls over the internet. For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/servicelens_canaries_security.html">Security
/// Considerations for Synthetics Canaries</a>.</p>
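    ///
    /// A minimal sketch of building this request (all values below are
    /// placeholders, and the canary `code` plus the rest of a real canary's
    /// configuration are omitted for brevity):
    ///
    /// ```no_run
    /// # async fn sketch(
    /// #     client: crate::client::Client,
    /// # ) -> Result<(), Box<dyn std::error::Error>> {
    /// let resp = client
    ///     .create_canary()
    ///     .name("my-canary")
    ///     .artifact_s3_location("s3://my-bucket/canary-artifacts")
    ///     .execution_role_arn("arn:aws:iam::123456789012:role/my-canary-role")
    ///     .send()
    ///     .await?;
    /// # let _ = resp;
    /// # Ok(())
    /// # }
    /// ```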
#[derive(std::fmt::Debug)]
pub struct CreateCanary<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::create_canary_input::Builder,
}
impl<C, M, R> CreateCanary<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CreateCanary`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
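        ///
        /// A minimal sketch of handling the result (the builder is assumed to be
        /// fully configured already):
        ///
        /// ```no_run
        /// # async fn sketch(builder: crate::client::fluent_builders::CreateCanary) {
        /// match builder.send().await {
        ///     Ok(output) => println!("canary created: {:?}", output),
        ///     Err(err) => eprintln!("CreateCanary failed: {}", err),
        /// }
        /// # }
        /// ```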
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateCanaryOutput,
aws_smithy_http::result::SdkError<crate::error::CreateCanaryError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CreateCanaryInputOperationOutputAlias,
crate::output::CreateCanaryOutput,
crate::error::CreateCanaryError,
crate::input::CreateCanaryInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name for this canary. Be sure to give it a descriptive name
/// that distinguishes it from other canaries in your account.</p>
/// <p>Do not include secrets or proprietary information in your canary names. The canary name
/// makes up part of the canary ARN, and the ARN is included in outbound calls over the
/// internet. For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/servicelens_canaries_security.html">Security
/// Considerations for Synthetics Canaries</a>.</p>
pub fn name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(inp);
self
}
/// <p>The name for this canary. Be sure to give it a descriptive name
/// that distinguishes it from other canaries in your account.</p>
/// <p>Do not include secrets or proprietary information in your canary names. The canary name
/// makes up part of the canary ARN, and the ARN is included in outbound calls over the
/// internet. For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/servicelens_canaries_security.html">Security
/// Considerations for Synthetics Canaries</a>.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
/// <p>A structure that includes the entry point from which the canary should start
/// running your script. If the script is stored in
/// an S3 bucket, the bucket name, key, and version are also included.
/// </p>
pub fn code(mut self, inp: crate::model::CanaryCodeInput) -> Self {
self.inner = self.inner.code(inp);
self
}
/// <p>A structure that includes the entry point from which the canary should start
/// running your script. If the script is stored in
/// an S3 bucket, the bucket name, key, and version are also included.
/// </p>
pub fn set_code(
mut self,
input: std::option::Option<crate::model::CanaryCodeInput>,
) -> Self {
self.inner = self.inner.set_code(input);
self
}
/// <p>The location in Amazon S3 where Synthetics stores artifacts from the test runs of this
/// canary. Artifacts include the log file, screenshots, and HAR files. The name of the
/// S3 bucket can't include a period (.).</p>
pub fn artifact_s3_location(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.artifact_s3_location(inp);
self
}
/// <p>The location in Amazon S3 where Synthetics stores artifacts from the test runs of this
/// canary. Artifacts include the log file, screenshots, and HAR files. The name of the
/// S3 bucket can't include a period (.).</p>
pub fn set_artifact_s3_location(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_artifact_s3_location(input);
self
}
/// <p>The ARN of the IAM role to be used to run the canary. This role must already exist,
/// and must include <code>lambda.amazonaws.com</code> as a principal in the trust
/// policy. The role must also have the following permissions:</p>
/// <ul>
/// <li>
/// <p>
/// <code>s3:PutObject</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>s3:GetBucketLocation</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>s3:ListAllMyBuckets</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>cloudwatch:PutMetricData</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>logs:CreateLogGroup</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>logs:CreateLogStream</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>logs:PutLogEvents</code>
/// </p>
/// </li>
/// </ul>
pub fn execution_role_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.execution_role_arn(inp);
self
}
/// <p>The ARN of the IAM role to be used to run the canary. This role must already exist,
/// and must include <code>lambda.amazonaws.com</code> as a principal in the trust
/// policy. The role must also have the following permissions:</p>
/// <ul>
/// <li>
/// <p>
/// <code>s3:PutObject</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>s3:GetBucketLocation</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>s3:ListAllMyBuckets</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>cloudwatch:PutMetricData</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>logs:CreateLogGroup</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>logs:CreateLogStream</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>logs:PutLogEvents</code>
/// </p>
/// </li>
/// </ul>
pub fn set_execution_role_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_execution_role_arn(input);
self
}
/// <p>A structure that contains information about how often the canary is to run and when
/// these test runs are to stop.</p>
pub fn schedule(mut self, inp: crate::model::CanaryScheduleInput) -> Self {
self.inner = self.inner.schedule(inp);
self
}
/// <p>A structure that contains information about how often the canary is to run and when
/// these test runs are to stop.</p>
pub fn set_schedule(
mut self,
input: std::option::Option<crate::model::CanaryScheduleInput>,
) -> Self {
self.inner = self.inner.set_schedule(input);
self
}
/// <p>A structure that contains the configuration for individual canary runs,
/// such as timeout value.</p>
pub fn run_config(mut self, inp: crate::model::CanaryRunConfigInput) -> Self {
self.inner = self.inner.run_config(inp);
self
}
/// <p>A structure that contains the configuration for individual canary runs,
/// such as timeout value.</p>
pub fn set_run_config(
mut self,
input: std::option::Option<crate::model::CanaryRunConfigInput>,
) -> Self {
self.inner = self.inner.set_run_config(input);
self
}
/// <p>The number of days to retain data about successful runs of this canary. If you omit
/// this field, the default of 31 days is used. The valid range is 1 to 455 days.</p>
pub fn success_retention_period_in_days(mut self, inp: i32) -> Self {
self.inner = self.inner.success_retention_period_in_days(inp);
self
}
/// <p>The number of days to retain data about successful runs of this canary. If you omit
/// this field, the default of 31 days is used. The valid range is 1 to 455 days.</p>
pub fn set_success_retention_period_in_days(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.inner = self.inner.set_success_retention_period_in_days(input);
self
}
/// <p>The number of days to retain data about failed runs of this canary. If you omit
/// this field, the default of 31 days is used. The valid range is 1 to 455 days.</p>
pub fn failure_retention_period_in_days(mut self, inp: i32) -> Self {
self.inner = self.inner.failure_retention_period_in_days(inp);
self
}
/// <p>The number of days to retain data about failed runs of this canary. If you omit
/// this field, the default of 31 days is used. The valid range is 1 to 455 days.</p>
pub fn set_failure_retention_period_in_days(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.inner = self.inner.set_failure_retention_period_in_days(input);
self
}
/// <p>Specifies the runtime version to use for the canary. For a list of valid
/// runtime versions and more information about
/// runtime versions, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html">
/// Canary Runtime Versions</a>.</p>
pub fn runtime_version(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.runtime_version(inp);
self
}
/// <p>Specifies the runtime version to use for the canary. For a list of valid
/// runtime versions and more information about
/// runtime versions, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html">
/// Canary Runtime Versions</a>.</p>
pub fn set_runtime_version(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_runtime_version(input);
self
}
/// <p>If this canary is to test an endpoint in a VPC, this structure contains
/// information about the subnet and security groups of the VPC endpoint.
/// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_VPC.html">
/// Running a Canary in a VPC</a>.</p>
pub fn vpc_config(mut self, inp: crate::model::VpcConfigInput) -> Self {
self.inner = self.inner.vpc_config(inp);
self
}
/// <p>If this canary is to test an endpoint in a VPC, this structure contains
/// information about the subnet and security groups of the VPC endpoint.
/// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_VPC.html">
/// Running a Canary in a VPC</a>.</p>
pub fn set_vpc_config(
mut self,
input: std::option::Option<crate::model::VpcConfigInput>,
) -> Self {
self.inner = self.inner.set_vpc_config(input);
self
}
/// Adds a key-value pair to `Tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>A list of key-value pairs to associate with the canary.
/// You can associate as many as 50 tags with a canary.</p>
/// <p>Tags can help you organize and categorize your
/// resources. You can also use them to scope user permissions, by
/// granting a user permission to access or change only the resources that have
/// certain tag values.</p>
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.tags(k, v);
self
}
/// <p>A list of key-value pairs to associate with the canary.
/// You can associate as many as 50 tags with a canary.</p>
/// <p>Tags can help you organize and categorize your
/// resources. You can also use them to scope user permissions, by
/// granting a user permission to access or change only the resources that have
/// certain tag values.</p>
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
/// <p>A structure that contains the configuration for canary artifacts, including
/// the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3.</p>
pub fn artifact_config(mut self, inp: crate::model::ArtifactConfigInput) -> Self {
self.inner = self.inner.artifact_config(inp);
self
}
/// <p>A structure that contains the configuration for canary artifacts, including
/// the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3.</p>
pub fn set_artifact_config(
mut self,
input: std::option::Option<crate::model::ArtifactConfigInput>,
) -> Self {
self.inner = self.inner.set_artifact_config(input);
self
}
}
/// Fluent builder constructing a request to `DeleteCanary`.
///
/// <p>Permanently deletes the specified canary.</p>
/// <p>When you delete a canary, resources used and created by the canary are not automatically deleted. After you delete a canary that you do not intend to
/// use again, you
/// should also delete the following:</p>
/// <ul>
/// <li>
/// <p>The Lambda functions and layers used by this canary. These have the prefix
/// <code>cwsyn-<i>MyCanaryName</i>
/// </code>.</p>
/// </li>
/// <li>
/// <p>The CloudWatch alarms created for this canary. These alarms have a name of
/// <code>Synthetics-SharpDrop-Alarm-<i>MyCanaryName</i>
/// </code>.</p>
/// </li>
/// <li>
/// <p>Amazon S3 objects and buckets, such as the canary's artifact location.</p>
/// </li>
/// <li>
/// <p>IAM roles created for the canary. If they were created in the console, these roles
/// have the name <code>
/// role/service-role/CloudWatchSyntheticsRole-<i>MyCanaryName</i>
/// </code>.</p>
/// </li>
/// <li>
/// <p>CloudWatch Logs log groups created for the canary. These logs groups have the name
/// <code>/aws/lambda/cwsyn-<i>MyCanaryName</i>
/// </code>. </p>
/// </li>
/// </ul>
///
/// <p>Before you delete a canary, you might want to use <code>GetCanary</code> to display
/// the information about this canary. Make
/// note of the information returned by this operation so that you can delete these resources
/// after you delete the canary.</p>
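/// # Example
///
/// A sketch of the pattern recommended above, assuming `get_canary()` and
/// `delete_canary()` accessors on the fluent client: fetch the canary's
/// details first so the associated resources can be cleaned up afterwards.
///
/// ```ignore
/// let details = client.get_canary().name("my-canary").send().await?;
/// // Record the Lambda, alarm, S3, IAM, and log-group resources from
/// // `details` before deleting, then clean them up separately.
/// client.delete_canary().name("my-canary").send().await?;
/// ```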
#[derive(std::fmt::Debug)]
pub struct DeleteCanary<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::delete_canary_input::Builder,
}
impl<C, M, R> DeleteCanary<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DeleteCanary`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteCanaryOutput,
aws_smithy_http::result::SdkError<crate::error::DeleteCanaryError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DeleteCanaryInputOperationOutputAlias,
crate::output::DeleteCanaryOutput,
crate::error::DeleteCanaryError,
crate::input::DeleteCanaryInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the canary that you want to delete. To find the names of your canaries, use <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DescribeCanaries.html">DescribeCanaries</a>.</p>
pub fn name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(inp);
self
}
/// <p>The name of the canary that you want to delete. To find the names of your canaries, use <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DescribeCanaries.html">DescribeCanaries</a>.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeCanaries`.
///
/// <p>This operation returns a list of the canaries in your account, along with full details
/// about each canary.</p>
/// <p>This operation does not have resource-level authorization, so if a user is able to use
/// <code>DescribeCanaries</code>, the user can see all of the canaries in the account. A
/// deny policy can only be used to restrict access to all canaries. It cannot be used on
/// specific resources. </p>
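/// # Example
///
/// A sketch of paging through all canaries with the `NextToken` parameter,
/// assuming a `describe_canaries()` accessor on the fluent client and that
/// the output exposes its `next_token` value:
///
/// ```ignore
/// let mut next_token: Option<String> = None;
/// loop {
///     let mut req = client.describe_canaries().max_results(20);
///     if let Some(token) = next_token.take() {
///         req = req.next_token(token);
///     }
///     let resp = req.send().await?;
///     // ... inspect the returned canaries ...
///     match resp.next_token {
///         Some(token) => next_token = Some(token),
///         None => break,
///     }
/// }
/// ```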
#[derive(std::fmt::Debug)]
pub struct DescribeCanaries<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_canaries_input::Builder,
}
impl<C, M, R> DescribeCanaries<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeCanaries`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeCanariesOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeCanariesError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeCanariesInputOperationOutputAlias,
crate::output::DescribeCanariesOutput,
crate::error::DescribeCanariesError,
crate::input::DescribeCanariesInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>A token that indicates that there is more data
/// available. You can use this token in a subsequent operation to retrieve the next
/// set of results.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>A token that indicates that there is more data
/// available. You can use this token in a subsequent operation to retrieve the next
/// set of results.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
/// <p>Specify this parameter to limit how many canaries are returned each time you use
/// the <code>DescribeCanaries</code> operation. If you omit this parameter, the default of 100 is used.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
/// <p>Specify this parameter to limit how many canaries are returned each time you use
/// the <code>DescribeCanaries</code> operation. If you omit this parameter, the default of 100 is used.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
}
/// Fluent builder constructing a request to `DescribeCanariesLastRun`.
///
/// <p>Use this operation to see information from the most recent run of each canary that you have created.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeCanariesLastRun<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_canaries_last_run_input::Builder,
}
impl<C, M, R> DescribeCanariesLastRun<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeCanariesLastRun`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeCanariesLastRunOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeCanariesLastRunError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeCanariesLastRunInputOperationOutputAlias,
crate::output::DescribeCanariesLastRunOutput,
crate::error::DescribeCanariesLastRunError,
crate::input::DescribeCanariesLastRunInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>A token that indicates that there is more data
    /// available. You can use this token in a subsequent <code>DescribeCanariesLastRun</code> operation to retrieve the next
/// set of results.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>A token that indicates that there is more data
    /// available. You can use this token in a subsequent <code>DescribeCanariesLastRun</code> operation to retrieve the next
/// set of results.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
/// <p>Specify this parameter to limit how many runs are returned each time you use
    /// the <code>DescribeCanariesLastRun</code> operation. If you omit this parameter, the default of 100 is used.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
/// <p>Specify this parameter to limit how many runs are returned each time you use
    /// the <code>DescribeCanariesLastRun</code> operation. If you omit this parameter, the default of 100 is used.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
}
/// Fluent builder constructing a request to `DescribeRuntimeVersions`.
///
/// <p>Returns a list of Synthetics canary runtime versions. For more information,
/// see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html">
/// Canary Runtime Versions</a>.</p>
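/// # Example
///
/// A sketch of listing the available runtime versions, for example to choose
/// a `runtime_version` value for `CreateCanary`; it assumes a
/// `describe_runtime_versions()` accessor on the fluent client:
///
/// ```ignore
/// let resp = client.describe_runtime_versions().max_results(50).send().await?;
/// // ... inspect the returned runtime versions ...
/// ```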
#[derive(std::fmt::Debug)]
pub struct DescribeRuntimeVersions<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_runtime_versions_input::Builder,
}
impl<C, M, R> DescribeRuntimeVersions<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeRuntimeVersions`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeRuntimeVersionsOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeRuntimeVersionsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeRuntimeVersionsInputOperationOutputAlias,
crate::output::DescribeRuntimeVersionsOutput,
crate::error::DescribeRuntimeVersionsError,
crate::input::DescribeRuntimeVersionsInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>A token that indicates that there is more data
/// available. You can use this token in a subsequent <code>DescribeRuntimeVersions</code> operation to retrieve the next
/// set of results.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>A token that indicates that there is more data
/// available. You can use this token in a subsequent <code>DescribeRuntimeVersions</code> operation to retrieve the next
/// set of results.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
    /// <p>Specify this parameter to limit how many runtime versions are returned each time you use
/// the <code>DescribeRuntimeVersions</code> operation. If you omit this parameter, the default of 100 is used.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
    /// <p>Specify this parameter to limit how many runtime versions are returned each time you use
/// the <code>DescribeRuntimeVersions</code> operation. If you omit this parameter, the default of 100 is used.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
}
/// Fluent builder constructing a request to `GetCanary`.
///
/// <p>Retrieves complete information about one canary. You must specify
/// the name of the canary that you want. To get a list of canaries
/// and their names, use <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DescribeCanaries.html">DescribeCanaries</a>.</p>
#[derive(std::fmt::Debug)]
pub struct GetCanary<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_canary_input::Builder,
}
impl<C, M, R> GetCanary<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetCanary`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetCanaryOutput,
aws_smithy_http::result::SdkError<crate::error::GetCanaryError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetCanaryInputOperationOutputAlias,
crate::output::GetCanaryOutput,
crate::error::GetCanaryError,
crate::input::GetCanaryInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the canary that you want details for.</p>
pub fn name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(inp);
self
}
/// <p>The name of the canary that you want details for.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
}
/// Fluent builder constructing a request to `GetCanaryRuns`.
///
/// <p>Retrieves a list of runs for a specified canary.</p>
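/// # Example
///
/// A sketch of fetching the ten most recent runs of one canary, assuming a
/// `get_canary_runs()` accessor on the fluent client:
///
/// ```ignore
/// let resp = client
///     .get_canary_runs()
///     .name("my-canary")
///     .max_results(10)
///     .send()
///     .await?;
/// // ... inspect the returned runs ...
/// ```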
#[derive(std::fmt::Debug)]
pub struct GetCanaryRuns<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_canary_runs_input::Builder,
}
impl<C, M, R> GetCanaryRuns<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetCanaryRuns`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetCanaryRunsOutput,
aws_smithy_http::result::SdkError<crate::error::GetCanaryRunsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetCanaryRunsInputOperationOutputAlias,
crate::output::GetCanaryRunsOutput,
crate::error::GetCanaryRunsError,
crate::input::GetCanaryRunsInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the canary that you want to see runs for.</p>
pub fn name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(inp);
self
}
/// <p>The name of the canary that you want to see runs for.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
/// <p>A token that indicates that there is more data
/// available. You can use this token in a subsequent <code>GetCanaryRuns</code> operation to retrieve the next
/// set of results.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>A token that indicates that there is more data
/// available. You can use this token in a subsequent <code>GetCanaryRuns</code> operation to retrieve the next
/// set of results.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
/// <p>Specify this parameter to limit how many runs are returned each time you use
/// the <code>GetCanaryRuns</code> operation. If you omit this parameter, the default of 100 is used.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
/// <p>Specify this parameter to limit how many runs are returned each time you use
/// the <code>GetCanaryRuns</code> operation. If you omit this parameter, the default of 100 is used.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
}
/// Fluent builder constructing a request to `ListTagsForResource`.
///
/// <p>Displays the tags associated with a canary.</p>
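/// # Example
///
/// A sketch of reading a canary's tags back, assuming a
/// `list_tags_for_resource()` accessor on the fluent client; the ARN below is
/// a placeholder:
///
/// ```ignore
/// let resp = client
///     .list_tags_for_resource()
///     .resource_arn("arn:aws:synthetics:us-east-1:123456789012:canary:my-canary")
///     .send()
///     .await?;
/// // ... inspect the returned tag map ...
/// ```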
#[derive(std::fmt::Debug)]
pub struct ListTagsForResource<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_tags_for_resource_input::Builder,
}
impl<C, M, R> ListTagsForResource<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListTagsForResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListTagsForResourceOutput,
aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListTagsForResourceInputOperationOutputAlias,
crate::output::ListTagsForResourceOutput,
crate::error::ListTagsForResourceError,
crate::input::ListTagsForResourceInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ARN of the canary that you want to view tags for.</p>
/// <p>The ARN format of a canary is
/// <code>arn:aws:synthetics:<i>Region</i>:<i>account-id</i>:canary:<i>canary-name</i>
/// </code>.</p>
pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(inp);
self
}
/// <p>The ARN of the canary that you want to view tags for.</p>
/// <p>The ARN format of a canary is
/// <code>arn:aws:synthetics:<i>Region</i>:<i>account-id</i>:canary:<i>canary-name</i>
/// </code>.</p>
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
}
/// Fluent builder constructing a request to `StartCanary`.
///
/// <p>Use this operation to run a canary that has already been created.
/// The frequency of the canary runs is determined by the value of the canary's <code>Schedule</code>. To see a canary's schedule,
/// use <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_GetCanary.html">GetCanary</a>.</p>
#[derive(std::fmt::Debug)]
pub struct StartCanary<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::start_canary_input::Builder,
}
impl<C, M, R> StartCanary<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `StartCanary`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::StartCanaryOutput,
aws_smithy_http::result::SdkError<crate::error::StartCanaryError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::StartCanaryInputOperationOutputAlias,
crate::output::StartCanaryOutput,
crate::error::StartCanaryError,
crate::input::StartCanaryInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the canary that you want to run. To find
/// canary names, use <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DescribeCanaries.html">DescribeCanaries</a>.</p>
pub fn name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(inp);
self
}
/// <p>The name of the canary that you want to run. To find
/// canary names, use <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DescribeCanaries.html">DescribeCanaries</a>.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
}
/// Fluent builder constructing a request to `StopCanary`.
///
/// <p>Stops the canary to prevent all future runs. If the canary is currently running,
/// Synthetics stops waiting for the current run of the specified canary to complete. The
/// run that is in progress completes on its own, publishes metrics, and uploads artifacts, but
/// it is not recorded in Synthetics as a completed run.</p>
/// <p>You can use <code>StartCanary</code> to start it running again
/// with the canary’s current schedule at any point in the future. </p>
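/// # Example
///
/// A sketch of stopping a canary and resuming it later, assuming
/// `stop_canary()` and `start_canary()` accessors on the fluent client:
///
/// ```ignore
/// client.stop_canary().name("my-canary").send().await?;
/// // ... later, resume runs on the canary's existing schedule ...
/// client.start_canary().name("my-canary").send().await?;
/// ```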
#[derive(std::fmt::Debug)]
pub struct StopCanary<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::stop_canary_input::Builder,
}
impl<C, M, R> StopCanary<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `StopCanary`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::StopCanaryOutput,
aws_smithy_http::result::SdkError<crate::error::StopCanaryError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::StopCanaryInputOperationOutputAlias,
crate::output::StopCanaryOutput,
crate::error::StopCanaryError,
crate::input::StopCanaryInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the canary that you want to stop. To find the names of your
/// canaries, use <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DescribeCanaries.html">DescribeCanaries</a>.</p>
pub fn name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(inp);
self
}
/// <p>The name of the canary that you want to stop. To find the names of your
/// canaries, use <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DescribeCanaries.html">DescribeCanaries</a>.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
}
/// Fluent builder constructing a request to `TagResource`.
///
/// <p>Assigns one or more tags (key-value pairs) to the specified canary. </p>
/// <p>Tags can help you organize and categorize your
/// resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with
/// certain tag values.</p>
/// <p>Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.</p>
/// <p>You can use the <code>TagResource</code> action with a canary that already has tags. If you specify a new tag key for the canary,
/// this tag is appended to the list of tags associated
/// with the canary. If you specify a tag key that is already associated with the canary, the new tag value that you specify replaces
/// the previous value for that tag.</p>
/// <p>You can associate as many as 50 tags with a canary.</p>
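/// # Example
///
/// A sketch of adding two tags to a canary, assuming a `tag_resource()`
/// accessor on the fluent client; repeated `tags(k, v)` calls add entries to
/// the map, and the account ID and canary name in the ARN are placeholders:
///
/// ```ignore
/// client
///     .tag_resource()
///     .resource_arn("arn:aws:synthetics:us-east-1:123456789012:canary:my-canary")
///     .tags("Team", "frontend")
///     .tags("Stage", "prod")
///     .send()
///     .await?;
/// ```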
#[derive(std::fmt::Debug)]
pub struct TagResource<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::tag_resource_input::Builder,
}
impl<C, M, R> TagResource<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `TagResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::TagResourceOutput,
aws_smithy_http::result::SdkError<crate::error::TagResourceError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::TagResourceInputOperationOutputAlias,
crate::output::TagResourceOutput,
crate::error::TagResourceError,
crate::input::TagResourceInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ARN of the canary that you're adding tags to.</p>
/// <p>The ARN format of a canary is
/// <code>arn:aws:synthetics:<i>Region</i>:<i>account-id</i>:canary:<i>canary-name</i>
/// </code>.</p>
pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(inp);
self
}
/// <p>The ARN of the canary that you're adding tags to.</p>
/// <p>The ARN format of a canary is
/// <code>arn:aws:synthetics:<i>Region</i>:<i>account-id</i>:canary:<i>canary-name</i>
/// </code>.</p>
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
/// Adds a key-value pair to `Tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>The list of key-value pairs to associate with the canary.</p>
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.tags(k, v);
self
}
/// <p>The list of key-value pairs to associate with the canary.</p>
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
}
/// Fluent builder constructing a request to `UntagResource`.
///
/// <p>Removes one or more tags from the specified canary.</p>
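/// # Example
///
/// A sketch of removing tags by key, assuming an `untag_resource()` accessor
/// on the fluent client; repeated `tag_keys` calls append to the key list:
///
/// ```ignore
/// client
///     .untag_resource()
///     .resource_arn("arn:aws:synthetics:us-east-1:123456789012:canary:my-canary")
///     .tag_keys("Team")
///     .tag_keys("Stage")
///     .send()
///     .await?;
/// ```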
#[derive(std::fmt::Debug)]
pub struct UntagResource<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::untag_resource_input::Builder,
}
impl<C, M, R> UntagResource<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UntagResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UntagResourceOutput,
aws_smithy_http::result::SdkError<crate::error::UntagResourceError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UntagResourceInputOperationOutputAlias,
crate::output::UntagResourceOutput,
crate::error::UntagResourceError,
crate::input::UntagResourceInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ARN of the canary that you're removing tags from.</p>
/// <p>The ARN format of a canary is
/// <code>arn:aws:synthetics:<i>Region</i>:<i>account-id</i>:canary:<i>canary-name</i>
/// </code>.</p>
pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(inp);
self
}
/// <p>The ARN of the canary that you're removing tags from.</p>
/// <p>The ARN format of a canary is
/// <code>arn:aws:synthetics:<i>Region</i>:<i>account-id</i>:canary:<i>canary-name</i>
/// </code>.</p>
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
/// Appends an item to `TagKeys`.
///
/// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys).
///
/// <p>The list of tag keys to remove from the resource.</p>
pub fn tag_keys(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.tag_keys(inp);
self
}
/// <p>The list of tag keys to remove from the resource.</p>
pub fn set_tag_keys(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_tag_keys(input);
self
}
}
/// Fluent builder constructing a request to `UpdateCanary`.
///
/// <p>Use this operation to change the settings of a canary that has
/// already been created.</p>
/// <p>You can't use this operation to update the tags of an existing canary. To
/// change the tags of an existing canary, use
/// <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_TagResource.html">TagResource</a>.</p>
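/// # Example
///
/// A sketch of a partial update, assuming an `update_canary()` accessor on
/// the fluent client. Only the fields you set are changed; here only the
/// runtime version is updated:
///
/// ```ignore
/// client
///     .update_canary()
///     .name("my-canary")
///     .runtime_version("syn-nodejs-puppeteer-3.3")
///     .send()
///     .await?;
/// ```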
#[derive(std::fmt::Debug)]
pub struct UpdateCanary<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::update_canary_input::Builder,
}
impl<C, M, R> UpdateCanary<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateCanary`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateCanaryOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateCanaryError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateCanaryInputOperationOutputAlias,
crate::output::UpdateCanaryOutput,
crate::error::UpdateCanaryError,
crate::input::UpdateCanaryInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the canary that you want to update. To find the names of your
/// canaries, use <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DescribeCanaries.html">DescribeCanaries</a>.</p>
/// <p>You cannot change the name of a canary that has already been created.</p>
pub fn name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(inp);
self
}
/// <p>The name of the canary that you want to update. To find the names of your
/// canaries, use <a href="https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DescribeCanaries.html">DescribeCanaries</a>.</p>
/// <p>You cannot change the name of a canary that has already been created.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
/// <p>A structure that includes the entry point from which the canary should start
/// running your script. If the script is stored in
/// an S3 bucket, the bucket name, key, and version are also included.
/// </p>
pub fn code(mut self, inp: crate::model::CanaryCodeInput) -> Self {
self.inner = self.inner.code(inp);
self
}
/// <p>A structure that includes the entry point from which the canary should start
/// running your script. If the script is stored in
/// an S3 bucket, the bucket name, key, and version are also included.
/// </p>
pub fn set_code(
mut self,
input: std::option::Option<crate::model::CanaryCodeInput>,
) -> Self {
self.inner = self.inner.set_code(input);
self
}
/// <p>The ARN of the IAM role to be used to run the canary. This role must already exist,
/// and must include <code>lambda.amazonaws.com</code> as a principal in the trust
/// policy. The role must also have the following permissions:</p>
/// <ul>
/// <li>
/// <p>
/// <code>s3:PutObject</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>s3:GetBucketLocation</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>s3:ListAllMyBuckets</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>cloudwatch:PutMetricData</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>logs:CreateLogGroup</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>logs:CreateLogStream</code>
/// </p>
/// </li>
/// <li>
/// <p>
    /// <code>logs:PutLogEvents</code>
/// </p>
/// </li>
/// </ul>
pub fn execution_role_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.execution_role_arn(inp);
self
}
/// <p>The ARN of the IAM role to be used to run the canary. This role must already exist,
/// and must include <code>lambda.amazonaws.com</code> as a principal in the trust
/// policy. The role must also have the following permissions:</p>
/// <ul>
/// <li>
/// <p>
/// <code>s3:PutObject</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>s3:GetBucketLocation</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>s3:ListAllMyBuckets</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>cloudwatch:PutMetricData</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>logs:CreateLogGroup</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>logs:CreateLogStream</code>
/// </p>
/// </li>
/// <li>
/// <p>
    /// <code>logs:PutLogEvents</code>
/// </p>
/// </li>
/// </ul>
pub fn set_execution_role_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_execution_role_arn(input);
self
}
/// <p>Specifies the runtime version to use for the canary.
/// For a list of valid runtime versions and for more information about
/// runtime versions, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html">
/// Canary Runtime Versions</a>.</p>
pub fn runtime_version(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.runtime_version(inp);
self
}
/// <p>Specifies the runtime version to use for the canary.
/// For a list of valid runtime versions and for more information about
/// runtime versions, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html">
/// Canary Runtime Versions</a>.</p>
pub fn set_runtime_version(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_runtime_version(input);
self
}
/// <p>A structure that contains information about how often the canary is to run, and when
/// these runs are to stop.</p>
pub fn schedule(mut self, inp: crate::model::CanaryScheduleInput) -> Self {
self.inner = self.inner.schedule(inp);
self
}
/// <p>A structure that contains information about how often the canary is to run, and when
/// these runs are to stop.</p>
pub fn set_schedule(
mut self,
input: std::option::Option<crate::model::CanaryScheduleInput>,
) -> Self {
self.inner = self.inner.set_schedule(input);
self
}
/// <p>A structure that contains the timeout value that is used for each individual run of the
/// canary.</p>
pub fn run_config(mut self, inp: crate::model::CanaryRunConfigInput) -> Self {
self.inner = self.inner.run_config(inp);
self
}
/// <p>A structure that contains the timeout value that is used for each individual run of the
/// canary.</p>
pub fn set_run_config(
mut self,
input: std::option::Option<crate::model::CanaryRunConfigInput>,
) -> Self {
self.inner = self.inner.set_run_config(input);
self
}
/// <p>The number of days to retain data about successful runs of this canary.</p>
pub fn success_retention_period_in_days(mut self, inp: i32) -> Self {
self.inner = self.inner.success_retention_period_in_days(inp);
self
}
/// <p>The number of days to retain data about successful runs of this canary.</p>
pub fn set_success_retention_period_in_days(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.inner = self.inner.set_success_retention_period_in_days(input);
self
}
/// <p>The number of days to retain data about failed runs of this canary.</p>
pub fn failure_retention_period_in_days(mut self, inp: i32) -> Self {
self.inner = self.inner.failure_retention_period_in_days(inp);
self
}
/// <p>The number of days to retain data about failed runs of this canary.</p>
pub fn set_failure_retention_period_in_days(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.inner = self.inner.set_failure_retention_period_in_days(input);
self
}
/// <p>If this canary is to test an endpoint in a VPC, this structure contains
/// information about the subnet and security groups of the VPC endpoint.
/// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_VPC.html">
/// Running a Canary in a VPC</a>.</p>
pub fn vpc_config(mut self, inp: crate::model::VpcConfigInput) -> Self {
self.inner = self.inner.vpc_config(inp);
self
}
/// <p>If this canary is to test an endpoint in a VPC, this structure contains
/// information about the subnet and security groups of the VPC endpoint.
/// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_VPC.html">
/// Running a Canary in a VPC</a>.</p>
pub fn set_vpc_config(
mut self,
input: std::option::Option<crate::model::VpcConfigInput>,
) -> Self {
self.inner = self.inner.set_vpc_config(input);
self
}
    /// <p>Defines the screenshots to use as the baseline for visual monitoring comparisons during future runs of this canary. If you omit this
/// parameter, no changes are made to any baseline screenshots that the canary might be using already.</p>
    /// <p>Visual monitoring is supported only on canaries running the <b>syn-nodejs-puppeteer-3.2</b>
/// runtime or later. For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Library_SyntheticsLogger_VisualTesting.html">
/// Visual monitoring</a> and <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Blueprints_VisualTesting.html">
/// Visual monitoring blueprint</a>
/// </p>
pub fn visual_reference(mut self, inp: crate::model::VisualReferenceInput) -> Self {
self.inner = self.inner.visual_reference(inp);
self
}
    /// <p>Defines the screenshots to use as the baseline for visual monitoring comparisons during future runs of this canary. If you omit this
/// parameter, no changes are made to any baseline screenshots that the canary might be using already.</p>
    /// <p>Visual monitoring is supported only on canaries running the <b>syn-nodejs-puppeteer-3.2</b>
/// runtime or later. For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Library_SyntheticsLogger_VisualTesting.html">
/// Visual monitoring</a> and <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Blueprints_VisualTesting.html">
/// Visual monitoring blueprint</a>
/// </p>
pub fn set_visual_reference(
mut self,
input: std::option::Option<crate::model::VisualReferenceInput>,
) -> Self {
self.inner = self.inner.set_visual_reference(input);
self
}
/// <p>The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary.
/// Artifacts include the log file, screenshots, and HAR files. The name of the
/// S3 bucket can't include a period (.).</p>
pub fn artifact_s3_location(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.artifact_s3_location(inp);
self
}
/// <p>The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary.
/// Artifacts include the log file, screenshots, and HAR files. The name of the
/// S3 bucket can't include a period (.).</p>
pub fn set_artifact_s3_location(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_artifact_s3_location(input);
self
}
/// <p>A structure that contains the configuration for canary artifacts,
/// including the encryption-at-rest settings for artifacts that
/// the canary uploads to Amazon S3.</p>
pub fn artifact_config(mut self, inp: crate::model::ArtifactConfigInput) -> Self {
self.inner = self.inner.artifact_config(inp);
self
}
/// <p>A structure that contains the configuration for canary artifacts,
/// including the encryption-at-rest settings for artifacts that
/// the canary uploads to Amazon S3.</p>
pub fn set_artifact_config(
mut self,
input: std::option::Option<crate::model::ArtifactConfigInput>,
) -> Self {
self.inner = self.inner.set_artifact_config(input);
self
}
}
}
impl<C> Client<C, aws_hyper::AwsMiddleware, aws_smithy_client::retry::Standard> {
/// Creates a client with the given service config and connector override.
pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
let client = aws_hyper::Client::new(conn).with_retry_config(retry_config.into());
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
impl
Client<
aws_smithy_client::erase::DynConnector,
aws_hyper::AwsMiddleware,
aws_smithy_client::retry::Standard,
>
{
/// Creates a new client from a shared config.
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn new(config: &aws_types::config::Config) -> Self {
Self::from_conf(config.into())
}
/// Creates a new client from the service [`Config`](crate::Config).
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn from_conf(conf: crate::Config) -> Self {
let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
let client = aws_hyper::Client::https().with_retry_config(retry_config.into());
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
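// Illustrative usage only (not emitted by the code generator): chaining the
// fluent setters documented above. The canary name, bucket, and surrounding
// error handling are hypothetical.
//
//     let client = Client::new(&shared_config);
//     client
//         .update_canary()
//         .name("my-canary")
//         .artifact_s3_location("s3://example-bucket/canary-artifacts")
//         .send()
//         .await?;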
| 45.657447 | 229 | 0.59411 |
6a106db099085c619e270ce5fef615b9b77d52dd | 3,529 | use crate::errors::{LeftError, Result};
use std::convert::TryFrom;
#[derive(Debug, PartialEq, Clone)]
pub enum Command {
Chord(Vec<Keybind>),
Execute(String),
ExitChord,
Reload,
Kill,
}
impl TryFrom<&str> for Command {
type Error = LeftError;
    fn try_from(value: &str) -> Result<Self> {
        match value {
            // "Execute" carries a payload, so match on its prefix rather than
            // requiring an exact string; `build_execute` strips the prefix.
            s if s.starts_with("Execute ") => build_execute(s),
            "ExitChord" => Ok(Self::ExitChord),
            "Reload" => Ok(Self::Reload),
            "Kill" => Ok(Self::Kill),
            _ => Err(LeftError::CommandNotFound),
        }
    }
}
fn build_execute(raw: &str) -> Result<Command> {
let headless = without_head(raw, "Execute ");
Ok(Command::Execute(headless.to_owned()))
}
fn without_head<'a>(s: &'a str, head: &str) -> &'a str {
    s.strip_prefix(head).unwrap_or(s)
}
#[derive(Debug, PartialEq, Clone)]
pub struct Keybind {
pub command: Command,
pub modifier: Vec<String>,
pub key: String,
}
pub trait Config {
fn mapped_bindings(&self) -> Vec<Keybind>;
}
#[cfg(feature = "watcher")]
pub mod watcher {
use crate::errors::{Error, Result};
use nix::sys::inotify::{AddWatchFlags, InitFlags, Inotify};
use std::os::unix::prelude::AsRawFd;
use std::path::Path;
use std::sync::Arc;
use tokio::sync::{oneshot, Notify};
use tokio::time::Duration;
pub struct Watcher {
fd: Inotify,
task_notify: Arc<Notify>,
_task_guard: oneshot::Receiver<()>,
}
impl Watcher {
pub fn new(config_file: &Path) -> Result<Watcher> {
const INOTIFY: mio::Token = mio::Token(0);
let fd = Inotify::init(InitFlags::all())?;
let mut flags = AddWatchFlags::empty();
flags.insert(AddWatchFlags::IN_MODIFY);
let _wd = fd.add_watch(config_file, flags)?;
let (guard, _task_guard) = oneshot::channel::<()>();
let notify = Arc::new(Notify::new());
let task_notify = notify.clone();
let mut poll = mio::Poll::new()?;
let mut events = mio::Events::with_capacity(1);
poll.registry().register(
&mut mio::unix::SourceFd(&fd.as_raw_fd()),
INOTIFY,
mio::Interest::READABLE,
)?;
let timeout = Duration::from_millis(50);
tokio::task::spawn_blocking(move || loop {
if guard.is_closed() {
return;
}
if let Err(err) = poll.poll(&mut events, Some(timeout)) {
log::warn!("Inotify socket poll failed with {:?}", err);
continue;
}
events
.iter()
.filter(|event| INOTIFY == event.token())
.for_each(|_| notify.notify_one());
});
Ok(Self {
fd,
task_notify,
_task_guard,
})
}
pub fn refresh_watch(&self, config_file: &Path) -> Error {
let mut flags = AddWatchFlags::empty();
flags.insert(AddWatchFlags::IN_MODIFY);
let _wd = self.fd.add_watch(config_file, flags)?;
Ok(())
}
pub fn has_events(&self) -> bool {
self.fd.read_events().is_ok()
}
/// Wait until readable.
pub async fn wait_readable(&mut self) {
self.task_notify.notified().await;
}
}
}
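#[cfg(test)]
mod command_parse_tests {
    // Minimal sketch (not part of the original crate) exercising the parser
    // above; the command strings are arbitrary examples.
    use super::*;
    use std::convert::TryFrom;
    #[test]
    fn execute_keeps_its_payload() {
        let cmd = Command::try_from("Execute st -e htop").expect("should parse");
        assert_eq!(cmd, Command::Execute("st -e htop".to_owned()));
    }
    #[test]
    fn unknown_commands_are_rejected() {
        assert!(Command::try_from("Teleport").is_err());
    }
}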
| 28.459677 | 76 | 0.517994 |
d9a1ae98333b6e30e68a04f23c3f58ea90327688 | 1,112 | //! Boilerplate error definitions.
use crate::abi::InvalidOutputType;
use thiserror::Error;
/// A type alias for std's Result with the Error as our error type.
pub type Result<T, E = ParseError> = std::result::Result<T, E>;
/// Error that can occur during human readable parsing
#[derive(Error, Debug)]
pub enum ParseError {
#[error("{0}")]
Message(String),
#[error(transparent)]
ParseError(#[from] super::Error),
}
macro_rules! _format_err {
($($tt:tt)*) => {
$crate::abi::ParseError::Message(format!($($tt)*))
};
}
pub(crate) use _format_err as format_err;
macro_rules! _bail {
($($tt:tt)*) => { return Err($crate::abi::error::format_err!($($tt)*)) };
}
pub(crate) use _bail as bail;
/// ABI codec related errors
#[derive(Error, Debug)]
pub enum AbiError {
/// Thrown when the ABI decoding fails
#[error(transparent)]
DecodingError(#[from] crate::abi::Error),
/// Thrown when detokenizing an argument
#[error(transparent)]
DetokenizationError(#[from] InvalidOutputType),
#[error("missing or wrong function selector")]
WrongSelector,
}
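#[cfg(test)]
mod error_macro_tests {
    // Minimal sketch (not from the original file) showing the intended use of
    // the crate-internal `bail!` helper above from parsing code.
    use super::{bail, Result};
    fn reject_empty(name: &str) -> Result<()> {
        if name.is_empty() {
            bail!("expected a non-empty name, got {:?}", name);
        }
        Ok(())
    }
    #[test]
    fn bail_surfaces_a_message_error() {
        assert!(reject_empty("").is_err());
        assert!(reject_empty("Transfer").is_ok());
    }
}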
| 25.860465 | 77 | 0.656475 |
285037ff15b3e4c48dbdf9db3531cc0197278419 | 7,576 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(clippy::pedantic)]
use std::fmt::{self, Display};
use std::time::{Duration, Instant};
use crate::cc::{classic_cc::WindowAdjustment, MAX_DATAGRAM_SIZE_F64};
use neqo_common::qtrace;
use std::convert::TryFrom;
// CUBIC congestion control
// C is a constant fixed to determine the aggressiveness of window
// increase in high BDP networks.
pub const CUBIC_C: f64 = 0.4;
pub const CUBIC_ALPHA: f64 = 3.0 * (1.0 - 0.7) / (1.0 + 0.7);
// CUBIC_BETA = 0.7;
pub const CUBIC_BETA_USIZE_QUOTIENT: usize = 7;
pub const CUBIC_BETA_USIZE_DIVISOR: usize = 10;
/// The fast convergence ratio further reduces the congestion window when a congestion event
/// occurs before reaching the previous `W_max`.
pub const CUBIC_FAST_CONVERGENCE: f64 = 0.85; // (1.0 + CUBIC_BETA) / 2.0;
/// The minimum number of multiples of the datagram size that need
/// to be received to cause an increase in the congestion window.
/// When there is no loss, Cubic can return to exponential increase, but
/// this value reduces the magnitude of the resulting growth by a constant factor.
/// A value of 1.0 would mean a return to the rate used in slow start.
const EXPONENTIAL_GROWTH_REDUCTION: f64 = 2.0;
fn convert_to_f64(v: usize) -> f64 {
assert!(v < (1 << 53));
let mut f_64 = f64::try_from(u32::try_from(v >> 21).unwrap()).unwrap();
f_64 *= 2_097_152.0; // f_64 <<= 21
f_64 += f64::try_from(u32::try_from(v & 0x1f_ffff).unwrap()).unwrap();
f_64
}
#[derive(Debug)]
pub struct Cubic {
last_max_cwnd: f64,
estimated_tcp_cwnd: f64,
k: f64,
w_max: f64,
ca_epoch_start: Option<Instant>,
last_phase_was_tcp: bool,
tcp_acked_bytes: f64,
}
impl Default for Cubic {
fn default() -> Self {
Self {
last_max_cwnd: 0.0,
estimated_tcp_cwnd: 0.0,
k: 0.0,
w_max: 0.0,
ca_epoch_start: None,
last_phase_was_tcp: false,
tcp_acked_bytes: 0.0,
}
}
}
impl Display for Cubic {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Cubic [last_max_cwnd: {}, k: {}, w_max: {}, ca_epoch_start: {:?}]",
self.last_max_cwnd, self.k, self.w_max, self.ca_epoch_start
)?;
Ok(())
}
}
#[allow(clippy::doc_markdown)]
impl Cubic {
    /// The original equation is:
/// K = cubic_root(W_max*(1-beta_cubic)/C) (Eq. 2 RFC8312)
/// W_max is number of segments of the maximum segment size (MSS).
///
/// K is actually the time that W_cubic(t) = C*(t-K)^3 + W_max (Eq. 1) would
/// take to increase to W_max. We use bytes not MSS units, therefore this
/// equation will be: W_cubic(t) = C*MSS*(t-K)^3 + W_max.
///
/// From that equation we can calculate K as:
/// K = cubic_root((W_max - W_cubic) / C / MSS);
fn calc_k(&self, curr_cwnd: f64) -> f64 {
((self.w_max - curr_cwnd) / CUBIC_C / MAX_DATAGRAM_SIZE_F64).cbrt()
}
/// W_cubic(t) = C*(t-K)^3 + W_max (Eq. 1)
/// t is relative to the start of the congestion avoidance phase and it is in seconds.
fn w_cubic(&self, t: f64) -> f64 {
CUBIC_C * (t - self.k).powi(3) * MAX_DATAGRAM_SIZE_F64 + self.w_max
}
fn start_epoch(&mut self, curr_cwnd_f64: f64, new_acked_f64: f64, now: Instant) {
self.ca_epoch_start = Some(now);
// reset tcp_acked_bytes and estimated_tcp_cwnd;
self.tcp_acked_bytes = new_acked_f64;
self.estimated_tcp_cwnd = curr_cwnd_f64;
if self.last_max_cwnd <= curr_cwnd_f64 {
self.w_max = curr_cwnd_f64;
self.k = 0.0;
} else {
self.w_max = self.last_max_cwnd;
self.k = self.calc_k(curr_cwnd_f64);
}
qtrace!([self], "New epoch");
}
}
impl WindowAdjustment for Cubic {
// This is because of the cast in the last line from f64 to usize.
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
fn bytes_for_cwnd_increase(
&mut self,
curr_cwnd: usize,
new_acked_bytes: usize,
min_rtt: Duration,
now: Instant,
) -> usize {
let curr_cwnd_f64 = convert_to_f64(curr_cwnd);
let new_acked_f64 = convert_to_f64(new_acked_bytes);
if self.ca_epoch_start.is_none() {
// This is a start of a new congestion avoidance phase.
self.start_epoch(curr_cwnd_f64, new_acked_f64, now);
} else {
self.tcp_acked_bytes += new_acked_f64;
}
let time_ca = self
.ca_epoch_start
.map_or(min_rtt, |t| now + min_rtt - t)
.as_secs_f64();
let target_cubic = self.w_cubic(time_ca);
let tcp_cnt = self.estimated_tcp_cwnd / CUBIC_ALPHA;
while self.tcp_acked_bytes > tcp_cnt {
self.tcp_acked_bytes -= tcp_cnt;
self.estimated_tcp_cwnd += MAX_DATAGRAM_SIZE_F64;
}
let target_cwnd = target_cubic.max(self.estimated_tcp_cwnd);
// Calculate the number of bytes that would need to be acknowledged for an increase
// of `MAX_DATAGRAM_SIZE` to match the increase of `target - cwnd / cwnd` as defined
// in the specification (Sections 4.4 and 4.5).
// The amount of data required therefore reduces asymptotically as the target increases.
// If the target is not significantly higher than the congestion window, require a very large
// amount of acknowledged data (effectively block increases).
let mut acked_to_increase =
MAX_DATAGRAM_SIZE_F64 * curr_cwnd_f64 / (target_cwnd - curr_cwnd_f64).max(1.0);
// Limit increase to max 1 MSS per EXPONENTIAL_GROWTH_REDUCTION ack packets.
// This effectively limits target_cwnd to (1 + 1 / EXPONENTIAL_GROWTH_REDUCTION) cwnd.
acked_to_increase =
acked_to_increase.max(EXPONENTIAL_GROWTH_REDUCTION * MAX_DATAGRAM_SIZE_F64);
acked_to_increase as usize
}
fn reduce_cwnd(&mut self, curr_cwnd: usize, acked_bytes: usize) -> (usize, usize) {
let curr_cwnd_f64 = convert_to_f64(curr_cwnd);
        // Fast Convergence
        // If a congestion event occurs before the window has regained the maximum reached
        // before the previous congestion event, we reduce the recorded maximum (and thereby
        // W_max) so the window converges toward the new, lower capacity faster.
        // Check cwnd + MAX_DATAGRAM_SIZE instead of cwnd because, with cwnd kept in bytes,
        // cwnd may be slightly off.
self.last_max_cwnd = if curr_cwnd_f64 + MAX_DATAGRAM_SIZE_F64 < self.last_max_cwnd {
curr_cwnd_f64 * CUBIC_FAST_CONVERGENCE
} else {
curr_cwnd_f64
};
self.ca_epoch_start = None;
(
curr_cwnd * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR,
acked_bytes * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR,
)
}
fn on_app_limited(&mut self) {
// Reset ca_epoch_start. Let it start again when the congestion controller
// exits the app-limited period.
self.ca_epoch_start = None;
}
#[cfg(test)]
fn last_max_cwnd(&self) -> f64 {
self.last_max_cwnd
}
#[cfg(test)]
fn set_last_max_cwnd(&mut self, last_max_cwnd: f64) {
self.last_max_cwnd = last_max_cwnd;
}
}
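#[cfg(test)]
mod convert_to_f64_tests {
    // Minimal sketch (not part of the original file): the 21-bit split in
    // `convert_to_f64` exists because `f64::try_from` is only implemented for
    // 32-bit integers; the result should match a plain cast below 2^53.
    use super::convert_to_f64;
    #[test]
    #[allow(clippy::float_cmp, clippy::cast_precision_loss)]
    fn matches_a_direct_cast() {
        for &v in &[0usize, 1, 0x1f_ffff, 0x20_0000, (1 << 40) + 12_345] {
            assert_eq!(convert_to_f64(v), v as f64);
        }
    }
}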
| 37.137255 | 111 | 0.642291 |
de259dda77786468f8bb3eddb1db7ff654403428 | 1,414 | // COLOR SCHEME TEST "MonokaiFree/MonokaiFree.tmTheme" "Rust"
#[derive(Debug)]
pub enum State {
//^ fg=#f92672 fs=
// ^ fg=#66d9ef fs=italic
// ^ fg=#a6e22e fs=
// ^ fg=#f8f8f2 fs=
Start,
// ^ fg=#f8f8f2 fs=
// ^ fg=#f8f8f2 fs=
Transient,
Closed,
}
impl From<&'a str> for State {
// ^ fg=#66d9ef fs=italic
// ^ fg=#66d9ef fs=italic
// ^ fg=#f8f8f2 fs=
// ^ fg=#f92672 fs=
// ^ fg=#a6e22e fs=
// ^ fg=#f8f8f2 fs=
fn from(s: &'a str) -> Self {
// ^ fg=#66d9ef fs=italic
// ^ fg=#a6e22e fs=
// ^ fg=#f8f8f2 fs=
// ^ fg=#fd971f fs=italic
// ^ fg=#f8f8f2 fs=
// ^ fg=#66d9ef fs=italic
// ^ fg=#f8f8f2 fs=
// ^^ fg=#f92672 fs=
// ^ fg=#66d9ef fs=italic
// ^ fg=#f8f8f2 fs=
match s {
// ^ fg=#f92672 fs=
// ^ fg=#f8f8f2 fs=
// ^ fg=#f8f8f2 fs=
"start" => State::Start,
// ^ fg=#e6db74 fs=
// ^ fg=#e6db74 fs=
// ^^ fg=#f92672 fs=
// ^^ fg=#f92672 fs=
"closed" => State::Closed,
// ^^ fg=#f92672 fs=
// ^^ fg=#f92672 fs=
_ => unreachable!(),
}
}
}
| 27.72549 | 61 | 0.373409 |
268efcc077f434cb26949ef0d65c2f6632d02a61 | 458 | #![deny(missing_debug_implementations, missing_docs)] // kcov-ignore
//! Provides loading logic for control settings.
pub use crate::{
button_to_player_index_mapper::ButtonToPlayerIndexMapper,
control_button_to_button_mapper::ControlButtonToButtonMapper, keyboard_ui_gen::KeyboardUiGen,
pcbl_repositioner::PcblRepositioner,
};
mod button_to_player_index_mapper;
mod control_button_to_button_mapper;
mod keyboard_ui_gen;
mod pcbl_repositioner;
| 30.533333 | 97 | 0.831878 |
d79c8d1f32702c54103ccda7d27652b7f1fbf95a | 23,068 | use std::ops::Bound;
use anyhow::Context as AnyhowContext;
use async_trait::async_trait;
use axum::extract::{Extension, Path, RawQuery};
use serde_derive::Deserialize;
use serde_json::{map::Map as JsonMap, Value as JsonValue};
use svc_agent::mqtt::ResponseStatus;
use svc_utils::extractors::AuthnExtractor;
use tracing::{field::display, instrument, Span};
use uuid::Uuid;
use crate::app::context::Context;
use crate::app::endpoint::prelude::*;
use crate::db;
///////////////////////////////////////////////////////////////////////////////
const MAX_SETS: usize = 10;
const MAX_LIMIT_PER_SET: i64 = 100;
#[derive(Debug, Deserialize)]
pub(crate) struct ReadPayload {
sets: Vec<String>,
attribute: Option<String>,
occurred_at: Option<i64>,
original_occurred_at: Option<i64>,
limit: Option<i64>,
}
#[derive(Debug, Deserialize)]
pub(crate) struct ReadRequest {
room_id: Uuid,
#[serde(flatten)]
payload: ReadPayload,
}
pub async fn read(
Extension(ctx): Extension<Arc<AppContext>>,
AuthnExtractor(agent_id): AuthnExtractor,
Path(room_id): Path<Uuid>,
RawQuery(query): RawQuery,
) -> RequestResult {
let payload = serde_qs::from_str(&query.unwrap_or_default())
.map_err(|e| anyhow!("Failed to parse qs, err = {:?}", e))
.error(AppErrorKind::InvalidQueryString)?;
let request = ReadRequest { room_id, payload };
ReadHandler::handle(
&mut ctx.start_message(),
request,
RequestParams::Http {
agent_id: &agent_id,
},
)
.await
}
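// Illustrative request shape (an assumption, not taken from an API spec):
// `serde_qs` expects the sets encoded as an indexed list, e.g.
// GET /rooms/{room_id}/state?sets[0]=messages&sets[1]=layout&limit=100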
pub(crate) struct ReadHandler;
#[async_trait]
impl RequestHandler for ReadHandler {
type Payload = ReadRequest;
#[instrument(skip_all, fields(room_id, scope, classroom_id))]
async fn handle<C: Context>(
context: &mut C,
Self::Payload { room_id, payload }: Self::Payload,
reqp: RequestParams<'_>,
) -> RequestResult {
Span::current().record("room_id", &display(room_id));
// Validate parameters.
let validation_error = match payload.sets.len() {
0 => Some(anyhow!("'sets' can't be empty")),
len if len > MAX_SETS => Some(anyhow!("too many 'sets'")),
_ => None,
};
if let Some(err) = validation_error {
return Err(err).error(AppErrorKind::InvalidStateSets);
}
// Choose limit.
let limit = std::cmp::min(
payload.limit.unwrap_or(MAX_LIMIT_PER_SET),
MAX_LIMIT_PER_SET,
);
// Check whether the room exists.
let room = helpers::find_room(context, room_id, helpers::RoomTimeRequirement::Any).await?;
// Authorize room events listing.
let room_id = room.id().to_string();
let object = AuthzObject::new(&["rooms", &room_id]).into();
let authz_time = context
.authz()
.authorize(
room.audience().into(),
reqp.as_account_id().to_owned(),
object,
"read".into(),
)
.await?;
// Default `occurred_at`: closing time of the room.
let time = room.time().map(|t| t.into());
let original_occurred_at = if let Some(original_occurred_at) = payload.original_occurred_at
{
original_occurred_at
} else if let Ok((_, Bound::Unbounded)) = time {
std::i64::MAX
} else if let Ok((Bound::Included(open), Bound::Excluded(close))) = time {
(close - open)
.num_nanoseconds()
.map(|n| n + 1)
.unwrap_or(std::i64::MAX)
} else {
return Err(anyhow!("Bad room time")).error(AppErrorKind::InvalidRoomTime);
};
// Retrieve state for each set from the DB and put them into a map.
let mut state = JsonMap::new();
let mut conn = context.get_ro_conn().await?;
for set in payload.sets.iter() {
Span::current().record("set", &set.as_str());
// Build a query for the particular set state.
let mut query =
db::event::SetStateQuery::new(room.id(), set.clone(), original_occurred_at, limit);
if let Some(ref attribute) = payload.attribute {
query = query.attribute(attribute);
}
if let Some(occurred_at) = payload.occurred_at {
query = query.occurred_at(occurred_at);
}
            // If it is the only set specified, first execute a total count query and
            // add the `has_next` pagination flag to the state.
if payload.sets.len() == 1 {
let total_count = context
.metrics()
.measure_query(QueryKey::StateTotalCountQuery, query.total_count(&mut conn))
.await
.context("Failed to get state total count")
.error(AppErrorKind::DbQueryFailed)?;
let has_next = total_count as i64 > limit;
state.insert(String::from("has_next"), JsonValue::Bool(has_next));
}
// Limit the query and retrieve the state.
let set_state = context
.metrics()
.measure_query(QueryKey::StateQuery, query.execute(&mut conn))
.await
.context("Failed to get state")
.error(AppErrorKind::DbQueryFailed)?;
// Serialize to JSON and add to the state map.
let serialized_set_state = serde_json::to_value(set_state)
.context("Failed to serialize state")
.error(AppErrorKind::SerializationFailed)?;
match serialized_set_state.as_array().and_then(|a| a.first()) {
Some(event) if event.get("label").is_none() => {
// The first event has no label => simple set with a single event…
state.insert(set.to_owned(), event.to_owned());
}
_ => {
// …or it's a collection.
state.insert(set.to_owned(), serialized_set_state);
}
}
}
// Respond with state.
Ok(AppResponse::new(
ResponseStatus::OK,
JsonValue::Object(state),
context.start_timestamp(),
Some(authz_time),
))
}
}
///////////////////////////////////////////////////////////////////////////////
#[cfg(test)]
mod tests {
use serde_derive::Deserialize;
use serde_json::json;
use crate::db::event::Object as Event;
use crate::test_helpers::prelude::*;
use super::*;
///////////////////////////////////////////////////////////////////////////
#[derive(Deserialize)]
struct State {
messages: Vec<Event>,
layout: Event,
}
#[tokio::test]
async fn read_state_multiple_sets() {
let db = TestDb::new().await;
let agent = TestAgent::new("web", "user123", USR_AUDIENCE);
let (room, message_event, layout_event) = {
// Create room.
let mut conn = db.get_conn().await;
let room = shared_helpers::insert_room(&mut conn).await;
// Create events in the room.
let message_event = factory::Event::new()
.room_id(room.id())
.kind("message")
.set("messages")
.label("message-1")
.data(&json!({ "text": "hello", }))
.occurred_at(1000)
.created_by(&agent.agent_id())
.insert(&mut conn)
.await;
let layout_event = factory::Event::new()
.room_id(room.id())
.kind("layout")
.set("layout")
.data(&json!({ "name": "presentation", }))
.occurred_at(2000)
.created_by(&agent.agent_id())
.insert(&mut conn)
.await;
(room, message_event, layout_event)
};
// Allow agent to list events in the room.
let mut authz = TestAuthz::new();
let room_id = room.id().to_string();
let object = vec!["rooms", &room_id];
authz.allow(agent.account_id(), object, "read");
// Make state.read request.
let mut context = TestContext::new(db, authz);
let payload = ReadRequest {
room_id: room.id(),
payload: ReadPayload {
sets: vec![String::from("messages"), String::from("layout")],
attribute: None,
occurred_at: None,
original_occurred_at: None,
limit: None,
},
};
let messages = handle_request::<ReadHandler>(&mut context, &agent, payload)
.await
.expect("State reading failed");
        // Assert that the response contains both sets.
let (state, respp, _) = find_response::<State>(messages.as_slice());
assert_eq!(respp.status(), ResponseStatus::OK);
assert_eq!(state.messages.len(), 1);
assert_eq!(state.messages[0].id(), message_event.id());
assert_eq!(state.layout.id(), layout_event.id());
}
#[derive(Deserialize)]
struct CollectionState {
messages: Vec<Event>,
has_next: bool,
}
#[tokio::test]
async fn read_state_collection() {
let db = TestDb::new().await;
let agent = TestAgent::new("web", "user123", USR_AUDIENCE);
let (room, db_events) = {
// Create room.
let mut conn = db.get_conn().await;
let room = shared_helpers::insert_room(&mut conn).await;
// Create events in the room.
let mut events = vec![];
for i in 0..6 {
let event = factory::Event::new()
.room_id(room.id())
.kind("message")
.set("messages")
.label(&format!("message-{}", i % 3 + 1))
.data(&json!({
"text": format!("message {}, version {}", i % 3 + 1, i / 3 + 1),
}))
.occurred_at(i * 1000)
.created_by(&agent.agent_id())
.insert(&mut conn)
.await;
events.push(event);
}
(room, events)
};
// Allow agent to list events in the room.
let mut authz = TestAuthz::new();
let room_id = room.id().to_string();
let object = vec!["rooms", &room_id];
authz.allow(agent.account_id(), object, "read");
// Make state.read request.
let mut context = TestContext::new(db, authz);
let payload = ReadRequest {
room_id: room.id(),
payload: ReadPayload {
sets: vec![String::from("messages")],
attribute: None,
occurred_at: Some(2001),
original_occurred_at: None,
limit: Some(2),
},
};
let messages = handle_request::<ReadHandler>(&mut context, &agent, payload)
.await
.expect("State reading failed (page 1)");
// Assert last two events response.
let (state, respp, _) = find_response::<CollectionState>(messages.as_slice());
assert_eq!(respp.status(), ResponseStatus::OK);
assert_eq!(state.messages.len(), 2);
assert_eq!(state.messages[0].id(), db_events[2].id());
assert_eq!(state.messages[1].id(), db_events[1].id());
assert_eq!(state.has_next, true);
// Request the next page.
let payload = ReadRequest {
room_id: room.id(),
payload: ReadPayload {
sets: vec![String::from("messages")],
attribute: None,
occurred_at: Some(1),
original_occurred_at: Some(state.messages[1].original_occurred_at()),
limit: Some(2),
},
};
let messages = handle_request::<ReadHandler>(&mut context, &agent, payload)
.await
.expect("State reading failed (page 2)");
// Assert the first event.
let (state, respp, _) = find_response::<CollectionState>(messages.as_slice());
assert_eq!(respp.status(), ResponseStatus::OK);
assert_eq!(state.messages.len(), 1);
assert_eq!(state.messages[0].id(), db_events[0].id());
assert_eq!(state.has_next, false);
}
#[tokio::test]
async fn read_state_collection_with_attribute_filter() {
let db = TestDb::new().await;
let agent = TestAgent::new("web", "user123", USR_AUDIENCE);
let room = {
// Create room.
let mut conn = db.get_conn().await;
let room = shared_helpers::insert_room(&mut conn).await;
// Create events in the room.
let mut events = vec![];
for i in 0..6 {
let mut factory = factory::Event::new()
.room_id(room.id())
.kind("message")
.set("messages")
.label(&format!("message-{}", i % 3 + 1))
.data(&json!({
"text": format!("message {}, version {}", i % 3 + 1, i / 3 + 1),
}))
.occurred_at(i * 1000)
.created_by(&agent.agent_id());
if i % 3 == 0 {
factory = factory.attribute("pinned");
}
let event = factory.insert(&mut conn).await;
events.push(event);
}
room
};
// Allow agent to list events in the room.
let mut authz = TestAuthz::new();
let room_id = room.id().to_string();
let object = vec!["rooms", &room_id];
authz.allow(agent.account_id(), object, "read");
// Make state.read request.
let mut context = TestContext::new(db, authz);
let payload = ReadRequest {
room_id: room.id(),
payload: ReadPayload {
sets: vec![String::from("messages")],
attribute: Some(String::from("pinned")),
occurred_at: None,
original_occurred_at: None,
limit: None,
},
};
let messages = handle_request::<ReadHandler>(&mut context, &agent, payload)
.await
.expect("State reading failed");
// Expect only an event with the expected attribute.
let (state, respp, _) = find_response::<CollectionState>(messages.as_slice());
assert_eq!(respp.status(), ResponseStatus::OK);
assert_eq!(state.messages.len(), 1);
assert_eq!(state.messages[0].attribute(), Some("pinned"));
}
#[tokio::test]
async fn read_state_collection_with_occurred_at_filter() {
let db = TestDb::new().await;
let agent = TestAgent::new("web", "user123", USR_AUDIENCE);
let (room, db_events) = {
// Create room.
let mut conn = db.get_conn().await;
let room = shared_helpers::insert_room(&mut conn).await;
// Create events in the room.
let mut events = vec![];
for i in 0..6 {
let event = factory::Event::new()
.room_id(room.id())
.kind("message")
.set("messages")
.label(&format!("message-{}", i % 3 + 1))
.data(&json!({
"text": format!("message {}, version {}", i % 3 + 1, i / 3 + 1),
}))
.occurred_at(i * 1000)
.created_by(&agent.agent_id())
.insert(&mut conn)
.await;
events.push(event);
}
(room, events)
};
// Allow agent to list events in the room.
let mut authz = TestAuthz::new();
let room_id = room.id().to_string();
let object = vec!["rooms", &room_id];
authz.allow(agent.account_id(), object, "read");
// Make state.read request.
let mut context = TestContext::new(db, authz);
let payload = ReadRequest {
room_id: room.id(),
payload: ReadPayload {
sets: vec![String::from("messages")],
attribute: None,
occurred_at: Some(2001),
original_occurred_at: None,
limit: Some(2),
},
};
let messages = handle_request::<ReadHandler>(&mut context, &agent, payload)
.await
.expect("State reading failed (page 1)");
// Assert last two events response.
let (state, respp, _) = find_response::<CollectionState>(messages.as_slice());
assert_eq!(respp.status(), ResponseStatus::OK);
assert_eq!(state.messages.len(), 2);
assert_eq!(state.messages[0].id(), db_events[2].id());
assert_eq!(state.messages[1].id(), db_events[1].id());
assert_eq!(state.has_next, true);
// Request the next page.
let payload = ReadRequest {
room_id: room.id(),
payload: ReadPayload {
sets: vec![String::from("messages")],
attribute: None,
occurred_at: Some(1),
original_occurred_at: Some(state.messages[1].original_occurred_at()),
limit: Some(2),
},
};
let messages = handle_request::<ReadHandler>(&mut context, &agent, payload)
.await
.expect("State reading failed (page 2)");
// Assert the first event.
let (state, respp, _) = find_response::<CollectionState>(messages.as_slice());
assert_eq!(respp.status(), ResponseStatus::OK);
assert_eq!(state.messages.len(), 1);
assert_eq!(state.messages[0].id(), db_events[0].id());
assert_eq!(state.has_next, false);
}
#[tokio::test]
async fn read_state_pinned_messages() {
let db = TestDb::new().await;
let agent = TestAgent::new("web", "user123", USR_AUDIENCE);
let (room, pinned_message) = {
// Create room.
let mut conn = db.get_conn().await;
let room = shared_helpers::insert_room(&mut conn).await;
            // Create a message that is not pinned.
let base_message_factory = factory::Event::new()
.room_id(room.id())
.kind("message")
.set("messages")
.data(&json!({"text": "hello"}))
.created_by(&agent.agent_id());
base_message_factory
.clone()
.label("message-1")
.occurred_at(1000)
.insert(&mut conn)
.await;
// Create a pinned message.
let pinned_message_factory = base_message_factory.clone().label("message-2");
pinned_message_factory
.clone()
.occurred_at(2000)
.insert(&mut conn)
.await;
let pinned_message = pinned_message_factory
.occurred_at(3000)
.attribute("pinned")
.insert(&mut conn)
.await;
// Create an unpinned message.
let unpinned_message_factory = base_message_factory.label("message-3");
unpinned_message_factory
.clone()
.occurred_at(4000)
.insert(&mut conn)
.await;
unpinned_message_factory
.clone()
.occurred_at(5000)
.attribute("pinned")
.insert(&mut conn)
.await;
unpinned_message_factory
.occurred_at(6000)
.insert(&mut conn)
.await;
(room, pinned_message)
};
// Allow agent to list events in the room.
let mut authz = TestAuthz::new();
let room_id = room.id().to_string();
let object = vec!["rooms", &room_id];
authz.allow(agent.account_id(), object, "read");
// Make state.read request.
let mut context = TestContext::new(db, authz);
let payload = ReadRequest {
room_id: room.id(),
payload: ReadPayload {
sets: vec![String::from("messages")],
attribute: Some(String::from("pinned")),
occurred_at: None,
original_occurred_at: None,
limit: None,
},
};
let messages = handle_request::<ReadHandler>(&mut context, &agent, payload)
.await
.expect("State reading failed");
        // Assert that only the pinned message is returned.
let (state, respp, _) = find_response::<CollectionState>(messages.as_slice());
assert_eq!(respp.status(), ResponseStatus::OK);
assert_eq!(state.messages.len(), 1);
assert_eq!(state.messages[0].id(), pinned_message.id());
}
#[tokio::test]
async fn read_state_not_authorized() {
let db = TestDb::new().await;
let agent = TestAgent::new("web", "user123", USR_AUDIENCE);
let room = {
let mut conn = db.get_conn().await;
shared_helpers::insert_room(&mut conn).await
};
let mut context = TestContext::new(db, TestAuthz::new());
let payload = ReadRequest {
room_id: room.id(),
payload: ReadPayload {
sets: vec![String::from("messages"), String::from("layout")],
attribute: None,
occurred_at: None,
original_occurred_at: None,
limit: None,
},
};
let err = handle_request::<ReadHandler>(&mut context, &agent, payload)
.await
.expect_err("Unexpected success reading state");
assert_eq!(err.status(), ResponseStatus::FORBIDDEN);
}
#[tokio::test]
async fn read_state_missing_room() {
let agent = TestAgent::new("web", "user123", USR_AUDIENCE);
let mut context = TestContext::new(TestDb::new().await, TestAuthz::new());
let payload = ReadRequest {
room_id: Uuid::new_v4(),
payload: ReadPayload {
sets: vec![String::from("messages"), String::from("layout")],
attribute: None,
occurred_at: None,
original_occurred_at: None,
limit: None,
},
};
let err = handle_request::<ReadHandler>(&mut context, &agent, payload)
.await
.expect_err("Unexpected success reading state");
assert_eq!(err.status(), ResponseStatus::NOT_FOUND);
assert_eq!(err.kind(), "room_not_found");
}
}
| 34.073855 | 99 | 0.515909 |
89d9aa936f9e3cd45d513612114fc1582c7b6bee | 3,254 | #![recursion_limit = "1024"]
#[macro_use]
extern crate error_chain;
extern crate iron;
extern crate hyper;
extern crate logger;
extern crate time;
extern crate serde;
extern crate serde_json;
extern crate handlebars_iron;
mod errors;
use iron::prelude::*;
use iron::status;
use iron::Handler;
use iron::AfterMiddleware;
use iron::headers::{HttpDate, CacheControl, CacheDirective, LastModified};
use logger::Logger;
use std::error::Error;
use handlebars_iron::{HandlebarsEngine, DirectorySource, Template};
use hyper::server::Listening;
use hyper::client::Client;
use std::collections::BTreeMap;
const JOY_URL: &'static str = "https://raw.githubusercontent.com/squidpickles/slippybot/master/joy.json";
const LISTEN_ADDR: &'static str = "127.0.0.1:8707";
const CACHE_TIMEOUT: u32 = 300; // seconds
struct JoyHandler;
impl JoyHandler {
pub fn new() -> JoyHandler {
JoyHandler {}
}
fn fetch_joy(&self) -> Result<BTreeMap<String, Vec<String>>, errors::Error> {
let client = Client::new();
let response = try!(client.get(JOY_URL).send());
let joy = try!(serde_json::from_reader(response));
Ok(joy)
}
fn set_cache_headers(&self, res: &mut Response, date: time::Tm) {
res.headers.set(CacheControl(vec![
CacheDirective::MaxAge(CACHE_TIMEOUT)
]));
res.headers.set(LastModified(HttpDate(date)));
}
}
impl Handler for JoyHandler {
fn handle(&self, _: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let joy = match self.fetch_joy() {
Ok(jj) => jj,
Err(err) => return Err(iron::IronError::new(err, status::InternalServerError))
};
let mut vars = BTreeMap::new();
vars.insert("joy".to_owned(), joy);
let now = time::now_utc();
resp.set_mut(Template::new("index", vars)).set_mut(status::Ok);
self.set_cache_headers(&mut resp, now);
Ok(resp)
}
}
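// Note (derived from `handle` above, not from the template itself): the
// "index" template is rendered with a context of the form
// `{ "joy": { "<category>": ["<entry>", ...] } }`, mirroring joy.json.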
struct ErrorReporter;
impl AfterMiddleware for ErrorReporter {
fn catch(&self, _: &mut Request, err: IronError) -> IronResult<Response> {
println!("{}", err.description());
Err(err)
}
}
pub struct WebServer {
chain: Chain,
}
impl WebServer {
pub fn new() -> Result<WebServer, errors::Error> {
let mut chain = Chain::new(JoyHandler::new());
let mut hbse = HandlebarsEngine::new();
let source = Box::new(DirectorySource::new("./templates/", ".hbs"));
hbse.add(source);
try!(match hbse.reload() {
Err(err) => Err(format!("Handlebars error: {}", err.description())),
_ => Ok(())
});
chain.link_after(hbse);
let (logger_before, logger_after) = Logger::new(None);
chain.link_before(logger_before);
chain.link_after(logger_after);
chain.link_after(ErrorReporter);
Ok(WebServer { chain: chain })
}
pub fn run(self, listen_address: &str) -> Result<Listening, errors::Error> {
let iron = Iron::new(self.chain);
iron.http(listen_address).map_err(|e| e.into())
}
}
pub fn main() {
let server = WebServer::new().unwrap();
println!("Listening on {}", LISTEN_ADDR);
server.run(LISTEN_ADDR).unwrap();
}
| 28.79646 | 105 | 0.629072 |
8f0ec5ec7a19e89a30536a2352684dbb4c5e60cc | 783 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Testing casting of a generic Struct to a Trait with a generic method.
// This is test for issue 10955.
#![allow(unused_variable)]
trait Foo {
fn f<A>(a: A) -> A {
a
}
}
struct Bar<T> {
x: T,
}
impl<T> Foo for Bar<T> { }
pub fn main() {
let a = Bar { x: 1u };
let b = &a as &Foo;
}
| 25.258065 | 72 | 0.662835 |
e6a79fd9854761e637a4c9e1eb6631960ef33563 | 13,267 | //! Datagram sockets.
mod receiver;
mod sender;
use self::receiver::Receiver;
use self::sender::Sender;
use crate::common::{do_bind, Common};
use crate::ioctl::*;
use crate::prelude::*;
use crate::runtime::Runtime;
use crate::sockopt::*;
const MAX_BUF_SIZE: usize = 64 * 1024;
pub struct DatagramSocket<A: Addr + 'static, R: Runtime> {
common: Arc<Common<A, R>>,
state: RwLock<State>,
sender: Sender<A, R>,
receiver: Arc<Receiver<A, R>>,
}
impl<A: Addr, R: Runtime> DatagramSocket<A, R> {
pub fn new(nonblocking: bool) -> Result<Self> {
let common = Arc::new(Common::new(Type::DGRAM, nonblocking)?);
let state = RwLock::new(State::new());
let sender = Sender::new(common.clone());
let receiver = Receiver::new(common.clone());
Ok(Self {
common,
state,
sender,
receiver,
})
}
pub fn new_pair(nonblocking: bool) -> Result<(Self, Self)> {
let (common1, common2) = Common::new_pair(Type::DGRAM, nonblocking)?;
let socket1 = Self::new_connected(common1);
let socket2 = Self::new_connected(common2);
Ok((socket1, socket2))
}
fn new_connected(common: Common<A, R>) -> Self {
let common = Arc::new(common);
let state = RwLock::new(State::new_connected());
let sender = Sender::new(common.clone());
let receiver = Receiver::new(common.clone());
receiver.initiate_async_recv();
Self {
common,
state,
sender,
receiver,
}
}
pub fn domain(&self) -> Domain {
A::domain()
}
pub fn host_fd(&self) -> HostFd {
self.common.host_fd()
}
pub fn status_flags(&self) -> StatusFlags {
// Only support O_NONBLOCK
if self.common.nonblocking() {
StatusFlags::O_NONBLOCK
} else {
StatusFlags::empty()
}
}
pub fn set_status_flags(&self, new_flags: StatusFlags) -> Result<()> {
// Only support O_NONBLOCK
let nonblocking = new_flags.is_nonblocking();
self.common.set_nonblocking(nonblocking);
Ok(())
}
/// When creating a datagram socket, you can use `bind` to bind the socket
    /// to an address, so that another socket can send data to this address.
///
/// Binding is divided into explicit and implicit. Invoking `bind` is
/// explicit binding, while invoking `sendto` / `sendmsg` / `connect`
/// will trigger implicit binding.
///
    /// Datagram sockets can only bind once. You should use either explicit binding or
    /// just implicit binding. An explicit bind will fail if it happens after
    /// an implicit bind.
pub fn bind(&self, addr: &A) -> Result<()> {
let mut state = self.state.write().unwrap();
if state.is_bound() {
return_errno!(EINVAL, "The socket is already bound to an address");
}
do_bind(self.host_fd(), addr)?;
self.common.set_addr(addr);
state.mark_explicit_bind();
// Start async recv after explicit binding or implicit binding
self.receiver.initiate_async_recv();
Ok(())
}
/// Datagram sockets provide only connectionless interactions, But datagram sockets
/// can also use connect to associate a socket with a specific address.
/// After connection, any data sent on the socket is automatically addressed to the
/// connected peer, and only data received from that peer is delivered to the user.
///
/// Unlike stream sockets, datagram sockets can connect multiple times. But the socket
/// can only connect to one peer in the same time; a second connect will change the
/// peer address, and a connect to a address with family AF_UNSPEC will dissolve the
/// association ("disconnect" or "unconnect").
///
/// Before connection you can only use `sendto` / `sendmsg` / `recvfrom` / `recvmsg`.
/// Only after connection, you can use `read` / `recv` / `write` / `send`.
/// And you can ignore the address in `sendto` / `sendmsg` if you just want to
/// send data to the connected peer.
///
/// Ref 1: http://osr507doc.xinuos.com/en/netguide/disockD.connecting_datagrams.html
/// Ref 2: https://www.masterraghu.com/subjects/np/introduction/unix_network_programming_v1.3/ch08lev1sec11.html
pub async fn connect(&self, peer_addr: Option<&A>) -> Result<()> {
let mut state = self.state.write().unwrap();
if !state.is_connected() && peer_addr.is_none() {
return Ok(());
}
if let Some(peer) = peer_addr {
            // We don't actually issue a connect syscall (or send connect io-uring
            // requests); we emulate the connect behavior by recording the peer address
            // and applying it during sendmsg and recvmsg.
            //
            // The advantage of emulation is that we avoid designing an intermediate
            // (connecting) state and dealing with some complex cases. E.g., if we did
            // connect for real, a disconnect or a connect to a new address might affect
            // the ongoing async recv request, which would increase the design complexity.
self.common.set_peer_addr(peer);
state.mark_connected();
if !state.is_bound() {
state.mark_implicit_bind();
// Start async recv after explicit binding or implicit binding
self.receiver.initiate_async_recv();
}
            // TODO: update the bound address in some cases.
            // For an ipv4 socket bound to 0.0.0.0 (INADDR_ANY), connecting
            // to 127.0.0.1 (a local IP address) changes the socket's IP address
            // to 127.0.0.1 too, and connecting to a non-local IP address makes
            // Linux assign an address to the socket.
            // In both cases, we should update the bound address that we stored.
} else {
self.common.reset_peer_addr();
state.mark_disconnected();
            // TODO: clear the binding in some cases.
            // Disconnect affects the bound address. In Linux, for a socket that was
            // explicitly bound to a local IP address, disconnect clears the bound address
            // but leaves the port intact. For an implicitly bound socket, disconnect
            // clears both the address and the port.
}
Ok(())
}
pub async fn read(&self, buf: &mut [u8]) -> Result<usize> {
self.readv(&mut [buf]).await
}
pub async fn readv(&self, bufs: &mut [&mut [u8]]) -> Result<usize> {
let state = self.state.read().unwrap();
if !state.is_connected() {
return_errno!(ENOTCONN, "the socket is not connected");
}
drop(state);
self.recvmsg(bufs, RecvFlags::empty())
.await
.map(|(ret, ..)| ret)
}
    /// You can not invoke `recvfrom` directly after creating a datagram socket.
    /// That is because `recvfrom` doesn't provide an implicit binding. If you
    /// don't do an explicit or implicit bind, the sender doesn't know where
    /// to send the data.
pub async fn recvmsg(&self, bufs: &mut [&mut [u8]], flags: RecvFlags) -> Result<(usize, A)> {
self.receiver.recvmsg(bufs, flags).await
}
pub async fn write(&self, buf: &[u8]) -> Result<usize> {
self.writev(&[buf]).await
}
pub async fn writev(&self, bufs: &[&[u8]]) -> Result<usize> {
self.sendmsg(bufs, None, SendFlags::empty()).await
}
pub async fn sendmsg(
&self,
bufs: &[&[u8]],
addr: Option<&A>,
flags: SendFlags,
) -> Result<usize> {
let state = self.state.read().unwrap();
if addr.is_none() && !state.is_connected() {
return_errno!(ENOTCONN, "the socket is not connected");
}
let res = if let Some(addr) = addr {
drop(state);
self.sender.sendmsg(bufs, addr, flags).await
} else {
let peer = self.common.peer_addr().unwrap();
drop(state);
self.sender.sendmsg(bufs, &peer, flags).await
};
let mut state = self.state.write().unwrap();
if !state.is_bound() {
state.mark_implicit_bind();
// Start async recv after explicit binding or implicit binding
self.receiver.initiate_async_recv();
}
res
}
pub fn poll(&self, mask: Events, poller: Option<&Poller>) -> Events {
let pollee = self.common.pollee();
pollee.poll(mask, poller)
}
pub fn register_observer(&self, observer: Arc<dyn Observer>, mask: Events) -> Result<()> {
let pollee = self.common.pollee();
pollee.register_observer(observer, mask);
Ok(())
}
pub fn unregister_observer(&self, observer: &Arc<dyn Observer>) -> Result<Arc<dyn Observer>> {
let pollee = self.common.pollee();
pollee
.unregister_observer(observer)
.ok_or_else(|| errno!(ENOENT, "the observer is not registered"))
}
pub fn addr(&self) -> Result<A> {
let common = &self.common;
// Always get addr from host.
// Because for IP socket, users can specify "0" as port and the kernel should select a usable port for him.
// Thus, when calling getsockname, this should be updated.
let addr = common.get_addr_from_host()?;
common.set_addr(&addr);
Ok(addr)
}
pub fn peer_addr(&self) -> Result<A> {
let state = self.state.read().unwrap();
if !state.is_connected() {
return_errno!(ENOTCONN, "the socket is not connected");
}
Ok(self.common.peer_addr().unwrap())
}
pub fn ioctl(&self, cmd: &mut dyn IoctlCmd) -> Result<()> {
async_io::match_ioctl_cmd_mut!(&mut *cmd, {
cmd: GetSockOptRawCmd => {
cmd.execute(self.host_fd())?;
},
cmd: SetSockOptRawCmd => {
cmd.execute(self.host_fd())?;
},
cmd: GetAcceptConnCmd => {
// Datagram doesn't support listen
cmd.set_output(0);
},
cmd: GetDomainCmd => {
cmd.set_output(self.domain() as _);
},
cmd: GetPeerNameCmd => {
let peer = self.peer_addr()?;
cmd.set_output(AddrStorage(peer.to_c_storage()));
},
cmd: GetTypeCmd => {
cmd.set_output(self.common.type_() as _);
},
cmd: GetIfReqWithRawCmd => {
cmd.execute(self.host_fd())?;
},
cmd: GetIfConf => {
cmd.execute(self.host_fd())?;
},
_ => {
return_errno!(EINVAL, "Not supported yet");
}
});
Ok(())
}
fn cancel_requests(&self) {
self.receiver.cancel_requests();
}
}
impl<A: Addr + 'static, R: Runtime> Drop for DatagramSocket<A, R> {
fn drop(&mut self) {
self.common.set_closed();
self.cancel_requests();
}
}
impl<A: Addr + 'static, R: Runtime> std::fmt::Debug for DatagramSocket<A, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DatagramSocket")
.field("common", &self.common)
.field("state", &self.state.read().unwrap())
.finish()
}
}
#[derive(Debug)]
struct State {
bind_state: BindState,
is_connected: bool,
}
impl State {
pub fn new() -> Self {
Self {
bind_state: BindState::Unbound,
is_connected: false,
}
}
pub fn new_connected() -> Self {
Self {
bind_state: BindState::Unbound,
is_connected: true,
}
}
pub fn is_bound(&self) -> bool {
self.bind_state.is_bound()
}
#[allow(dead_code)]
pub fn is_explicit_bound(&self) -> bool {
self.bind_state.is_explicit_bound()
}
#[allow(dead_code)]
pub fn is_implicit_bound(&self) -> bool {
self.bind_state.is_implicit_bound()
}
pub fn is_connected(&self) -> bool {
self.is_connected
}
pub fn mark_explicit_bind(&mut self) {
self.bind_state = BindState::ExplicitBound;
}
pub fn mark_implicit_bind(&mut self) {
self.bind_state = BindState::ImplicitBound;
}
pub fn mark_connected(&mut self) {
self.is_connected = true;
}
pub fn mark_disconnected(&mut self) {
self.is_connected = false;
}
}
#[derive(Debug)]
enum BindState {
Unbound,
ExplicitBound,
ImplicitBound,
}
impl BindState {
pub fn is_bound(&self) -> bool {
match self {
Self::Unbound => false,
_ => true,
}
}
#[allow(dead_code)]
pub fn is_explicit_bound(&self) -> bool {
match self {
Self::ExplicitBound => true,
_ => false,
}
}
#[allow(dead_code)]
pub fn is_implicit_bound(&self) -> bool {
match self {
Self::ImplicitBound => true,
_ => false,
}
}
}
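// Illustrative call order (not part of this module) for an unconnected socket
// under the state machine above; `addr`, `peer`, and `buf` are placeholders.
//
//     let socket = DatagramSocket::<A, R>::new(false)?;
//     socket.bind(&addr)?; // explicit bind; async recv starts here
//     socket.sendmsg(&[b"ping"], Some(&peer), SendFlags::empty()).await?;
//     let (len, src) = socket.recvmsg(&mut [&mut buf], RecvFlags::empty()).await?;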
| 32.437653 | 116 | 0.573151 |
d66af364dd7fc57953d6f17d6cd491e1a1c5aef8 | 731 | #![cfg_attr(not(any(test, feature = "std")), no_std)]
use ink_lang::contract;
use ink_core::storage;
use ink_core::env::DefaultSrmlTypes;
contract! {
#![env = DefaultSrmlTypes]
struct Incrementer {
// ACTION: Create a `storage::Value` called `value` which holds a `u64`
}
impl Deploy for Incrementer {
fn deploy(&mut self, init_value: u64) {
// ACTION: `set` the initial value of `value` with `init_value`
}
}
impl Incrementer {
// Implementation of Contract Functions
}
}
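// One possible solution to the ACTION items above (illustrative only; the
// tutorial expects you to fill them in yourself):
//
//     struct Incrementer {
//         value: storage::Value<u64>,
//     }
//
//     fn deploy(&mut self, init_value: u64) {
//         self.value.set(init_value);
//     }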
#[cfg(all(test, feature = "test-env"))]
mod tests {
use super::*;
#[test]
fn incrementer_works() {
let mut contract = Incrementer::deploy_mock(5);
}
}
| 21.5 | 79 | 0.601915 |
22fdaabfcb89b68821fde9b65c7a137c2b1439ba | 1,386 | use crate::spec::TargetOptions;
use crate::spec::{FramePointer, LinkerFlavor, SanitizerSet, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::apple_base::opts("macos");
base.cpu = "core2".to_string();
base.max_atomic_width = Some(128); // core2 support cmpxchg16b
base.frame_pointer = FramePointer::Always;
base.pre_link_args.insert(
LinkerFlavor::Gcc,
vec!["-m64".to_string(), "-arch".to_string(), "x86_64".to_string()],
);
base.link_env_remove.extend(super::apple_base::macos_link_env_remove());
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
base.supported_sanitizers =
SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::LEAK | SanitizerSet::THREAD;
// Clang automatically chooses a more specific target based on
// MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
// correctly, we do too.
let arch = "x86_64";
let llvm_target = super::apple_base::macos_llvm_target(&arch);
Target {
llvm_target,
pointer_width: 64,
data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.to_string(),
arch: arch.to_string(),
options: TargetOptions { mcount: "\u{1}mcount".to_string(), ..base },
}
}
| 40.764706 | 94 | 0.670996 |
67a836bf0de89450d1bf50f5660620dadb4dcd1f | 4,010 | use super::*;
use frame_support::pallet_prelude::MaxEncodedLen;
// type AccountOf<T> = <T as frame_system::Config>::AccountId;
// type BalanceOf<T> = <<T as pallet::Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
type BlockNumberOf<T> = <T as frame_system::Config>::BlockNumber;
/// The custom struct for storing info of storage MinerInfo.
#[derive(PartialEq, Eq, Default, Encode, Decode, Clone, MaxEncodedLen, RuntimeDebug, TypeInfo)]
pub struct MinerInfo<BoundedString> {
pub(super) peerid: u64,
pub(super) ip: BoundedString,
pub(super) power: u128,
pub(super) space: u128,
}
/// The custom struct for storing info of storage miners.
#[derive(PartialEq, Eq, Encode, Decode, Clone, RuntimeDebug, MaxEncodedLen, TypeInfo)]
pub struct Mr<AccountId, Balance, BoundedString> {
pub(super) peerid: u64,
//Income account
pub(super) beneficiary: AccountId,
pub(super) ip: BoundedString,
pub(super) collaterals: Balance,
pub(super) earnings: Balance,
pub(super) locked: Balance,
//nomal, exit, frozen, e_frozen
pub(super) state: BoundedString,
pub(super) power: u128,
pub(super) space: u128,
pub(super) public_key: BoundedString,
}
/// The custom struct for storing index of segment, miner's current power and space.
#[derive(PartialEq, Eq, Default, Encode, Decode, Clone, RuntimeDebug, MaxEncodedLen, TypeInfo)]
pub struct SegmentInfo {
pub(super) segment_index: u64,
}
/// The custom struct for storing info of storage StorageInfo.
#[derive(PartialEq, Eq, Default, Encode, Decode, Clone, RuntimeDebug, MaxEncodedLen, TypeInfo)]
pub struct StorageInfo {
pub(super) used_storage: u128,
pub(super) available_storage: u128,
pub(super) time: u128,
}
/// The custom struct for miner table of block explorer.
#[derive(PartialEq, Eq, Encode, Decode, Clone, RuntimeDebug, MaxEncodedLen, TypeInfo)]
pub struct TableInfo<AccountId, Balance> {
pub(super) address: AccountId,
pub(super) beneficiary: AccountId,
pub(super) total_storage: u128,
pub(super) average_daily_data_traffic_in: u64,
pub(super) average_daily_data_traffic_out: u64,
pub(super) mining_reward: Balance,
}
/// The custom struct for miner detail of block explorer.
#[derive(PartialEq, Eq, Encode, Decode, Clone, RuntimeDebug, MaxEncodedLen, TypeInfo)]
pub struct MinerDetailInfo<AccountId, Balance, BoundedString> {
pub(super) address: AccountId,
pub(super) beneficiary: AccountId,
pub(super) ip: BoundedString,
pub(super) power: u128,
pub(super) space: u128,
pub(super) total_reward: Balance,
pub(super) total_rewards_currently_available: Balance,
pub(super) totald_not_receive: Balance,
pub(super) collaterals: Balance,
}
/// The custom struct for miner detail of block explorer.
#[derive(PartialEq, Eq, Encode, Decode, Clone, RuntimeDebug, MaxEncodedLen, TypeInfo)]
pub struct MinerStatInfo<Balance> {
pub(super) total_miners: u64,
pub(super) active_miners: u64,
pub(super) staking: Balance,
pub(super) miner_reward: Balance,
pub(super) sum_files: u128,
}
/// The custom struct for storing info of storage CalculateRewardOrder.
#[derive(PartialEq, Eq, Encode, Default, Decode, Clone, RuntimeDebug, MaxEncodedLen, TypeInfo)]
#[scale_info(skip_type_params(T))]
#[codec(mel_bound())]
pub struct CalculateRewardOrder<T: pallet::Config> {
    pub(super) calculate_reward: u128,
pub(super) start_t: BlockNumberOf<T>,
pub(super) deadline: BlockNumberOf<T>,
}
/// The custom struct for storing info of storage RewardClaim.
#[derive(PartialEq, Eq, Encode, Decode, Clone, RuntimeDebug, MaxEncodedLen, TypeInfo)]
pub struct RewardClaim<AccountId, Balance> {
pub(super) beneficiary: AccountId,
pub(super) total_reward: Balance,
pub(super) have_to_receive: Balance,
pub(super) current_availability: Balance,
pub(super) total_not_receive: Balance,
}
/// The custom struct for storing info of storage FaucetRecord.
#[derive(PartialEq, Eq, Encode, Default, Decode, Clone, RuntimeDebug, MaxEncodedLen, TypeInfo)]
pub struct FaucetRecord<BlockNumber> {
pub(super) last_claim_time: BlockNumber,
} | 41.340206 | 118 | 0.762095 |
764927d40ae52e321cd9656bae965e48a82e144c | 8,352 | use std::collections::{HashMap, BTreeSet};
use crate::util::error::Error;
use fs_err::File;
use std::io::{BufReader, BufRead};
use crate::genomics::variant::Variant;
#[derive(Eq, Ord, PartialOrd, PartialEq, Clone, Copy)]
struct Interval {
begin: u32,
end: u32,
}
struct Region {
chrom: String,
interval: Interval,
}
pub(crate) struct Regions {
by_chrom: HashMap<String, Vec<Interval>>,
}
struct RegionsBuffer {
by_chrom: HashMap<String, BTreeSet<Interval>>,
}
impl Interval {
pub fn new(begin: u32, end: u32) -> Interval {
Interval { begin, end }
}
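    /// True when the two intervals overlap, with `end` treated as exclusive
    /// (matching the comparisons below).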
pub fn overlaps(&self, other: &Interval) -> bool {
self.begin < other.end && self.end > other.begin
}
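    /// Like `overlaps`, but also true for intervals that are merely adjacent;
    /// this is what lets `absorb` merge touching regions into one.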
pub fn touches(&self, other: &Interval) -> bool {
self.begin <= other.end && self.end >= other.begin
}
pub fn absorb(&mut self, other: &Interval) {
if self.begin > other.begin { self.begin = other.begin }
if self.end < other.end { self.end = other.end }
}
}
impl Region {
pub fn new(chrom: String, begin: u32, end: u32) -> Region {
let interval = Interval { begin, end };
Region { chrom, interval }
}
}
impl RegionsBuffer {
fn new() -> RegionsBuffer {
let by_chrom = HashMap::<String, BTreeSet<Interval>>::new();
RegionsBuffer { by_chrom }
}
fn add(&mut self, region: Region) {
let chrom = region.chrom;
let interval = region.interval;
match self.by_chrom.get_mut(chrom.as_str()) {
None => {
let mut intervals = BTreeSet::new();
intervals.insert(interval);
self.by_chrom.insert(chrom, intervals);
}
Some(intervals) => {
intervals.insert(interval);
}
}
}
fn consolidate_intervals(interval_set: &BTreeSet<Interval>) -> Vec<Interval> {
let mut intervals_iter = interval_set.iter();
let mut intervals_consolidated = Vec::<Interval>::new();
if let Some(first_interval) = intervals_iter.next() {
let mut current_interval = *first_interval;
for interval in intervals_iter {
if current_interval.touches(interval) {
current_interval.absorb(interval);
} else {
intervals_consolidated.push(current_interval);
current_interval = *interval;
}
}
intervals_consolidated.push(current_interval);
}
intervals_consolidated
}
pub fn as_regions(&self) -> Regions {
let mut by_chrom: HashMap::<String, Vec<Interval>> = HashMap::new();
for (chrom, interval_set) in &self.by_chrom {
let intervals = RegionsBuffer::consolidate_intervals(interval_set);
by_chrom.insert(chrom.clone(), intervals);
}
Regions { by_chrom }
}
}
impl Regions {
pub(crate) fn load(file: &str) -> Result<Regions, Error> {
let reader = BufReader::new(File::open(file)?);
let mut regions_buffer = RegionsBuffer::new();
for line_result in reader.lines() {
let line = line_result?;
let mut parts = line.split('\t');
let _id = parts.next().ok_or("Need at least four columns.")?;
let chrom = parts.next().ok_or("chrom column missing.")?;
            let begin = parts.next().ok_or("begin column missing.")?.parse::<u32>()?;
let end = parts.next().ok_or("end column missing.")?.parse::<u32>()?;
let region = Region::new(chrom.to_string(), begin, end);
regions_buffer.add(region);
}
Ok(regions_buffer.as_regions())
}
pub(crate) fn overlap(&self, variant: &Variant) -> bool {
let interval = Interval::new(variant.pos, variant.end());
let chrom = &variant.chrom;
match self.by_chrom.get(chrom.as_str()) {
None => { false }
Some(intervals) => { overlaps_intervals(&interval, intervals) }
}
}
}
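/// Binary search over `intervals`, which `RegionsBuffer::as_regions` produces
/// sorted and pairwise disjoint, for any interval overlapping `interval`.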
fn overlaps_intervals(interval: &Interval, intervals: &[Interval]) -> bool {
if intervals.is_empty() {
false
} else {
let mut i_min: usize = 0;
let mut i_max: usize = intervals.len() - 1;
loop {
if i_min == i_max {
break intervals[i_min].overlaps(interval);
} else {
let i_mid = (i_min + i_max) / 2;
let interval_i_mid = intervals[i_mid];
if interval_i_mid.end <= interval.begin {
i_min = i_mid + 1;
} else if interval_i_mid.begin >= interval.end {
if i_mid > 0 {
i_max = i_mid - 1;
} else {
break false;
}
} else {
break true;
}
if i_min > i_max {
break false;
}
}
}
}
}
#[cfg(test)]
mod tests {
use crate::util::error::Error;
use crate::cache::regions::Regions;
use crate::genomics::variant::Variant;
use std::io::{BufWriter, Write};
use fs_err::File;
fn write_regions_line(writer: &mut BufWriter<File>, id: &str, chrom: &str, begin: u32,
end: u32) -> Result<(), Error> {
let line = format!("{}\t{}\t{}\t{}\n", id, chrom, begin, end);
        writer.write_all(line.as_bytes())?;
Ok(())
}
fn write_regions_file(path: &str) -> Result<(), Error> {
let mut writer = BufWriter::new(File::create(path)?);
write_regions_line(&mut writer, "region1", "1", 100, 200)?;
write_regions_line(&mut writer, "region2", "2", 100, 200)?;
write_regions_line(&mut writer, "region3", "2", 200, 300)?;
write_regions_line(&mut writer, "region4", "3", 100, 200)?;
write_regions_line(&mut writer, "region5", "3", 300, 400)?;
writer.flush()?;
Ok(())
}
fn assert_included(regions: &Regions, variant: &Variant) {
assert!(regions.overlap(variant), "Regions should include variant {}, but don't.", variant)
}
fn assert_not_included(regions: &Regions, variant: &Variant) {
assert!(!regions.overlap(variant), "Regions shouldn't include variant {}, but do.",
variant)
}
fn new_variant(chrom: &str, pos: u32, ref_allele: &str, alt_allele: &str) -> Variant {
Variant::new(chrom.to_string(), pos, ref_allele.to_string(),
alt_allele.to_string())
}
#[test]
fn load_and_test_regions() -> Result<(), Error> {
let regions_file_path = "tmp/regions.tsv";
write_regions_file(®ions_file_path)?;
let regions = Regions::load(regions_file_path)?;
assert_eq!(regions.by_chrom.len(), 3);
assert_eq!(regions.by_chrom.get("1").unwrap().len(), 1);
assert_eq!(regions.by_chrom.get("2").unwrap().len(), 1);
assert_eq!(regions.by_chrom.get("3").unwrap().len(), 2);
assert_not_included(®ions,
&new_variant("1", 50, "A", "T"));
assert_included(®ions,
&new_variant("1", 150, "A", "T"));
assert_not_included(®ions,
&new_variant("1", 250, "A", "T"));
assert_not_included(®ions,
&new_variant("2", 50, "A", "T"));
assert_included(®ions,
&new_variant("2", 150, "A", "T"));
assert_included(®ions,
&new_variant("2", 250, "A", "T"));
assert_not_included(®ions,
&new_variant("2", 350, "A", "T"));
assert_not_included(®ions,
&new_variant("3", 50, "A", "T"));
assert_included(®ions,
&new_variant("3", 150, "A", "T"));
assert_not_included(®ions,
&new_variant("3", 250, "A", "T"));
assert_included(®ions,
&new_variant("3", 350, "A", "T"));
assert_not_included(®ions,
&new_variant("3", 450, "A", "T"));
assert_not_included(®ions,
&new_variant("X", 50, "A", "T"));
Ok(())
}
}
| 36.155844 | 99 | 0.534363 |