hexsha (string, len 40) | size (int64, 4-1.05M) | content (string, 4-1.05M chars) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 1-1k) | alphanum_fraction (float64, 0.25-1) |
---|---|---|---|---|---|
f872dba85877a8710173a37653a368c2c1145a6d
| 5,390 |
use clap::Clap;
use geo::prelude::EuclideanDistance;
use gre::*;
use noise::*;
use std::f64::consts::PI;
use svg::node::element::path::Data;
use svg::node::element::*;
fn art(opts: Opts) -> Vec<Group> {
let boundaries = (10.0, 10.0, 280.0, 200.0);
let center = (
boundaries.0 + (boundaries.2 - boundaries.0) * 0.5,
boundaries.1 + (boundaries.3 - boundaries.1) * 0.5,
);
let spins = 8.;
let radius = 80.;
let lines = 500;
let precision = 1.0;
let length = 200;
let colors = vec!["black", "green"];
colors
.iter()
.enumerate()
.map(|(i, color)| {
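// Count how many times strokes pass through each 1mm cell of the
// 280x200mm page; used below to end routes that overlap too much.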
let mut counters_passages = vec![0; 280 * 200];
let mut passage_mm = |p: (f64, f64)| {
let i = (p.1 as usize).max(0).min(199)
* 280
+ (p.0 as usize).max(0).min(279);
let v = counters_passages[i] + 1;
counters_passages[i] = v;
v
};
let mut data = Data::new();
let perlin = Perlin::new();
// the field gives the direction angle of the flow (not a length)
let field = |(x, y): (f64, f64), l: f64| {
4.0 * perlin.get([
3.0 * x,
3.0 * y,
1.0 + opts.seed + i as f64 * 0.01,
]) + 1.0
* perlin.get([
8.0 * x,
8.0 * y,
2.0 + opts.seed + l * 0.05,
])
};
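// Seed the strokes along an Archimedean spiral so the disc is covered evenly.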
let initial_positions = (0..lines)
.map(|l| {
let p = (l as f64 + i as f64 * 0.5)
/ (lines as f64);
let amp = radius * (p);
let a = spins * p * 2. * PI;
(
boundaries.0
+ (boundaries.2 - boundaries.0)
* 0.5
+ amp * a.cos(),
boundaries.1
+ (boundaries.3 - boundaries.1)
* 0.5
+ amp * a.sin(),
)
})
.collect();
let mut last_angles: Vec<f64> = (0..lines)
.map(|l| {
(spins
* ((l as f64 + i as f64 * 0.5)
/ (lines as f64))
* 2.
* PI
+ PI)
% (2. * PI)
})
.collect();
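// Advance a route by one `precision` step along the field angle, flipping
// the angle by PI when it would reverse abruptly; a route ends when it
// leaves the disc, revisits a cell too often, gets too long, or exits the page.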
let mut build_route =
|p: (f64, f64), l, route_i| {
let normalized =
normalize_in_boundaries(
p, boundaries,
);
let mut angle = field(
normalized,
(l as f64) / (lines as f64),
);
let last_angle: f64 =
last_angles[route_i];
if (angle - last_angle).abs() > 0.5 * PI
{
angle += PI;
}
last_angles[route_i] = angle;
let next = (
p.0 + precision * angle.cos(),
p.1 + precision * angle.sin(),
);
let passage = passage_mm(next);
let ends = euclidian_dist(next, center)
> radius
|| passage > 3
|| l > length
|| out_of_boundaries(
next, boundaries,
);
if ends {
None
} else {
Some((next, false))
}
};
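// Grow every route in parallel from its seed point; the gre helper
// presumably stops a route when it collides with an existing one.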
let mut routes =
build_routes_with_collision_par(
initial_positions,
&mut build_route,
);
let mut croute = Vec::new();
for c in 0..1000 {
let p = (c as f64) / 1000.;
let a = p * 2. * PI;
let amp = radius - i as f64 * 0.4;
croute.push((
center.0 + amp * a.cos(),
center.1 + amp * a.sin(),
));
}
croute.push(croute[0]);
routes.push(croute);
for route in routes {
data = render_route(data, route);
}
let mut l = layer(color);
l = l.add(base_path(color, 0.2, data));
if i == colors.len() - 1 {
l = l.add(signature(
1.0,
(260.0, 190.0),
color,
));
}
l
})
.collect()
}
#[derive(Clap)]
#[clap()]
struct Opts {
#[clap(short, long, default_value = "1.0")]
seed: f64,
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(opts);
let mut document = base_a4_landscape("white");
for g in groups {
document = document.add(g);
}
svg::save("image.svg", &document).unwrap();
}
| 31.156069 | 60 | 0.339518 |
180d3993ec8d8bc4b3a628ca6566f12bf9c0c684
| 7,003 |
// Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
use std::ops::Deref;
use tari_crypto::{commitment::HomomorphicCommitmentFactory, tari_utilities::Hashable};
use helpers::{
block_builders::chain_block_with_new_coinbase,
database::create_orphan_block,
sample_blockchains::{create_blockchain_db_no_cut_through, create_new_blockchain},
};
use tari_common::configuration::Network;
use tari_common_types::types::CommitmentFactory;
use tari_core::{
blocks::Block,
chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, PrunedOutput},
transactions::{
tari_amount::T,
test_helpers::schema_to_transaction,
transaction_entities::{TransactionOutput, UnblindedOutput},
CryptoFactories,
},
txn_schema,
};
use tari_test_utils::runtime::test_async;
#[allow(dead_code)]
mod helpers;
/// Finds the UTXO in a block corresponding to the unblinded output. We have to search the outputs because UTXOs get
/// sorted within blocks, so the order in which they were inserted can change.
fn find_utxo(output: &UnblindedOutput, block: &Block, factory: &CommitmentFactory) -> Option<TransactionOutput> {
for utxo in block.body.outputs().iter() {
if factory.open_value(&output.spending_key, output.value.into(), &utxo.commitment) {
return Some(utxo.clone());
}
}
None
}
#[test]
fn fetch_async_headers() {
let (db, blocks, _, _) = create_blockchain_db_no_cut_through();
test_async(move |rt| {
let db = AsyncBlockchainDb::new(db);
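// One task per block: look the header up by height and by hash
// concurrently and check that both queries return the same header.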
for block in blocks.into_iter() {
let height = block.height();
let hash = block.hash().clone();
let db = db.clone();
rt.spawn(async move {
let header_height = db.fetch_header(height).await.unwrap().unwrap();
let header_hash = db.fetch_header_by_block_hash(hash).await.unwrap().unwrap();
assert_eq!(block.header(), &header_height);
assert_eq!(block.header(), &header_hash);
});
}
});
}
#[test]
fn async_rewind_to_height() {
let (db, blocks, _, _) = create_blockchain_db_no_cut_through();
test_async(move |rt| {
let db = AsyncBlockchainDb::new(db);
rt.spawn(async move {
db.rewind_to_height(2).await.unwrap();
let result = db.fetch_block(3).await;
assert!(result.is_err());
let block = db.fetch_block(2).await.unwrap();
assert_eq!(block.confirmations(), 1);
assert_eq!(blocks[2].block(), block.block());
});
});
}
#[test]
fn fetch_async_utxo() {
let (adb, blocks, outputs, _) = create_blockchain_db_no_cut_through();
let factory = CommitmentFactory::default();
// Retrieve a UTXO and an STXO
let utxo = find_utxo(&outputs[4][0], blocks[4].block(), &factory).unwrap();
let stxo = find_utxo(&outputs[1][0], blocks[1].block(), &factory).unwrap();
test_async(move |rt| {
let db = AsyncBlockchainDb::new(adb.clone());
let db2 = AsyncBlockchainDb::new(adb);
rt.spawn(async move {
let utxo_check = db.fetch_utxo(utxo.hash()).await.unwrap().unwrap();
assert_eq!(utxo_check, PrunedOutput::NotPruned { output: utxo });
});
rt.spawn(async move {
let stxo_check = db2.fetch_utxo(stxo.hash()).await.unwrap().unwrap();
assert_eq!(stxo_check, PrunedOutput::NotPruned { output: stxo });
});
});
}
#[test]
fn fetch_async_block() {
let (db, blocks, _, _) = create_blockchain_db_no_cut_through();
test_async(move |rt| {
let db = AsyncBlockchainDb::new(db);
rt.spawn(async move {
for block in blocks.into_iter() {
let height = block.height();
let block_check = db.fetch_block(height).await.unwrap();
assert_eq!(block.block(), block_check.block());
}
});
});
}
#[test]
fn async_add_new_block() {
let network = Network::LocalNet;
let (db, blocks, outputs, consensus_manager) = create_new_blockchain(network);
let schema = vec![txn_schema!(from: vec![outputs[0][0].clone()], to: vec![20 * T, 20 * T])];
let txns = schema_to_transaction(&schema)
.0
.iter()
.map(|t| t.deref().clone())
.collect();
let new_block = chain_block_with_new_coinbase(
blocks.last().unwrap(),
txns,
&consensus_manager,
&CryptoFactories::default(),
)
.0;
let new_block = db.prepare_new_block(new_block).unwrap();
test_async(|rt| {
let db = AsyncBlockchainDb::new(db);
rt.spawn(async move {
let result = db.add_block(new_block.clone().into()).await.unwrap();
let block = db.fetch_block(1).await.unwrap();
match result {
BlockAddResult::Ok(_) => assert_eq!(Block::from(block).hash(), new_block.hash()),
_ => panic!("Unexpected result"),
}
});
});
}
#[test]
fn async_add_block_fetch_orphan() {
let (db, _, _, consensus) = create_blockchain_db_no_cut_through();
let orphan = create_orphan_block(7, vec![], &consensus);
let block_hash = orphan.hash();
test_async(move |rt| {
let db = AsyncBlockchainDb::new(db);
rt.spawn(async move {
db.add_block(orphan.clone().into()).await.unwrap();
let block = db.fetch_orphan(block_hash).await.unwrap();
assert_eq!(orphan, block);
});
});
}
| 38.478022 | 118 | 0.649579 |
f8b472fdc2dcdc96222279825d248f6de389da03
| 51,451 |
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
#[derive(Clone)]
pub struct Client {
endpoint: String,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
pipeline: azure_core::Pipeline,
}
#[derive(Clone)]
pub struct ClientBuilder {
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
endpoint: Option<String>,
scopes: Option<Vec<String>>,
}
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
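// Illustrative use of the builder (a sketch, not part of the generated file;
// `credential` is any caller-supplied Arc<dyn azure_core::auth::TokenCredential>):
//
// let client = ClientBuilder::new(credential)
//     .endpoint(DEFAULT_ENDPOINT)
//     .build();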
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
impl Client {
pub(crate) fn endpoint(&self) -> &str {
self.endpoint.as_str()
}
pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential {
self.credential.as_ref()
}
pub(crate) fn scopes(&self) -> Vec<&str> {
self.scopes.iter().map(String::as_str).collect()
}
pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> {
let mut context = azure_core::Context::default();
let mut request = request.into();
self.pipeline.send(&mut context, &mut request).await
}
pub fn new(
endpoint: impl Into<String>,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
) -> Self {
let endpoint = endpoint.into();
let pipeline = azure_core::Pipeline::new(
option_env!("CARGO_PKG_NAME"),
option_env!("CARGO_PKG_VERSION"),
azure_core::ClientOptions::default(),
Vec::new(),
Vec::new(),
);
Self {
endpoint,
credential,
scopes,
pipeline,
}
}
pub fn machine_learning_compute(&self) -> machine_learning_compute::Client {
machine_learning_compute::Client(self.clone())
}
pub fn operationalization_clusters(&self) -> operationalization_clusters::Client {
operationalization_clusters::Client(self.clone())
}
}
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
#[error(transparent)]
OperationalizationClusters_Get(#[from] operationalization_clusters::get::Error),
#[error(transparent)]
OperationalizationClusters_CreateOrUpdate(#[from] operationalization_clusters::create_or_update::Error),
#[error(transparent)]
OperationalizationClusters_Update(#[from] operationalization_clusters::update::Error),
#[error(transparent)]
OperationalizationClusters_Delete(#[from] operationalization_clusters::delete::Error),
#[error(transparent)]
OperationalizationClusters_ListKeys(#[from] operationalization_clusters::list_keys::Error),
#[error(transparent)]
OperationalizationClusters_CheckUpdate(#[from] operationalization_clusters::check_update::Error),
#[error(transparent)]
OperationalizationClusters_UpdateSystem(#[from] operationalization_clusters::update_system::Error),
#[error(transparent)]
OperationalizationClusters_ListByResourceGroup(#[from] operationalization_clusters::list_by_resource_group::Error),
#[error(transparent)]
OperationalizationClusters_ListBySubscriptionId(#[from] operationalization_clusters::list_by_subscription_id::Error),
#[error(transparent)]
MachineLearningCompute_ListAvailableOperations(#[from] machine_learning_compute::list_available_operations::Error),
}
pub mod operationalization_clusters {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
cluster_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
cluster_name: cluster_name.into(),
}
}
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
cluster_name: impl Into<String>,
parameters: impl Into<models::OperationalizationCluster>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
cluster_name: cluster_name.into(),
parameters: parameters.into(),
}
}
pub fn update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
cluster_name: impl Into<String>,
parameters: impl Into<models::OperationalizationClusterUpdateParameters>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
cluster_name: cluster_name.into(),
parameters: parameters.into(),
}
}
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
cluster_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
cluster_name: cluster_name.into(),
}
}
pub fn list_keys(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
cluster_name: impl Into<String>,
) -> list_keys::Builder {
list_keys::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
cluster_name: cluster_name.into(),
}
}
pub fn check_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
cluster_name: impl Into<String>,
) -> check_update::Builder {
check_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
cluster_name: cluster_name.into(),
}
}
pub fn update_system(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
cluster_name: impl Into<String>,
) -> update_system::Builder {
update_system::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
cluster_name: cluster_name.into(),
}
}
pub fn list_by_resource_group(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
skiptoken: None,
}
}
pub fn list_by_subscription_id(&self, subscription_id: impl Into<String>) -> list_by_subscription_id::Builder {
list_by_subscription_id::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
skiptoken: None,
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) cluster_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationalizationCluster, Error>> {
Box::pin(async move {
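// Build the ARM request by hand: URL, bearer token, and api-version
// query pair; then send it through the shared pipeline and
// deserialize the JSON body into the expected model.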
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cluster_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationalizationCluster =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::OperationalizationCluster),
Created201(models::OperationalizationCluster),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) cluster_name: String,
pub(crate) parameters: models::OperationalizationCluster,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cluster_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationalizationCluster =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationalizationCluster =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) cluster_name: String,
pub(crate) parameters: models::OperationalizationClusterUpdateParameters,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationalizationCluster, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cluster_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationalizationCluster =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) cluster_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cluster_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_keys {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::OperationalizationClusterCredentials),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) cluster_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{}/listKeys",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cluster_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationalizationClusterCredentials =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod check_update {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::CheckUpdateResponse),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) cluster_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{}/checkUpdate",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cluster_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CheckUpdateResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod update_system {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::UpdateSystemResponse),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) cluster_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{}/updateSystem",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.cluster_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::UpdateSystemResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_by_resource_group {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) skiptoken: Option<String>,
}
impl Builder {
pub fn skiptoken(mut self, skiptoken: impl Into<String>) -> Self {
self.skiptoken = Some(skiptoken.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::PaginatedOperationalizationClustersList, Error>>
{
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.MachineLearningCompute/operationalizationClusters",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(skiptoken) = &self.skiptoken {
url.query_pairs_mut().append_pair("$skiptoken", skiptoken);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::PaginatedOperationalizationClustersList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
pub mod list_by_subscription_id {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) skiptoken: Option<String>,
}
impl Builder {
pub fn skiptoken(mut self, skiptoken: impl Into<String>) -> Self {
self.skiptoken = Some(skiptoken.into());
self
}
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::PaginatedOperationalizationClustersList, Error>>
{
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.MachineLearningCompute/operationalizationClusters",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(skiptoken) = &self.skiptoken {
url.query_pairs_mut().append_pair("$skiptoken", skiptoken);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::PaginatedOperationalizationClustersList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod machine_learning_compute {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list_available_operations(&self) -> list_available_operations::Builder {
list_available_operations::Builder { client: self.0.clone() }
}
}
pub mod list_available_operations {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AvailableOperations, Error>> {
Box::pin(async move {
let url_str = &format!("{}/providers/Microsoft.MachineLearningCompute/operations", self.client.endpoint(),);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AvailableOperations =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
| 51.144135 | 286 | 0.532837 |
c1341744bb79f5e361765fd87354a93853031f7b
| 3,707 |
#[doc = "Register `EVENTS_TXPTRUPD` reader"]
pub struct R(crate::R<EVENTS_TXPTRUPD_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<EVENTS_TXPTRUPD_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<EVENTS_TXPTRUPD_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<EVENTS_TXPTRUPD_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `EVENTS_TXPTRUPD` writer"]
pub struct W(crate::W<EVENTS_TXPTRUPD_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<EVENTS_TXPTRUPD_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<EVENTS_TXPTRUPD_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<EVENTS_TXPTRUPD_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `EVENTS_TXPTRUPD` reader - "]
pub struct EVENTS_TXPTRUPD_R(crate::FieldReader<bool, bool>);
impl EVENTS_TXPTRUPD_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
EVENTS_TXPTRUPD_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for EVENTS_TXPTRUPD_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `EVENTS_TXPTRUPD` writer - "]
pub struct EVENTS_TXPTRUPD_W<'a> {
w: &'a mut W,
}
impl<'a> EVENTS_TXPTRUPD_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
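// Illustrative usage (a sketch; `i2s` is a hypothetical peripheral instance
// from the generated API that exposes this register):
//
// if i2s.events_txptrupd.read().events_txptrupd().bit_is_set() {
//     i2s.events_txptrupd.write(|w| w.events_txptrupd().clear_bit());
// }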
impl R {
#[doc = "Bit 0"]
#[inline(always)]
pub fn events_txptrupd(&self) -> EVENTS_TXPTRUPD_R {
EVENTS_TXPTRUPD_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0"]
#[inline(always)]
pub fn events_txptrupd(&mut self) -> EVENTS_TXPTRUPD_W {
EVENTS_TXPTRUPD_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "The TDX.PTR register has been copied to internal double-buffers. When the I2S module is started and TX is enabled, this event will be generated for every RXTXD.MAXCNT words that are sent on the SDOUT pin.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [events_txptrupd](index.html) module"]
pub struct EVENTS_TXPTRUPD_SPEC;
impl crate::RegisterSpec for EVENTS_TXPTRUPD_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [events_txptrupd::R](R) reader structure"]
impl crate::Readable for EVENTS_TXPTRUPD_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [events_txptrupd::W](W) writer structure"]
impl crate::Writable for EVENTS_TXPTRUPD_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets EVENTS_TXPTRUPD to value 0"]
impl crate::Resettable for EVENTS_TXPTRUPD_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 32.517544 | 600 | 0.62908 |
aceb37146b72d745a53d4bd1777307fab10974e8
| 13,665 |
// Copyright 2020-2021 the Deno authors. All rights reserved. MIT license.
use super::{Context, LintRule, ProgramRef, DUMMY_NODE};
use swc_common::Span;
use swc_ecmascript::ast::{
Class, ClassMember, Constructor, Expr, ExprOrSuper, ReturnStmt, Stmt,
};
use swc_ecmascript::visit::noop_visit_type;
use swc_ecmascript::visit::Node;
use swc_ecmascript::visit::Visit;
pub struct ConstructorSuper;
// This rule currently differs from the ESLint implementation,
// as there is no way of handling code paths in dlint yet
impl LintRule for ConstructorSuper {
fn new() -> Box<Self> {
Box::new(ConstructorSuper)
}
fn tags(&self) -> &'static [&'static str] {
&["recommended"]
}
fn code(&self) -> &'static str {
"constructor-super"
}
fn lint_program(&self, context: &mut Context, program: ProgramRef<'_>) {
let mut visitor = ConstructorSuperVisitor::new(context);
match program {
ProgramRef::Module(ref m) => visitor.visit_module(m, &DUMMY_NODE),
ProgramRef::Script(ref s) => visitor.visit_script(s, &DUMMY_NODE),
}
}
fn docs(&self) -> &'static str {
r#"Verifies the correct usage of constructors and calls to `super()`.
Defined constructors of derived classes (e.g. `class A extends B`) must always call
`super()`. Classes which extend non-constructors (e.g. `class A extends null`) must
not have a constructor.
### Invalid:
```typescript
class A {}
class Z {
constructor() {}
}
class B extends Z {
constructor() {} // missing super() call
}
class C {
constructor() {
super(); // Syntax error
}
}
class D extends null {
constructor() {} // illegal constructor
}
class E extends null {
constructor() { // illegal constructor
super();
}
}
```
### Valid:
```typescript
class A {}
class B extends A {}
class C extends A {
constructor() {
super();
}
}
class D extends null {}
```
"#
}
}
enum DiagnosticKind {
TooManySuper,
NoSuper,
UnnecessaryConstructor,
UnnecessarySuper,
}
impl DiagnosticKind {
#[cfg(test)]
fn message_and_hint(&self) -> (&'static str, &'static str) {
(self.message(), self.hint())
}
fn message(&self) -> &'static str {
match *self {
DiagnosticKind::TooManySuper => "Constructors of derived classes must call super() only once",
DiagnosticKind::NoSuper => "Constructors of derived classes must call super()",
DiagnosticKind::UnnecessaryConstructor => "Classes which inherit from a non constructor must not define a constructor",
DiagnosticKind::UnnecessarySuper => "Constructors of non derived classes must not call super()",
}
}
fn hint(&self) -> &'static str {
match *self {
DiagnosticKind::TooManySuper => "Remove extra calls to super()",
DiagnosticKind::NoSuper => "Add call to super() in the constructor",
DiagnosticKind::UnnecessaryConstructor => "Remove constructor",
DiagnosticKind::UnnecessarySuper => "Remove call to super()",
}
}
}
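/// A literal in the `extends` clause (e.g. `class A extends null`) can never
/// be a constructor, so defining a constructor on such a class is an error.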
fn inherits_from_non_constructor(class: &Class) -> bool {
if let Some(expr) = &class.super_class {
if let Expr::Lit(_) = &**expr {
return true;
}
}
false
}
fn super_call_spans(constructor: &Constructor) -> Vec<Span> {
if let Some(block_stmt) = &constructor.body {
block_stmt
.stmts
.iter()
.filter_map(|stmt| extract_super_span(stmt))
.collect()
} else {
vec![]
}
}
fn extract_super_span(stmt: &Stmt) -> Option<Span> {
if let Stmt::Expr(expr) = stmt {
if let Expr::Call(call) = &*expr.expr {
if matches!(&call.callee, ExprOrSuper::Super(_)) {
return Some(call.span);
}
}
}
None
}
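/// Returns the first `return` statement that appears before any `super()`
/// call; such an early `return` substitutes for calling `super()`.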
fn return_before_super(constructor: &Constructor) -> Option<&ReturnStmt> {
if let Some(block_stmt) = &constructor.body {
for stmt in &block_stmt.stmts {
if extract_super_span(stmt).is_some() {
return None;
}
if let Stmt::Return(ret) = stmt {
return Some(ret);
}
}
}
None
}
struct ConstructorSuperVisitor<'c> {
context: &'c mut Context,
}
impl<'c> ConstructorSuperVisitor<'c> {
fn new(context: &'c mut Context) -> Self {
Self { context }
}
fn check_constructor(&mut self, constructor: &Constructor, class: &Class) {
// Declarations shouldn't be linted
if constructor.body.is_none() {
return;
}
// returning a value is a substitute for `super()`.
if let Some(ret) = return_before_super(constructor) {
if ret.arg.is_none() && class.super_class.is_some() {
let kind = DiagnosticKind::NoSuper;
self.context.add_diagnostic_with_hint(
constructor.span,
"constructor-super",
kind.message(),
kind.hint(),
);
}
return;
}
if inherits_from_non_constructor(class) {
let kind = DiagnosticKind::UnnecessaryConstructor;
self.context.add_diagnostic_with_hint(
constructor.span,
"constructor-super",
kind.message(),
kind.hint(),
);
return;
}
let super_calls = super_call_spans(constructor);
// if there is more than one `super()` call, report the extra ones.
for exceeded_super_span in super_calls.iter().skip(1) {
let kind = DiagnosticKind::TooManySuper;
self.context.add_diagnostic_with_hint(
*exceeded_super_span,
"constructor-super",
kind.message(),
kind.hint(),
);
}
match (super_calls.is_empty(), class.super_class.is_some()) {
(true, true) => {
let kind = DiagnosticKind::NoSuper;
self.context.add_diagnostic_with_hint(
constructor.span,
"constructor-super",
kind.message(),
kind.hint(),
);
}
(false, false) => {
let kind = DiagnosticKind::UnnecessarySuper;
self.context.add_diagnostic_with_hint(
super_calls[0],
"constructor-super",
kind.message(),
kind.hint(),
);
}
_ => {}
}
}
}
impl<'c> Visit for ConstructorSuperVisitor<'c> {
noop_visit_type!();
fn visit_class(&mut self, class: &Class, parent: &dyn Node) {
for member in &class.body {
if let ClassMember::Constructor(constructor) = member {
self.check_constructor(constructor, class);
}
}
swc_ecmascript::visit::visit_class(self, class, parent);
}
}
// most tests are taken from ESLint; those requiring
// code path support are commented out
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn constructor_super_valid() {
assert_lint_ok! {
ConstructorSuper,
// non derived classes.
"class A { }",
"class A { constructor() { } }",
// inheriting from non-constructors.
// these are valid as long as we don't define a constructor.
"class A extends null { }",
// derived classes.
"class A extends B { }",
"class A extends B { constructor() { super(); } }",
// TODO(magurotuna): control flow analysis is required to handle these cases
// "class A extends B { constructor() { if (true) { super(); } else { super(); } } }",
// "class A extends B { constructor() { a ? super() : super(); } }",
// "class A extends B { constructor() { if (a) super(); else super(); } }",
// "class A extends B { constructor() { switch (a) { case 0: super(); break; default: super(); } } }",
// "class A extends B { constructor() { try {} finally { super(); } } }",
// "class A extends B { constructor() { if (a) throw Error(); super(); } }",
// derived classes.
"class A extends (class B {}) { constructor() { super(); } }",
"class A extends (B = C) { constructor() { super(); } }",
"class A extends (B || C) { constructor() { super(); } }",
"class A extends (a ? B : C) { constructor() { super(); } }",
"class A extends (B, C) { constructor() { super(); } }",
// nested.
"class A { constructor() { class B extends C { constructor() { super(); } } } }",
"class A extends B { constructor() { super(); class C extends D { constructor() { super(); } } } }",
"class A extends B { constructor() { super(); class C { constructor() { } } } }",
// returning value is a substitute of 'super()'.
"class A extends B { constructor() { if (true) return a; super(); } }",
"class A extends null { constructor() { return a; } }",
"class A { constructor() { return a; } }",
// https://github.com/eslint/eslint/issues/5261
"class A extends B { constructor(a) { super(); for (const b of a) { this.a(); } } }",
// https://github.com/eslint/eslint/issues/5319
"class Foo extends Object { constructor(method) { super(); this.method = method || function() {}; } }",
// https://github.com/denoland/deno_lint/issues/464
"declare class DOMException extends Error {
constructor(message?: string, name?: string);
}"
};
}
#[test]
fn constructor_super_invalid() {
let (too_many_super_message, too_many_super_hint) =
DiagnosticKind::TooManySuper.message_and_hint();
let (no_super_message, no_super_hint) =
DiagnosticKind::NoSuper.message_and_hint();
let (unnecessary_constructor_message, unnecessary_constructor_hint) =
DiagnosticKind::UnnecessaryConstructor.message_and_hint();
let (unnecessary_super_message, unnecessary_super_hint) =
DiagnosticKind::UnnecessarySuper.message_and_hint();
assert_lint_err! {
ConstructorSuper,
"class A { constructor() { super(); } }": [
{
col: 26,
message: unnecessary_super_message,
hint: unnecessary_super_hint,
}
],
"class A extends null { constructor() { super(); } }": [
{
col: 23,
message: unnecessary_constructor_message,
hint: unnecessary_constructor_hint,
}
],
"class A extends null { constructor() { } }": [
{
col: 23,
message: unnecessary_constructor_message,
hint: unnecessary_constructor_hint,
}
],
"class A extends 1000 { constructor() { super(); } }": [
{
col: 23,
message: unnecessary_constructor_message,
hint: unnecessary_constructor_hint,
}
],
"class A extends 'ab' { constructor() { super(); } }": [
{
col: 23,
message: unnecessary_constructor_message,
hint: unnecessary_constructor_hint,
}
],
"class A extends B { constructor() { } }": [
{
col: 20,
message: no_super_message,
hint: no_super_hint,
}
],
"class A extends B { constructor() { for (var a of b) super.foo(); } }": [
{
col: 20,
message: no_super_message,
hint: no_super_hint,
}
],
"class A extends B { constructor() { class C extends D { constructor() { super(); } } } }": [
{
col: 20,
message: no_super_message,
hint: no_super_hint,
}
],
"class A extends B { constructor() { var c = class extends D { constructor() { super(); } } } }": [
{
col: 20,
message: no_super_message,
hint: no_super_hint,
}
],
"class A extends B { constructor() { var c = () => super(); } }": [
{
col: 20,
message: no_super_message,
hint: no_super_hint,
}
],
"class A extends B { constructor() { class C extends D { constructor() { super(); } } } }": [
{
col: 20,
message: no_super_message,
hint: no_super_hint,
}
],
"class A extends B { constructor() { var C = class extends D { constructor() { super(); } } } }": [
{
col: 20,
message: no_super_message,
hint: no_super_hint,
}
],
"class A extends B { constructor() { super(); super(); } }": [
{
col: 45,
message: too_many_super_message,
hint: too_many_super_hint,
}
],
"class A extends B { constructor() { return; super(); } }": [
{
col: 20,
message: no_super_message,
hint: no_super_hint,
}
],
"class Foo extends Bar { constructor() { for (a in b) for (c in d); } }": [
{
col: 24,
message: no_super_message,
hint: no_super_hint,
}
],
r#"
class A extends B {
constructor() {
class C extends D {
constructor() {}
}
super();
}
}
"#: [
{
line: 5,
col: 6,
message: no_super_message,
hint: no_super_hint,
}
],
r#"
class A extends B {
constructor() {
super();
}
foo() {
class C extends D {
constructor() {}
}
}
}
"#: [
{
line: 8,
col: 6,
message: no_super_message,
hint: no_super_hint,
}
],
r#"
class A extends B {
constructor() {
class C extends null {
constructor() {
super();
}
}
super();
}
}
"#: [
{
line: 5,
col: 6,
message: unnecessary_constructor_message,
hint: unnecessary_constructor_hint,
}
],
r#"
class A extends B {
constructor() {
class C extends null {
constructor() {}
}
super();
}
}
"#: [
{
line: 5,
col: 6,
message: unnecessary_constructor_message,
hint: unnecessary_constructor_hint,
}
]
};
}
}
| 27.059406 | 125 | 0.568606 |
0a5d6a257288d9a489e0a235c4f6ea41cd1aed1f
| 517 |
use quote::__private::TokenStream;
use quote::{quote, ToTokens, TokenStreamExt};
pub enum VNodeType {
Widget,
ContainerWidget,
TopLevelContainerWidget,
}
impl ToTokens for VNodeType {
fn to_tokens(&self, tokens: &mut TokenStream) {
tokens.append_all(match self {
VNodeType::Widget => quote! { Widget },
VNodeType::ContainerWidget => quote! { ContainerWidget },
VNodeType::TopLevelContainerWidget => quote! { TopLevelContainerWidget },
});
}
}
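// Usage sketch (names outside this file are assumptions): interpolating a
// `VNodeType` in `quote!` appends the matching identifier, e.g.
//     let kind = VNodeType::ContainerWidget;
//     let ts = quote! { vdom::#kind }; // expands to `vdom::ContainerWidget`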
| 27.210526 | 85 | 0.651838 |
01c36011337f366efc79a092035681d3390ab151
| 3,535 |
use std::sync::{Arc, Mutex};
use glutin::platform::run_return::EventLoopExtRunReturn;
use crate::{assets::AssetManager, context::Context, engine::Engine, system::System};
use self::builder::AppBuilder;
mod builder;
pub struct App<'a, T> where T: System {
name: &'a str,
system: Arc<Mutex<T>>,
size: (u32, u32),
context: Option<Context>
}
impl<'a, T> App<'a, T> where T: System {
pub fn builder() -> AppBuilder<'a, T> {
AppBuilder::default()
}
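    // Illustrative construction (the builder's setter names are assumptions,
    // not verified against `AppBuilder`):
    //     App::builder().name("demo").size((1280, 720)).system(sys).build().run();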
pub fn run(mut self) {
let mut asset_manager = AssetManager::new();
asset_manager.awake_hotreload("./assets".into());
let (engine, mut event_loop) = Engine::new(self.name.to_string(), self.size.0, self.size.1, asset_manager.clone());
let context = Context::new(engine, asset_manager);
self.context = Some(context);
// Awake engine
self.context.as_mut().unwrap().engine_mut().awake();
// Awake system
self.system.lock().unwrap().awake(self.context.as_mut().unwrap());
// Start event loop
let start_time = std::time::Instant::now();
event_loop.run_return(move |event, _, control_flow| {
use glutin::event::{Event, WindowEvent};
use glutin::event_loop::ControlFlow;
*control_flow = ControlFlow::Wait;
match event {
Event::LoopDestroyed => return,
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::Resized(e) => {
unsafe {
gl::Viewport(
0,
0,
e.width as gl::types::GLint,
e.height as gl::types::GLint,
);
};
}
_ => (),
},
Event::RedrawRequested(_) => {
let renderer = self.context.as_mut().unwrap().engine_mut().renderer_mut();
// Clear screen
renderer.clear();
#[cfg(debug_assertions)]
renderer.update_editor();
self.system.lock().unwrap().update(self.context.as_mut().unwrap());
let renderer = self.context.as_mut().unwrap().engine_mut().renderer_mut();
renderer.swap_buffers();
self.system.lock().unwrap().late_update(self.context.as_mut().unwrap());
}
_ => (),
}
match *control_flow {
ControlFlow::Exit => (),
_ => {
let renderer = self.context.as_mut().unwrap().engine_mut().renderer_mut();
renderer.window().window().request_redraw();
let elapsed_time = std::time::Instant::now().duration_since(start_time);
let elapsed_time = elapsed_time.as_millis() as u64;
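                // Cap the redraw loop at roughly 144 FPS: wait out whatever
                // is left of the ~6.9 ms frame budget (1000 / 144 ms), or
                // don't wait at all if this frame already ran long.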
let wait_millis = match 1000 / 144 >= elapsed_time {
true => 1000 / 144 - elapsed_time,
false => 0,
};
let new_inst = start_time + std::time::Duration::from_millis(wait_millis);
*control_flow = ControlFlow::WaitUntil(new_inst);
}
}
});
}
}
| 36.071429 | 123 | 0.487129 |
7694383ab07e9851b984127a366372f180bc2958
| 283 |
use num_enum::{IntoPrimitive, FromPrimitive};
#[derive(IntoPrimitive, FromPrimitive, Clone, Copy, PartialEq, PartialOrd, Debug)]
#[repr(u8)]
pub enum Backend {
DEFAULT = 0,
OPENGL = 1,
VULKAN = 2,
METAL = 3,
NOOP = 4,
#[num_enum(default)]
UNKNOWN = 255
}
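// Round-trip sketch of the conversions derived above (illustrative):
//     assert_eq!(u8::from(Backend::METAL), 3);
//     assert_eq!(Backend::from(1u8), Backend::OPENGL);
//     assert_eq!(Backend::from(42u8), Backend::UNKNOWN); // unmapped values fall back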
| 21.769231 | 82 | 0.636042 |
e6ffe5cc3e3c4817b5c0656727825827eebd76fa
| 3,171 |
use std::{collections::HashMap, marker::PhantomData};
use serde::de::DeserializeOwned;
use serde_json::{Map, Value};
use crate::format_name::FormatName;
use super::{
json::{JsonData, JsonDataInfo},
Output, OutputResult,
};
pub struct JsonCompactOutput<T> {
phantom: PhantomData<T>,
}
impl<T> Default for JsonCompactOutput<T> {
fn default() -> Self {
Self::new()
}
}
impl<T> JsonCompactOutput<T> {
pub fn new() -> Self {
Self {
phantom: PhantomData,
}
}
}
pub type GeneralJsonCompactOutput = JsonCompactOutput<HashMap<String, Value>>;
impl<T> Output for JsonCompactOutput<T>
where
T: DeserializeOwned,
{
type Row = T;
type Info = JsonDataInfo;
type Error = serde_json::Error;
fn format_name() -> FormatName {
FormatName::JsonCompact
}
fn deserialize(&self, slice: &[u8]) -> OutputResult<Self::Row, Self::Info, Self::Error> {
self.deserialize_with::<Value>(slice)
}
}
impl<T> JsonCompactOutput<T>
where
T: DeserializeOwned,
{
pub(crate) fn deserialize_with<V>(
&self,
slice: &[u8],
) -> OutputResult<<Self as Output>::Row, <Self as Output>::Info, <Self as Output>::Error>
where
V: DeserializeOwned + Into<Value>,
{
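        // JSONCompact encodes rows as positional arrays, e.g. (shape
        // illustrative): {"meta":[{"name":"id","type":"UInt8"}],"data":[[1],[2]],"rows":2}
        // Zipping each row with the column names from `meta` rebuilds keyed
        // objects that serde can deserialize into `T`.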
let json_data_tmp: JsonData<Vec<V>> = serde_json::from_slice(slice)?;
let keys: Vec<_> = json_data_tmp
.meta
.iter()
.map(|x| x.name.to_owned())
.collect();
let mut data: Vec<T> = vec![];
for values in json_data_tmp.data.into_iter() {
let map: Map<_, _> = keys
.iter()
.zip(values)
.map(|(k, v)| (k.to_owned(), v.into()))
.collect();
data.push(serde_json::from_value(Value::Object(map))?);
}
Ok((
data,
JsonDataInfo {
meta: json_data_tmp.meta,
rows: json_data_tmp.rows,
statistics: json_data_tmp.statistics,
},
))
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::{error, fs, path::PathBuf};
use crate::test_helpers::{TestRow, TEST_ROW_1};
#[test]
fn simple() -> Result<(), Box<dyn error::Error>> {
let file_path = PathBuf::new().join("tests/files/JSONCompact.json");
let content = fs::read_to_string(&file_path)?;
assert_eq!(
GeneralJsonCompactOutput::format_name(),
file_path
.file_stem()
.unwrap()
.to_string_lossy()
.parse()
.unwrap()
);
let (rows, info) = GeneralJsonCompactOutput::new().deserialize(&content.as_bytes()[..])?;
assert_eq!(
rows.first().unwrap().get("tuple1").unwrap(),
&Value::Array(vec![1.into(), "a".into()])
);
assert_eq!(info.rows, 2);
let (rows, info) =
JsonCompactOutput::<TestRow>::new().deserialize(&content.as_bytes()[..])?;
assert_eq!(rows.first().unwrap(), &*TEST_ROW_1);
assert_eq!(info.rows, 2);
Ok(())
}
}
| 25.368 | 97 | 0.540839 |
0afb4874e1ceb5dbccda032a940efe518fefc1c6
| 4,976 |
extern crate regex;
use self::regex::Regex;
use std::str::FromStr;
use std::ascii::AsciiExt;
use std::collections::HashMap;
use instruction::{Instruction, Label};
#[derive(Debug, PartialEq)]
struct Line {
label: Option<Label>,
insn: Option<Instruction>
}
/* Matches an optional label followed by an optional instruction. Whitespace or empty string matches as well */
static LINE_RE: &'static str = r"\s*((?P<label>\S+):)?\s*((?P<insn>\S+.*))?";
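// For example (illustrative): "LOOP: MOV A B" yields label "LOOP" and
// insn "MOV A B"; "  NOP" yields only an insn; an empty line yields neither.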
impl FromStr for Line {
type Err = &'static str;
fn from_str(line: &str) -> Result<Line, Self::Err> {
let re = Regex::new(LINE_RE).unwrap(); // TODO optimize regex compilation
match re.captures(&line.to_ascii_uppercase()) {
Some(caps) => {
let label = caps.name("label").map(|s| s.to_string());
/* No insn regex match is ok. Else return Err() from parse_insn() or Ok(Some(Insn)) */
let insn: Result<Option<Instruction>, Self::Err> = caps.name("insn")
.map_or(Ok(None), |s| Instruction::from_str(s).map(|i| Some(i)));
insn.map(|insn| Line { insn: insn, label: label })
},
None => Err("Unparsed line"),
}
}
}
fn parse_program(p: &str) -> Result<Vec<Line>, &'static str> {
let line_strs: Vec<&str> = p.lines().collect();
let mut lines = Vec::with_capacity(line_strs.len());
for line_str in line_strs {
lines.push(try!(Line::from_str(line_str)));
}
Ok(lines)
}
#[derive(Debug, PartialEq)]
pub struct InstructionLine {
insn: Instruction,
srcline: u32,
}
#[derive(Debug, PartialEq)]
pub struct Executable {
lines: Vec<InstructionLine>,
labels: HashMap<Label, u32>,
}
impl Executable {
pub fn line_at(&self, line: usize) -> &InstructionLine {
&self.lines[line]
}
pub fn insn_at(&self, line: usize) -> &Instruction {
&self.line_at(line).insn
}
pub fn srcline_at(&self, line: usize) -> u32 {
self.line_at(line).srcline
}
pub fn len(&self) -> usize {
self.lines.len()
}
pub fn label_line(&self, label: &str) -> u32 {
self.labels[label]
}
}
pub fn parse(p: &str) -> Result<Executable, &'static str> {
let mut lines = try!(parse_program(p));
let validlines = lines.iter().filter(|l| l.insn != None).count();
let numlabels = lines.iter().filter(|l| l.label != None).count();
let mut executable = Executable { lines: Vec::with_capacity(validlines),
labels: HashMap::with_capacity(numlabels) };
/* Would like a better consuming iterator */
for i in 0..lines.len() as u32 {
let l = lines.remove(0);
if let Some(insn) = l.insn {
executable.lines.push(InstructionLine { insn: insn, srcline: i });
}
if let Some(label) = l.label {
executable.labels.insert(label, i);
}
}
assert_eq!(executable.lines.len(), validlines);
assert_eq!(executable.labels.len(), numlabels);
/* Resolve label pointers from src line to instruction # */
for (_, lineno) in executable.labels.iter_mut() {
let mut i = 0;
for insnline in executable.lines.iter() {
if insnline.srcline >= *lineno {
*lineno = i;
break;
}
i += 1;
}
assert!(i < executable.lines.len() as u32);
}
/* Make sure all JMP labels exist */
for line in executable.lines.iter() {
if let Instruction::J { cond: _, ref dst } = line.insn {
if !executable.labels.contains_key(dst) {
return Err("Jump to undefined label");
}
}
}
Ok(executable)
}
#[cfg(test)]
mod tests {
use super::{Line, parse};
use std::str::FromStr;
#[test]
fn test_parse_line() {
use instruction;
use instruction::Instruction;
fn l(s: &str) -> Result<Line, &'static str> {
println!("{}", s);
Line::from_str(s)
}
assert_eq!(l("foo: NOP").unwrap(), Line { label: Some("FOO".to_string()), insn: Some(Instruction::NOP) });
/* Label with : has questionable utility */
assert_eq!(l("foo:: NOP").unwrap(), Line { label: Some("FOO:".to_string()), insn: Some(Instruction::NOP) });
assert_eq!(l(" NOP ").unwrap(), Line { label: None, insn: Some(Instruction::NOP) });
assert_eq!(l("").unwrap(), Line { label: None, insn: None });
assert_eq!(l("SUB b c").unwrap_err(), instruction::BAD_OPCODE_ERR);
assert_eq!(l("a b c d").unwrap_err(), instruction::NUM_ARGS_ERR);
}
#[test]
fn test_parse() {
let e1 = parse("TOP:\nNOP\nJMP TOP").unwrap();
let e2 = parse("\nTOP:NOP\nJMP TOP").unwrap();
assert_eq!(e1.lines.len(), e2.lines.len());
for (l1, l2) in e1.lines.iter().zip(e2.lines.iter()) {
assert_eq!(l1.insn, l2.insn);
}
}
}
| 30.527607 | 116 | 0.566519 |
f7744b5b3f39b21c7514fcfffd1b27ced19e0ba9
| 27,498 |
// Copyright 2015 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
use {der, Error};
use ring::signature;
use untrusted;
/// X.509 certificates and related items that are signed are almost always
/// encoded in the format "tbs||signatureAlgorithm||signature". This structure
/// captures this pattern.
pub struct SignedData<'a> {
/// The signed data. This would be `tbsCertificate` in the case of an X.509
/// certificate, `tbsResponseData` in the case of an OCSP response, and the
/// data nested in the `digitally-signed` construct for TLS 1.2 signed
/// data.
data: untrusted::Input<'a>,
/// The value of the `AlgorithmIdentifier`. This would be
/// `signatureAlgorithm` in the case of an X.509 certificate or OCSP
/// response. This would have to be synthesized in the case of TLS 1.2
/// signed data, since TLS does not identify algorithms by ASN.1 OIDs.
pub algorithm: untrusted::Input<'a>,
/// The value of the signature. This would be `signature` in an X.509
/// certificate or OCSP response. This would be the value of
/// `DigitallySigned.signature` for TLS 1.2 signed data.
signature: untrusted::Input<'a>,
}
/// Parses the concatenation of "tbs||signatureAlgorithm||signature" that
/// is common in the X.509 certificate and OCSP response syntaxes.
///
/// X.509 Certificates (RFC 5280) look like this:
///
/// ```ASN.1
/// Certificate (SEQUENCE) {
/// tbsCertificate TBSCertificate,
/// signatureAlgorithm AlgorithmIdentifier,
/// signatureValue BIT STRING
/// }
/// ```
///
/// OCSP responses (RFC 6960) look like this:
///
/// ```ASN.1
/// BasicOCSPResponse {
/// tbsResponseData ResponseData,
/// signatureAlgorithm AlgorithmIdentifier,
/// signature BIT STRING,
/// certs [0] EXPLICIT SEQUENCE OF Certificate OPTIONAL
/// }
/// ```
///
/// Note that this function does NOT parse the outermost `SEQUENCE` or the
/// `certs` value.
///
/// The return value's first component is the contents of
/// `tbsCertificate`/`tbsResponseData`; the second component is a `SignedData`
/// structure that can be passed to `verify_signed_data`.
pub fn parse_signed_data<'a>(der: &mut untrusted::Reader<'a>)
-> Result<(untrusted::Input<'a>, SignedData<'a>),
Error> {
let mark1 = der.mark();
let tbs = der::expect_tag_and_get_value(der, der::Tag::Sequence)?;
let mark2 = der.mark();
let data = der.get_input_between_marks(mark1, mark2).unwrap();
let algorithm = der::expect_tag_and_get_value(der, der::Tag::Sequence)?;
let signature = der::bit_string_with_no_unused_bits(der)?;
Ok((tbs, SignedData { data, algorithm, signature }))
}
/// Verify `signed_data` using the public key in the DER-encoded
/// SubjectPublicKeyInfo `spki` using one of the algorithms in
/// `supported_algorithms`.
///
/// The algorithm is chosen based on the algorithm information encoded in the
/// algorithm identifiers in `public_key` and `signed_data.algorithm`. The
/// ordering of the algorithms in `supported_algorithms` does not really matter,
/// but generally more common algorithms should go first, as it is scanned
/// linearly for matches.
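///
/// A typical call looks like this (sketch; `spki` is assumed to hold the
/// DER-encoded `subjectPublicKeyInfo` value of the issuer's certificate):
///
/// ```ignore
/// let (_tbs, signed_data) = parse_signed_data(&mut reader)?;
/// verify_signed_data(&[&ECDSA_P256_SHA256, &RSA_PKCS1_2048_8192_SHA256],
///                    spki, &signed_data)?;
/// ```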
pub fn verify_signed_data(supported_algorithms: &[&SignatureAlgorithm],
spki_value: untrusted::Input,
signed_data: &SignedData) -> Result<(), Error> {
// We need to verify the signature in `signed_data` using the public key
// in `public_key`. In order to know which *ring* signature verification
// algorithm to use, we need to know the public key algorithm (ECDSA,
// RSA PKCS#1, etc.), the curve (if applicable), and the digest algorithm.
// `signed_data` identifies only the public key algorithm and the digest
// algorithm, and `public_key` identifies only the public key algorithm and
// the curve (if any). Thus, we have to combine information from both
// inputs to figure out which `ring::signature::VerificationAlgorithm` to
// use to verify the signature.
//
// This is all further complicated by the fact that we don't have any
// implicit knowledge about any algorithms or identifiers, since all of
// that information is encoded in `supported_algorithms.` In particular, we
// avoid hard-coding any of that information so that (link-time) dead code
// elimination will work effectively in eliminating code for unused
// algorithms.
    // Try every supported algorithm whose signature algorithm identifier
    // matches, and remember whether any matched at all so that we can
    // report the more specific error below.
let mut found_signature_alg_match = false;
for supported_alg in supported_algorithms.iter()
.filter(|alg| alg.signature_alg_id
.matches_algorithm_id_value(signed_data.algorithm)) {
match verify_signature(supported_alg, spki_value, signed_data.data,
signed_data.signature) {
Err(Error::UnsupportedSignatureAlgorithmForPublicKey) => {
found_signature_alg_match = true;
continue;
},
result => { return result; },
}
}
if found_signature_alg_match {
Err(Error::UnsupportedSignatureAlgorithmForPublicKey)
} else {
Err(Error::UnsupportedSignatureAlgorithm)
}
}
pub fn verify_signature(signature_alg: &SignatureAlgorithm,
spki_value: untrusted::Input, msg: untrusted::Input,
signature: untrusted::Input) -> Result<(), Error> {
let spki = parse_spki_value(spki_value)?;
if !signature_alg.public_key_alg_id
.matches_algorithm_id_value(spki.algorithm_id_value) {
return Err(Error::UnsupportedSignatureAlgorithmForPublicKey);
}
signature::verify(signature_alg.verification_alg, spki.key_value, msg,
signature)
.map_err(|_| Error::InvalidSignatureForPublicKey)
}
struct SubjectPublicKeyInfo<'a> {
algorithm_id_value: untrusted::Input<'a>,
key_value: untrusted::Input<'a>,
}
// Parse the public key into an algorithm OID, an optional curve OID, and the
// key value. The caller needs to check whether these match the
// `PublicKeyAlgorithm` for the `SignatureAlgorithm` that is matched when
// parsing the signature.
fn parse_spki_value(input: untrusted::Input)
-> Result<SubjectPublicKeyInfo, Error> {
input.read_all(Error::BadDER, |input| {
let algorithm_id_value =
der::expect_tag_and_get_value(input, der::Tag::Sequence)?;
let key_value = der::bit_string_with_no_unused_bits(input)?;
Ok(SubjectPublicKeyInfo {
algorithm_id_value: algorithm_id_value,
key_value: key_value,
})
})
}
/// A signature algorithm.
pub struct SignatureAlgorithm {
public_key_alg_id: AlgorithmIdentifier,
signature_alg_id: AlgorithmIdentifier,
verification_alg: &'static signature::VerificationAlgorithm,
}
/// ECDSA signatures using the P-256 curve and SHA-256.
pub static ECDSA_P256_SHA256: SignatureAlgorithm = SignatureAlgorithm {
public_key_alg_id: ECDSA_P256,
signature_alg_id: ECDSA_SHA256,
verification_alg: &signature::ECDSA_P256_SHA256_ASN1,
};
/// ECDSA signatures using the P-256 curve and SHA-384. Deprecated.
pub static ECDSA_P256_SHA384: SignatureAlgorithm = SignatureAlgorithm {
public_key_alg_id: ECDSA_P256,
signature_alg_id: ECDSA_SHA384,
verification_alg: &signature::ECDSA_P256_SHA384_ASN1,
};
/// ECDSA signatures using the P-384 curve and SHA-256. Deprecated.
pub static ECDSA_P384_SHA256: SignatureAlgorithm = SignatureAlgorithm {
public_key_alg_id: ECDSA_P384,
signature_alg_id: ECDSA_SHA256,
verification_alg: &signature::ECDSA_P384_SHA256_ASN1,
};
/// ECDSA signatures using the P-384 curve and SHA-384.
pub static ECDSA_P384_SHA384: SignatureAlgorithm = SignatureAlgorithm {
public_key_alg_id: ECDSA_P384,
signature_alg_id: ECDSA_SHA384,
verification_alg: &signature::ECDSA_P384_SHA384_ASN1,
};
/// RSA PKCS#1 1.5 signatures using SHA-1 for keys of 2048-8192 bits.
/// Deprecated.
pub static RSA_PKCS1_2048_8192_SHA1: SignatureAlgorithm = SignatureAlgorithm {
public_key_alg_id: RSA_ENCRYPTION,
signature_alg_id: RSA_PKCS1_SHA1,
verification_alg: &signature::RSA_PKCS1_2048_8192_SHA1,
};
/// RSA PKCS#1 1.5 signatures using SHA-256 for keys of 2048-8192 bits.
pub static RSA_PKCS1_2048_8192_SHA256: SignatureAlgorithm = SignatureAlgorithm {
public_key_alg_id: RSA_ENCRYPTION,
signature_alg_id: RSA_PKCS1_SHA256,
verification_alg: &signature::RSA_PKCS1_2048_8192_SHA256,
};
/// RSA PKCS#1 1.5 signatures using SHA-384 for keys of 2048-8192 bits.
pub static RSA_PKCS1_2048_8192_SHA384: SignatureAlgorithm = SignatureAlgorithm {
public_key_alg_id: RSA_ENCRYPTION,
signature_alg_id: RSA_PKCS1_SHA384,
verification_alg: &signature::RSA_PKCS1_2048_8192_SHA384,
};
/// RSA PKCS#1 1.5 signatures using SHA-512 for keys of 2048-8192 bits.
pub static RSA_PKCS1_2048_8192_SHA512: SignatureAlgorithm = SignatureAlgorithm {
public_key_alg_id: RSA_ENCRYPTION,
signature_alg_id: RSA_PKCS1_SHA512,
verification_alg: &signature::RSA_PKCS1_2048_8192_SHA512,
};
/// RSA PKCS#1 1.5 signatures using SHA-384 for keys of 3072-8192 bits.
pub static RSA_PKCS1_3072_8192_SHA384: SignatureAlgorithm = SignatureAlgorithm {
public_key_alg_id: RSA_ENCRYPTION,
signature_alg_id: RSA_PKCS1_SHA384,
verification_alg: &signature::RSA_PKCS1_3072_8192_SHA384,
};
/// RSA PSS signatures using SHA-256 for keys of 2048-8192 bits and of
/// type rsaEncryption; see https://tools.ietf.org/html/rfc4055#section-1.2
pub static RSA_PSS_2048_8192_SHA256_LEGACY_KEY: SignatureAlgorithm =
SignatureAlgorithm {
public_key_alg_id: RSA_ENCRYPTION,
signature_alg_id: RSA_PSS_SHA256,
verification_alg: &signature::RSA_PSS_2048_8192_SHA256,
};
/// RSA PSS signatures using SHA-384 for keys of 2048-8192 bits and of
/// type rsaEncryption; see https://tools.ietf.org/html/rfc4055#section-1.2
pub static RSA_PSS_2048_8192_SHA384_LEGACY_KEY: SignatureAlgorithm =
SignatureAlgorithm {
public_key_alg_id: RSA_ENCRYPTION,
signature_alg_id: RSA_PSS_SHA384,
verification_alg: &signature::RSA_PSS_2048_8192_SHA384,
};
/// RSA PSS signatures using SHA-512 for keys of 2048-8192 bits and of
/// type rsaEncryption; see https://tools.ietf.org/html/rfc4055#section-1.2
pub static RSA_PSS_2048_8192_SHA512_LEGACY_KEY: SignatureAlgorithm =
SignatureAlgorithm {
public_key_alg_id: RSA_ENCRYPTION,
signature_alg_id: RSA_PSS_SHA512,
verification_alg: &signature::RSA_PSS_2048_8192_SHA512,
};
struct AlgorithmIdentifier {
asn1_id_value: &'static [u8],
}
impl AlgorithmIdentifier {
fn matches_algorithm_id_value(&self, encoded: untrusted::Input) -> bool {
encoded == self.asn1_id_value
}
}
// See src/data/README.md.
const ECDSA_P256: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-ecdsa-p256.der"),
};
const ECDSA_P384: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-ecdsa-p384.der"),
};
const ECDSA_SHA256: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-ecdsa-sha256.der"),
};
const ECDSA_SHA384: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-ecdsa-sha384.der"),
};
const RSA_ENCRYPTION: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-rsa-encryption.der"),
};
const RSA_PKCS1_SHA1: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-rsa-pkcs1-sha1.der"),
};
const RSA_PKCS1_SHA256: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-rsa-pkcs1-sha256.der"),
};
const RSA_PKCS1_SHA384: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-rsa-pkcs1-sha384.der"),
};
const RSA_PKCS1_SHA512: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-rsa-pkcs1-sha512.der"),
};
const RSA_PSS_SHA256: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-rsa-pss-sha256.der"),
};
const RSA_PSS_SHA384: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-rsa-pss-sha384.der"),
};
const RSA_PSS_SHA512: AlgorithmIdentifier = AlgorithmIdentifier {
asn1_id_value: include_bytes!("data/alg-rsa-pss-sha512.der"),
};
#[cfg(test)]
mod tests {
use base64;
use std;
use std::io::BufRead;
use {der, Error, signed_data};
use untrusted;
// TODO: The expected results need to be modified for SHA-1 deprecation.
macro_rules! test_verify_signed_data {
($fn_name:ident, $file_name:expr, $expected_result:expr) => {
#[test]
fn $fn_name() {
test_verify_signed_data($file_name, $expected_result);
}
}
}
fn test_verify_signed_data(file_name: &str,
expected_result: Result<(), Error>) {
let tsd = parse_test_signed_data(file_name);
let spki_value = untrusted::Input::from(&tsd.spki);
let spki_value = spki_value.read_all(Error::BadDER, |input| {
der::expect_tag_and_get_value(input, der::Tag::Sequence)
}).unwrap();
// we can't use `parse_signed_data` because it requires `data`
// to be an ASN.1 SEQUENCE, and that isn't the case with
// Chromium's test data. TODO: The test data set should be
// expanded with SEQUENCE-wrapped data so that we can actually
// test `parse_signed_data`.
let algorithm = untrusted::Input::from(&tsd.algorithm);
let algorithm = algorithm.read_all(Error::BadDER, |input| {
der::expect_tag_and_get_value(input, der::Tag::Sequence)
}).unwrap();
let signature = untrusted::Input::from(&tsd.signature);
let signature = signature.read_all(Error::BadDER, |input| {
der::bit_string_with_no_unused_bits(input)
}).unwrap();
let signed_data = signed_data::SignedData {
data: untrusted::Input::from(&tsd.data),
algorithm: algorithm,
signature: signature
};
assert_eq!(expected_result,
signed_data::verify_signed_data(
SUPPORTED_ALGORITHMS_IN_TESTS, spki_value,
&signed_data));
}
// XXX: This is testing code that isn't even in this module.
macro_rules! test_verify_signed_data_signature_outer {
($fn_name:ident, $file_name:expr, $expected_result:expr) => {
#[test]
fn $fn_name() {
test_verify_signed_data_signature_outer($file_name,
$expected_result);
}
}
}
fn test_verify_signed_data_signature_outer(file_name: &str,
expected_error: Error) {
let tsd = parse_test_signed_data(file_name);
let signature = untrusted::Input::from(&tsd.signature);
assert_eq!(Err(expected_error),
signature.read_all(Error::BadDER, |input| {
der::bit_string_with_no_unused_bits(input)
}));
}
// XXX: This is testing code that is not even in this module.
macro_rules! test_parse_spki_bad_outer {
($fn_name:ident, $file_name:expr, $error:expr) => {
#[test]
fn $fn_name() {
test_parse_spki_bad_outer($file_name, $error)
}
}
}
fn test_parse_spki_bad_outer(file_name: &str, expected_error: Error) {
let tsd = parse_test_signed_data(file_name);
let spki = untrusted::Input::from(&tsd.spki);
assert_eq!(Err(expected_error),
spki.read_all(Error::BadDER, |input| {
der::expect_tag_and_get_value(input, der::Tag::Sequence)
}));
}
// XXX: Some of the BadDER tests should have better error codes, maybe?
// XXX: We should have a variant of this test with a SHA-256 digest that gives
// `Error::UnsupportedSignatureAlgorithmForPublicKey`.
test_verify_signed_data!(
test_ecdsa_prime256v1_sha512_spki_params_null,
"ecdsa-prime256v1-sha512-spki-params-null.pem",
Err(Error::UnsupportedSignatureAlgorithm));
test_verify_signed_data_signature_outer!(
test_ecdsa_prime256v1_sha512_unused_bits_signature,
"ecdsa-prime256v1-sha512-unused-bits-signature.pem",
Error::BadDER);
// XXX: We should have a variant of this test with a SHA-256 digest that gives
// `Error::UnsupportedSignatureAlgorithmForPublicKey`.
test_verify_signed_data!(
test_ecdsa_prime256v1_sha512_using_ecdh_key,
"ecdsa-prime256v1-sha512-using-ecdh-key.pem",
Err(Error::UnsupportedSignatureAlgorithm));
// XXX: We should have a variant of this test with a SHA-256 digest that gives
// `Error::UnsupportedSignatureAlgorithmForPublicKey`.
test_verify_signed_data!(
test_ecdsa_prime256v1_sha512_using_ecmqv_key,
"ecdsa-prime256v1-sha512-using-ecmqv-key.pem",
Err(Error::UnsupportedSignatureAlgorithm));
test_verify_signed_data!(
test_ecdsa_prime256v1_sha512_using_rsa_algorithm,
"ecdsa-prime256v1-sha512-using-rsa-algorithm.pem",
Err(Error::UnsupportedSignatureAlgorithmForPublicKey));
// XXX: We should have a variant of this test with a SHA-256 digest that gives
// `Error::InvalidSignatureForPublicKey`.
test_verify_signed_data!(
test_ecdsa_prime256v1_sha512_wrong_signature_format,
"ecdsa-prime256v1-sha512-wrong-signature-format.pem",
Err(Error::UnsupportedSignatureAlgorithm));
// Differs from Chromium because we don't support P-256 with SHA-512.
test_verify_signed_data!(
test_ecdsa_prime256v1_sha512,
"ecdsa-prime256v1-sha512.pem",
Err(Error::UnsupportedSignatureAlgorithm));
test_verify_signed_data!(test_ecdsa_secp384r1_sha256_corrupted_data,
"ecdsa-secp384r1-sha256-corrupted-data.pem",
Err(Error::InvalidSignatureForPublicKey));
test_verify_signed_data!(test_ecdsa_secp384r1_sha256,
"ecdsa-secp384r1-sha256.pem", Ok(()));
test_verify_signed_data!(
test_ecdsa_using_rsa_key, "ecdsa-using-rsa-key.pem",
Err(Error::UnsupportedSignatureAlgorithmForPublicKey));
test_parse_spki_bad_outer!(test_rsa_pkcs1_sha1_bad_key_der_length,
"rsa-pkcs1-sha1-bad-key-der-length.pem",
Error::BadDER);
test_parse_spki_bad_outer!(test_rsa_pkcs1_sha1_bad_key_der_null,
"rsa-pkcs1-sha1-bad-key-der-null.pem",
Error::BadDER);
test_verify_signed_data!(test_rsa_pkcs1_sha1_key_params_absent,
"rsa-pkcs1-sha1-key-params-absent.pem",
Err(Error::UnsupportedSignatureAlgorithmForPublicKey));
test_verify_signed_data!(
test_rsa_pkcs1_sha1_using_pss_key_no_params,
"rsa-pkcs1-sha1-using-pss-key-no-params.pem",
Err(Error::UnsupportedSignatureAlgorithmForPublicKey));
test_verify_signed_data!(test_rsa_pkcs1_sha1_wrong_algorithm,
"rsa-pkcs1-sha1-wrong-algorithm.pem",
Err(Error::InvalidSignatureForPublicKey));
// XXX: RSA PKCS#1 with SHA-1 is a supported algorithm, but we only accept
// 2048-8192 bit keys, and this test file is using a 1024 bit key. Thus,
// our results differ from Chromium's. TODO: this means we need a 2048+ bit
// version of this test.
test_verify_signed_data!(test_rsa_pkcs1_sha1, "rsa-pkcs1-sha1.pem",
Err(Error::InvalidSignatureForPublicKey));
// XXX: RSA PKCS#1 with SHA-1 is a supported algorithm, but we only accept
// 2048-8192 bit keys, and this test file is using a 1024 bit key. Thus,
// our results differ from Chromium's. TODO: this means we need a 2048+ bit
// version of this test.
test_verify_signed_data!(test_rsa_pkcs1_sha256, "rsa-pkcs1-sha256.pem",
Err(Error::InvalidSignatureForPublicKey));
test_parse_spki_bad_outer!(test_rsa_pkcs1_sha256_key_encoded_ber,
"rsa-pkcs1-sha256-key-encoded-ber.pem",
Error::BadDER);
test_verify_signed_data!(test_rsa_pkcs1_sha256_spki_non_null_params,
"rsa-pkcs1-sha256-spki-non-null-params.pem",
Err(Error::UnsupportedSignatureAlgorithmForPublicKey));
test_verify_signed_data!(
test_rsa_pkcs1_sha256_using_ecdsa_algorithm,
"rsa-pkcs1-sha256-using-ecdsa-algorithm.pem",
Err(Error::UnsupportedSignatureAlgorithmForPublicKey));
test_verify_signed_data!(
test_rsa_pkcs1_sha256_using_id_ea_rsa,
"rsa-pkcs1-sha256-using-id-ea-rsa.pem",
Err(Error::UnsupportedSignatureAlgorithmForPublicKey));
// Chromium's PSS test are for parameter combinations we don't support.
test_verify_signed_data!(test_rsa_pss_sha1_salt20_using_pss_key_no_params,
"rsa-pss-sha1-salt20-using-pss-key-no-params.pem",
Err(Error::UnsupportedSignatureAlgorithm));
test_verify_signed_data!(
test_rsa_pss_sha1_salt20_using_pss_key_with_null_params,
"rsa-pss-sha1-salt20-using-pss-key-with-null-params.pem",
Err(Error::UnsupportedSignatureAlgorithm));
test_verify_signed_data!(test_rsa_pss_sha1_salt20, "rsa-pss-sha1-salt20.pem",
Err(Error::UnsupportedSignatureAlgorithm));
test_verify_signed_data!(test_rsa_pss_sha1_wrong_salt,
"rsa-pss-sha1-wrong-salt.pem",
Err(Error::UnsupportedSignatureAlgorithm));
test_verify_signed_data!(test_rsa_pss_sha256_mgf1_sha512_salt33,
"rsa-pss-sha256-mgf1-sha512-salt33.pem",
Err(Error::UnsupportedSignatureAlgorithm));
test_verify_signed_data!(
test_rsa_pss_sha256_salt10_using_pss_key_with_params,
"rsa-pss-sha256-salt10-using-pss-key-with-params.pem",
Err(Error::UnsupportedSignatureAlgorithm));
test_verify_signed_data!(
test_rsa_pss_sha256_salt10_using_pss_key_with_wrong_params,
"rsa-pss-sha256-salt10-using-pss-key-with-wrong-params.pem",
Err(Error::UnsupportedSignatureAlgorithm));
test_verify_signed_data!(test_rsa_pss_sha256_salt10,
"rsa-pss-sha256-salt10.pem",
Err(Error::UnsupportedSignatureAlgorithm));
// Our PSS tests that should work.
test_verify_signed_data!(
test_rsa_pss_sha256_salt32,
"ours/rsa-pss-sha256-salt32.pem",
Ok(()));
test_verify_signed_data!(
test_rsa_pss_sha384_salt48,
"ours/rsa-pss-sha384-salt48.pem",
Ok(()));
test_verify_signed_data!(
test_rsa_pss_sha512_salt64,
"ours/rsa-pss-sha512-salt64.pem",
Ok(()));
test_verify_signed_data!(
test_rsa_pss_sha256_salt32_corrupted_data,
"ours/rsa-pss-sha256-salt32-corrupted-data.pem",
Err(Error::InvalidSignatureForPublicKey));
test_verify_signed_data!(
test_rsa_pss_sha384_salt48_corrupted_data,
"ours/rsa-pss-sha384-salt48-corrupted-data.pem",
Err(Error::InvalidSignatureForPublicKey));
test_verify_signed_data!(
test_rsa_pss_sha512_salt64_corrupted_data,
"ours/rsa-pss-sha512-salt64-corrupted-data.pem",
Err(Error::InvalidSignatureForPublicKey));
test_verify_signed_data!(
test_rsa_using_ec_key, "rsa-using-ec-key.pem",
Err(Error::UnsupportedSignatureAlgorithmForPublicKey));
test_verify_signed_data!(test_rsa2048_pkcs1_sha512,
"rsa2048-pkcs1-sha512.pem", Ok(()));
struct TestSignedData {
spki: std::vec::Vec<u8>,
data: std::vec::Vec<u8>,
algorithm: std::vec::Vec<u8>,
signature: std::vec::Vec<u8>
}
fn parse_test_signed_data(file_name: &str) -> TestSignedData {
let path =
std::path::PathBuf::from(
"third-party/chromium/data/verify_signed_data").join(file_name);
let file = std::fs::File::open(path).unwrap();
let mut lines = std::io::BufReader::new(&file).lines();
let spki = read_pem_section(&mut lines, "PUBLIC KEY");
let algorithm = read_pem_section(&mut lines, "ALGORITHM");
let data = read_pem_section(&mut lines, "DATA");
let signature = read_pem_section(&mut lines, "SIGNATURE");
TestSignedData { spki, data, algorithm, signature }
}
type FileLines<'a> = std::io::Lines<std::io::BufReader<&'a std::fs::File>>;
    fn read_pem_section(lines: &mut FileLines, section_name: &str)
-> std::vec::Vec<u8> {
// Skip comments and header
let begin_section = format!("-----BEGIN {}-----", section_name);
loop {
let line = lines.next().unwrap().unwrap();
if line == begin_section {
break;
}
}
let mut base64 = std::string::String::new();
let end_section = format!("-----END {}-----", section_name);
loop {
let line = lines.next().unwrap().unwrap();
if line == end_section {
break;
}
base64.push_str(&line);
}
base64::decode(&base64).unwrap()
}
static SUPPORTED_ALGORITHMS_IN_TESTS:
&'static [&'static signed_data::SignatureAlgorithm] = &[
// Reasonable algorithms.
&signed_data::RSA_PKCS1_2048_8192_SHA256,
&signed_data::ECDSA_P256_SHA256,
&signed_data::ECDSA_P384_SHA384,
&signed_data::RSA_PKCS1_2048_8192_SHA384,
&signed_data::RSA_PKCS1_2048_8192_SHA512,
&signed_data::RSA_PKCS1_3072_8192_SHA384,
&signed_data::RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
&signed_data::RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
&signed_data::RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
// Algorithms deprecated because they are annoying (P-521) or because
// they are nonsensical combinations.
&signed_data::ECDSA_P256_SHA384, // Truncates digest.
&signed_data::ECDSA_P384_SHA256, // Digest is unnecessarily short.
// Algorithms deprecated because they are bad.
&signed_data::RSA_PKCS1_2048_8192_SHA1, // SHA-1
];
}
| 42.435185 | 84 | 0.680049 |
89d277365119e4abdc4b40bd0bcf53364d1f0ac7
| 16,568 |
use crate::command_args::CommandArgs;
use crate::whole_stream_command::{whole_stream_command, WholeStreamCommand};
use async_trait::async_trait;
use derive_new::new;
use futures::StreamExt;
use log::trace;
use nu_errors::ShellError;
use nu_plugin::jsonrpc::JsonRpc;
use nu_protocol::{Primitive, ReturnValue, Signature, UntaggedValue, Value};
use nu_stream::{OutputStream, ToOutputStream};
use serde::{self, Deserialize, Serialize};
use std::collections::VecDeque;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::Write;
use std::path::Path;
use std::process::{Child, Command, Stdio};
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "method")]
#[allow(non_camel_case_types)]
pub enum NuResult {
response {
params: Result<VecDeque<ReturnValue>, ShellError>,
},
}
enum PluginCommand {
Filter(PluginFilter),
Sink(PluginSink),
}
impl PluginCommand {
fn command(self) -> Result<crate::whole_stream_command::Command, ShellError> {
match self {
PluginCommand::Filter(cmd) => Ok(whole_stream_command(cmd)),
PluginCommand::Sink(cmd) => Ok(whole_stream_command(cmd)),
}
}
}
enum PluginMode {
Filter,
Sink,
}
pub struct PluginCommandBuilder {
mode: PluginMode,
name: String,
path: String,
config: Signature,
}
impl PluginCommandBuilder {
pub fn new(
name: impl Into<String>,
path: impl Into<String>,
config: impl Into<Signature>,
) -> Self {
let config = config.into();
PluginCommandBuilder {
mode: if config.is_filter {
PluginMode::Filter
} else {
PluginMode::Sink
},
name: name.into(),
path: path.into(),
config,
}
}
pub fn build(&self) -> Result<crate::whole_stream_command::Command, ShellError> {
let mode = &self.mode;
let name = self.name.clone();
let path = self.path.clone();
let config = self.config.clone();
let cmd = match mode {
PluginMode::Filter => PluginCommand::Filter(PluginFilter { name, path, config }),
PluginMode::Sink => PluginCommand::Sink(PluginSink { name, path, config }),
};
cmd.command()
}
}
#[derive(new)]
pub struct PluginFilter {
name: String,
path: String,
config: Signature,
}
#[async_trait]
impl WholeStreamCommand for PluginFilter {
fn name(&self) -> &str {
&self.name
}
fn signature(&self) -> Signature {
self.config.clone()
}
fn usage(&self) -> &str {
&self.config.usage
}
async fn run(&self, args: CommandArgs) -> Result<OutputStream, ShellError> {
        run_filter(self.path.clone(), args).await
}
}
async fn run_filter(path: String, args: CommandArgs) -> Result<OutputStream, ShellError> {
trace!("filter_plugin :: {}", path);
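    // The plugin speaks line-delimited JSON-RPC over stdin/stdout. Rough
    // sketch of the exchange driven below (exact payload shapes are
    // assumptions based on the requests built here):
    //   -> begin_filter(call_info)   once, at the beginning of the stream
    //   -> filter(value)             once per input value
    //   -> end_filter()              once, at the end of the stream
    //   -> quit()                    to let the child process exit
    // Every request except `quit` is answered with a single-line
    // `{"method":"response","params":...}` message.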
let bos = futures::stream::iter(vec![
UntaggedValue::Primitive(Primitive::BeginningOfStream).into_untagged_value()
]);
let eos = futures::stream::iter(vec![
UntaggedValue::Primitive(Primitive::EndOfStream).into_untagged_value()
]);
let args = args.evaluate_once().await?;
let real_path = Path::new(&path);
let ext = real_path.extension();
let ps1_file = match ext {
Some(ext) => ext == "ps1",
None => false,
};
let mut child: Child = if ps1_file {
Command::new("pwsh")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.args(&[
"-NoLogo",
"-NoProfile",
"-ExecutionPolicy",
"Bypass",
"-File",
&real_path.to_string_lossy(),
])
.spawn()
.expect("Failed to spawn PowerShell process")
} else {
Command::new(path)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
.expect("Failed to spawn child process")
};
let call_info = args.call_info.clone();
trace!("filtering :: {:?}", call_info);
Ok(bos
.chain(args.input)
.chain(eos)
.map(move |item| {
match item {
Value {
value: UntaggedValue::Primitive(Primitive::BeginningOfStream),
..
} => {
// Beginning of the stream
let stdin = child.stdin.as_mut().expect("Failed to open stdin");
let stdout = child.stdout.as_mut().expect("Failed to open stdout");
let mut reader = BufReader::new(stdout);
let request = JsonRpc::new("begin_filter", call_info.clone());
let request_raw = serde_json::to_string(&request);
trace!("begin_filter:request {:?}", &request_raw);
match request_raw {
Err(_) => {
return OutputStream::one(Err(ShellError::labeled_error(
"Could not load json from plugin",
"could not load json from plugin",
&call_info.name_tag,
)));
}
Ok(request_raw) => {
match stdin.write(format!("{}\n", request_raw).as_bytes()) {
Ok(_) => {}
Err(err) => {
return OutputStream::one(Err(ShellError::unexpected(
format!("{}", err),
)));
}
}
}
}
let mut input = String::new();
match reader.read_line(&mut input) {
Ok(_) => {
let response = serde_json::from_str::<NuResult>(&input);
trace!("begin_filter:response {:?}", &response);
match response {
Ok(NuResult::response { params }) => match params {
Ok(params) => futures::stream::iter(params).to_output_stream(),
Err(e) => futures::stream::iter(vec![ReturnValue::Err(e)])
.to_output_stream(),
},
Err(e) => OutputStream::one(Err(
ShellError::untagged_runtime_error(format!(
"Error while processing begin_filter response: {:?} {}",
e, input
)),
)),
}
}
Err(e) => OutputStream::one(Err(ShellError::untagged_runtime_error(
format!("Error while reading begin_filter response: {:?}", e),
))),
}
}
Value {
value: UntaggedValue::Primitive(Primitive::EndOfStream),
..
} => {
// post stream contents
let stdin = child.stdin.as_mut().expect("Failed to open stdin");
let stdout = child.stdout.as_mut().expect("Failed to open stdout");
let mut reader = BufReader::new(stdout);
let request: JsonRpc<std::vec::Vec<Value>> = JsonRpc::new("end_filter", vec![]);
let request_raw = serde_json::to_string(&request);
trace!("end_filter:request {:?}", &request_raw);
match request_raw {
Err(_) => {
return OutputStream::one(Err(ShellError::labeled_error(
"Could not load json from plugin",
"could not load json from plugin",
&call_info.name_tag,
)));
}
Ok(request_raw) => {
match stdin.write(format!("{}\n", request_raw).as_bytes()) {
Ok(_) => {}
Err(err) => {
return OutputStream::one(Err(ShellError::unexpected(
format!("{}", err),
)));
}
}
}
}
let mut input = String::new();
let stream = match reader.read_line(&mut input) {
Ok(_) => {
let response = serde_json::from_str::<NuResult>(&input);
trace!("end_filter:response {:?}", &response);
match response {
Ok(NuResult::response { params }) => match params {
Ok(params) => futures::stream::iter(params).to_output_stream(),
Err(e) => futures::stream::iter(vec![ReturnValue::Err(e)])
.to_output_stream(),
},
Err(e) => futures::stream::iter(vec![Err(
ShellError::untagged_runtime_error(format!(
"Error while processing end_filter response: {:?} {}",
e, input
)),
)])
.to_output_stream(),
}
}
Err(e) => {
futures::stream::iter(vec![Err(ShellError::untagged_runtime_error(
format!("Error while reading end_filter response: {:?}", e),
))])
.to_output_stream()
}
};
let stdin = child.stdin.as_mut().expect("Failed to open stdin");
let request: JsonRpc<std::vec::Vec<Value>> = JsonRpc::new("quit", vec![]);
let request_raw = serde_json::to_string(&request);
trace!("quit:request {:?}", &request_raw);
match request_raw {
Ok(request_raw) => {
let _ = stdin.write(format!("{}\n", request_raw).as_bytes());
// TODO: Handle error
}
Err(e) => {
return OutputStream::one(Err(ShellError::untagged_runtime_error(
format!("Error while processing quit response: {:?}", e),
)));
}
}
let _ = child.wait();
stream
}
v => {
// Stream contents
let stdin = child.stdin.as_mut().expect("Failed to open stdin");
let stdout = child.stdout.as_mut().expect("Failed to open stdout");
let mut reader = BufReader::new(stdout);
let request = JsonRpc::new("filter", v);
let request_raw = serde_json::to_string(&request);
trace!("filter:request {:?}", &request_raw);
match request_raw {
Ok(request_raw) => {
let _ = stdin.write(format!("{}\n", request_raw).as_bytes());
// TODO: Handle error
}
Err(e) => {
return OutputStream::one(Err(ShellError::untagged_runtime_error(
format!("Error while processing filter response: {:?}", e),
)));
}
}
let mut input = String::new();
match reader.read_line(&mut input) {
Ok(_) => {
let response = serde_json::from_str::<NuResult>(&input);
trace!("filter:response {:?}", &response);
match response {
Ok(NuResult::response { params }) => match params {
Ok(params) => futures::stream::iter(params).to_output_stream(),
Err(e) => futures::stream::iter(vec![ReturnValue::Err(e)])
.to_output_stream(),
},
Err(e) => OutputStream::one(Err(
ShellError::untagged_runtime_error(format!(
"Error while processing filter response: {:?}\n== input ==\n{}",
e, input
)),
)),
}
}
Err(e) => OutputStream::one(Err(ShellError::untagged_runtime_error(
format!("Error while reading filter response: {:?}", e),
))),
}
}
}
})
.flatten()
.to_output_stream())
}
#[derive(new)]
pub struct PluginSink {
name: String,
path: String,
config: Signature,
}
#[async_trait]
impl WholeStreamCommand for PluginSink {
fn name(&self) -> &str {
&self.name
}
fn signature(&self) -> Signature {
self.config.clone()
}
fn usage(&self) -> &str {
&self.config.usage
}
async fn run(&self, args: CommandArgs) -> Result<OutputStream, ShellError> {
run_sink(self.path.clone(), args).await
}
}
async fn run_sink(path: String, args: CommandArgs) -> Result<OutputStream, ShellError> {
let args = args.evaluate_once().await?;
let call_info = args.call_info.clone();
let input: Vec<Value> = args.input.collect().await;
let request = JsonRpc::new("sink", (call_info.clone(), input));
let request_raw = serde_json::to_string(&request);
if let Ok(request_raw) = request_raw {
if let Ok(mut tmpfile) = tempfile::NamedTempFile::new() {
let _ = writeln!(tmpfile, "{}", request_raw);
let _ = tmpfile.flush();
let real_path = Path::new(&path);
let ext = real_path.extension();
let ps1_file = match ext {
Some(ext) => ext == "ps1",
None => false,
};
// TODO: This sink may not work in powershell, trying to find
// an example of what CallInfo would look like in this temp file
let child = if ps1_file {
Command::new("pwsh")
.args(&[
"-NoLogo",
"-NoProfile",
"-ExecutionPolicy",
"Bypass",
"-File",
&real_path.to_string_lossy(),
&tmpfile
.path()
.to_str()
.expect("Failed getting tmpfile path"),
])
.spawn()
} else {
Command::new(path).arg(&tmpfile.path()).spawn()
};
if let Ok(mut child) = child {
let _ = child.wait();
Ok(OutputStream::empty())
} else {
Err(ShellError::untagged_runtime_error(
"Could not create process for sink command",
))
}
} else {
Err(ShellError::untagged_runtime_error(
"Could not open file to send sink command message",
))
}
} else {
Err(ShellError::untagged_runtime_error(
"Could not create message to sink command",
))
}
}
| 36.654867 | 100 | 0.434995 |
6ab7935c04b1ae8512070d909ed3e1862b3c343f
| 362 |
pub mod protobuf;
pub mod rust;
pub mod sql;
pub use self::rust::RustCodeGenerator;
use crate::model::Model;
pub trait Generator<T> {
type Error;
fn add_model(&mut self, model: Model<T>);
fn models(&self) -> &[Model<T>];
fn models_mut(&mut self) -> &mut [Model<T>];
fn to_string(&self) -> Result<Vec<(String, String)>, Self::Error>;
}
| 18.1 | 70 | 0.632597 |
0e40f37191b97abdab012854904d9ac59f81626c
| 3,509 |
use std::ffi::OsStr;
static COMPILE_COMMANDS: &'static [&str] =
&[r"gcc",
r"g\+\+",
r"cc",
r"c\+\+",
r"clang",
r"clang\+\+",
r"clang-\d+(\.\d+)",
r"clang\+\+-\d+(\.\d+)",
r"gcc-\d+(\.\d+)",
r"g\+\+-\d+(\.\d+)"
];
lazy_static::lazy_static! {
static ref COMPILE_COMMAND_RE : regex::RegexSet = regex::RegexSet::new(COMPILE_COMMANDS).unwrap();
}
pub fn is_compile_command_name(cmd_name : &OsStr) -> bool {
match cmd_name.to_str() {
None => { false }
Some(s) => { COMPILE_COMMAND_RE.is_match(s) }
}
}
static SINGLE_ARG_OPTIONS : &'static [&str] =
&[r"-o",
r"--param",
r"-aux-info",
r"-A",
r"-D",
r"-U",
r"-arch",
r"-MF",
r"-MT",
r"-MQ",
r"-I",
r"-idirafter",
r"-include",
r"-imacros",
r"-iprefix",
r"-iwithprefix",
r"-iwithprefixbefore",
r"-isystem",
r"-isysroot",
r"-iquote",
r"-imultilib",
r"-target",
r"-x",
r"-Xclang",
r"-Xpreprocessor",
r"-Xassembler",
r"-Xlinker",
r"-l",
r"-L",
r"-T",
r"-u",
r"-e",
r"-rpath",
r"-current_version",
r"-compatibility_version"
];
lazy_static::lazy_static! {
static ref SINGLE_ARG_OPTION_RE : regex::RegexSet = regex::RegexSet::new(SINGLE_ARG_OPTIONS).unwrap();
}
pub fn is_unary_option(arg : &OsStr) -> bool {
match arg.to_str() {
None => { false }
Some(arg_str) => {
SINGLE_ARG_OPTION_RE.is_match(arg_str)
}
}
}
lazy_static::lazy_static! {
static ref OTHER_ARG_PREFIX_RE : regex::Regex = regex::Regex::new(r"-.*").unwrap();
}
pub fn is_nullary_option(arg : &OsStr) -> bool {
match arg.to_str() {
None => { false }
Some(arg_str) => {
OTHER_ARG_PREFIX_RE.is_match(arg_str)
}
}
}
static CLANG_ARGUMENT_BLACKLIST : &'static [&str] =
&[r"-fno-tree-loop-im",
r"-Wmaybe-uninitialized",
r"-Wno-maybe-uninitialized",
r"-mindirect-branch-register",
r"-mindirect-branch=.*",
r"-mpreferred-stack-boundary=\d+",
r"-Wframe-address",
r"-Wno-frame-address",
r"-Wno-format-truncation",
r"-Wno-format-overflow",
r"-Wformat-overflow",
r"-Wformat-truncation",
r"-Wpacked-not-aligned",
r"-Wno-packed-not-aligned",
r"-Werror=.*",
r"-Wno-restrict",
r"-Wrestrict",
r"-Wno-unused-but-set-variable",
r"-Wunused-but-set-variable",
r"-Wno-stringop-truncation",
r"-Wno-stringop-overflow",
r"-Wstringop-truncation",
r"-Wstringop-overflow",
r"-Wzero-length-bounds",
r"-Wno-zero-length-bounds",
r"-fno-allow-store-data-races",
r"-fno-var-tracking-assignments",
r"-fmerge-constants",
r"-fconserve-stack",
r"-falign-jumps=\d+",
r"-falign-loops=\d+",
r"-mno-fp-ret-in-387",
r"-mskip-rax-setup",
r"--param=.*"
];
lazy_static::lazy_static! {
static ref CLANG_ARGUMENT_BLACKLIST_RE : regex::RegexSet = regex::RegexSet::new(CLANG_ARGUMENT_BLACKLIST).unwrap();
}
/// Returns true if the argument is not accepted by clang and should be ignored
/// when constructing clang invocations.
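/// For example (illustrative): `-fconserve-stack` and
/// `-mpreferred-stack-boundary=4` are blacklisted, while `-O2` is not.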
pub fn is_blacklisted_clang_argument(a : &OsStr) -> bool {
match a.to_str() {
None => { false }
Some(str_arg) => {
CLANG_ARGUMENT_BLACKLIST_RE.is_match(str_arg)
}
}
}
| 24.711268 | 119 | 0.547164 |
75f147173da4fe13251b81f5bc976c09842acd68
| 13,077 |
//! Test data consumption using low level consumers.
use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};
use rdkafka::consumer::{BaseConsumer, Consumer, ConsumerContext};
use rdkafka::error::{KafkaError, RDKafkaErrorCode};
use rdkafka::topic_partition_list::{Offset, TopicPartitionList};
use rdkafka::util::{current_time_millis, Timeout};
use rdkafka::{ClientConfig, Message, Timestamp};
use crate::utils::*;
mod utils;
fn create_base_consumer(
group_id: &str,
config_overrides: Option<HashMap<&str, &str>>,
) -> BaseConsumer<ConsumerTestContext> {
consumer_config(group_id, config_overrides)
.create_with_context(ConsumerTestContext { _n: 64 })
.expect("Consumer creation failed")
}
// Seeking should allow replaying messages and skipping messages.
#[tokio::test]
async fn test_produce_consume_seek() {
let _r = env_logger::try_init();
let topic_name = rand_test_topic();
populate_topic(&topic_name, 5, &value_fn, &key_fn, Some(0), None).await;
let consumer = create_base_consumer(&rand_test_group(), None);
consumer.subscribe(&[topic_name.as_str()]).unwrap();
for (i, message) in consumer.iter().take(3).enumerate() {
match message {
Ok(message) => assert_eq!(dbg!(message.offset()), i as i64),
Err(e) => panic!("Error receiving message: {:?}", e),
}
}
consumer
.seek(&topic_name, 0, Offset::Offset(1), None)
.unwrap();
for (i, message) in consumer.iter().take(3).enumerate() {
match message {
Ok(message) => assert_eq!(message.offset(), i as i64 + 1),
Err(e) => panic!("Error receiving message: {:?}", e),
}
}
consumer
.seek(&topic_name, 0, Offset::OffsetTail(3), None)
.unwrap();
for (i, message) in consumer.iter().take(2).enumerate() {
match message {
Ok(message) => assert_eq!(message.offset(), i as i64 + 2),
Err(e) => panic!("Error receiving message: {:?}", e),
}
}
consumer.seek(&topic_name, 0, Offset::End, None).unwrap();
ensure_empty(&consumer, "There should be no messages left");
// Validate that unrepresentable offsets are rejected.
match consumer.seek(&topic_name, 0, Offset::Offset(-1), None) {
Err(KafkaError::Seek(s)) => assert_eq!(s, "Local: Unrepresentable offset"),
bad => panic!("unexpected return from invalid seek: {:?}", bad),
}
let mut tpl = TopicPartitionList::new();
match tpl.add_partition_offset(&topic_name, 0, Offset::OffsetTail(-1)) {
Err(KafkaError::SetPartitionOffset(RDKafkaErrorCode::InvalidArgument)) => (),
bad => panic!(
"unexpected return from invalid add_partition_offset: {:?}",
bad
),
}
match tpl.set_all_offsets(Offset::OffsetTail(-1)) {
Err(KafkaError::SetPartitionOffset(RDKafkaErrorCode::InvalidArgument)) => (),
bad => panic!(
"unexpected return from invalid add_partition_offset: {:?}",
bad
),
}
}
// All produced messages should be consumed.
#[tokio::test]
async fn test_produce_consume_iter() {
let _r = env_logger::try_init();
let start_time = current_time_millis();
let topic_name = rand_test_topic();
let message_map = populate_topic(&topic_name, 100, &value_fn, &key_fn, None, None).await;
let consumer = create_base_consumer(&rand_test_group(), None);
consumer.subscribe(&[topic_name.as_str()]).unwrap();
for message in consumer.iter().take(100) {
match message {
Ok(m) => {
let id = message_map[&(m.partition(), m.offset())];
match m.timestamp() {
Timestamp::CreateTime(timestamp) => assert!(timestamp >= start_time),
_ => panic!("Expected createtime for message timestamp"),
};
assert_eq!(m.payload_view::<str>().unwrap().unwrap(), value_fn(id));
assert_eq!(m.key_view::<str>().unwrap().unwrap(), key_fn(id));
assert_eq!(m.topic(), topic_name.as_str());
}
Err(e) => panic!("Error receiving message: {:?}", e),
}
}
}
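// Poll for a fixed window and assert that no message arrives in it.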
fn ensure_empty<C: ConsumerContext>(consumer: &BaseConsumer<C>, err_msg: &str) {
const MAX_TRY_TIME: Duration = Duration::from_secs(2);
let start = Instant::now();
while start.elapsed() < MAX_TRY_TIME {
assert!(consumer.poll(MAX_TRY_TIME).is_none(), "{}", err_msg);
}
}
#[tokio::test]
async fn test_pause_resume_consumer_iter() {
const PAUSE_COUNT: i32 = 3;
const MESSAGE_COUNT: i32 = 300;
const MESSAGES_PER_PAUSE: i32 = MESSAGE_COUNT / PAUSE_COUNT;
let _r = env_logger::try_init();
let topic_name = rand_test_topic();
populate_topic(
&topic_name,
MESSAGE_COUNT,
&value_fn,
&key_fn,
Some(0),
None,
)
.await;
let group_id = rand_test_group();
let consumer = create_base_consumer(&group_id, None);
consumer.subscribe(&[topic_name.as_str()]).unwrap();
for _ in 0..PAUSE_COUNT {
let mut num_taken = 0;
for message in consumer.iter().take(MESSAGES_PER_PAUSE as usize) {
message.unwrap();
num_taken += 1;
}
assert_eq!(num_taken, MESSAGES_PER_PAUSE);
let partitions = consumer.assignment().unwrap();
assert!(partitions.count() > 0);
consumer.pause(&partitions).unwrap();
ensure_empty(
&consumer,
"Partition is paused - we should not receive anything",
);
consumer.resume(&partitions).unwrap();
}
ensure_empty(&consumer, "There should be no messages left");
}
#[tokio::test]
async fn test_consume_partition_order() {
let _r = env_logger::try_init();
let topic_name = rand_test_topic();
populate_topic(&topic_name, 4, &value_fn, &key_fn, Some(0), None).await;
populate_topic(&topic_name, 4, &value_fn, &key_fn, Some(1), None).await;
populate_topic(&topic_name, 4, &value_fn, &key_fn, Some(2), None).await;
// Using partition queues should allow us to consume the partitions
// in a round-robin fashion.
{
let consumer = Arc::new(create_base_consumer(&rand_test_group(), None));
let mut tpl = TopicPartitionList::new();
tpl.add_partition_offset(&topic_name, 0, Offset::Beginning)
.unwrap();
tpl.add_partition_offset(&topic_name, 1, Offset::Beginning)
.unwrap();
tpl.add_partition_offset(&topic_name, 2, Offset::Beginning)
.unwrap();
consumer.assign(&tpl).unwrap();
let partition_queues: Vec<_> = (0..3)
.map(|i| consumer.split_partition_queue(&topic_name, i).unwrap())
.collect();
for _ in 0..4 {
let main_message = consumer.poll(Timeout::After(Duration::from_secs(0)));
assert!(main_message.is_none());
for (i, queue) in partition_queues.iter().enumerate() {
let queue_message = queue.poll(Timeout::Never).unwrap().unwrap();
assert_eq!(queue_message.partition(), i as i32);
}
}
}
// When not all partitions have been split into separate queues, the
// unsplit partitions should still be accessible via the main queue.
{
let consumer = Arc::new(create_base_consumer(&rand_test_group(), None));
let mut tpl = TopicPartitionList::new();
tpl.add_partition_offset(&topic_name, 0, Offset::Beginning)
.unwrap();
tpl.add_partition_offset(&topic_name, 1, Offset::Beginning)
.unwrap();
tpl.add_partition_offset(&topic_name, 2, Offset::Beginning)
.unwrap();
consumer.assign(&tpl).unwrap();
let partition1 = consumer.split_partition_queue(&topic_name, 1).unwrap();
let mut i = 0;
while i < 12 {
if let Some(m) = consumer.poll(Timeout::After(Duration::from_secs(0))) {
let partition = m.unwrap().partition();
assert!(partition == 0 || partition == 2);
i += 1;
}
if let Some(m) = partition1.poll(Timeout::After(Duration::from_secs(0))) {
assert_eq!(m.unwrap().partition(), 1);
i += 1;
}
}
}
// Sending the queue to another thread that is likely to outlive the
// original thread should work. This is not idiomatic, as the consumer
// should be continuously polled to serve callbacks, but it should not panic
// or result in memory unsafety, etc.
{
let consumer = Arc::new(create_base_consumer(&rand_test_group(), None));
let mut tpl = TopicPartitionList::new();
tpl.add_partition_offset(&topic_name, 0, Offset::Beginning)
.unwrap();
consumer.assign(&tpl).unwrap();
let queue = consumer.split_partition_queue(&topic_name, 0).unwrap();
let worker = thread::spawn(move || {
for _ in 0..4 {
let queue_message = queue.poll(Timeout::Never).unwrap().unwrap();
assert_eq!(queue_message.partition(), 0);
}
});
consumer.poll(Duration::from_secs(0));
drop(consumer);
worker.join().unwrap();
}
}
#[tokio::test]
async fn test_produce_consume_message_queue_nonempty_callback() {
let _r = env_logger::try_init();
let topic_name = rand_test_topic();
create_topic(&topic_name, 1).await;
let consumer: BaseConsumer<_> = consumer_config(&rand_test_group(), None)
.create_with_context(ConsumerTestContext { _n: 64 })
.expect("Consumer creation failed");
let consumer = Arc::new(consumer);
let mut tpl = TopicPartitionList::new();
tpl.add_partition_offset(&topic_name, 0, Offset::Beginning)
.unwrap();
consumer.assign(&tpl).unwrap();
let wakeups = Arc::new(AtomicUsize::new(0));
let mut queue = consumer.split_partition_queue(&topic_name, 0).unwrap();
queue.set_nonempty_callback({
let wakeups = wakeups.clone();
move || {
wakeups.fetch_add(1, Ordering::SeqCst);
}
});
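    // Helper: spin in 100ms steps until the wakeup counter reaches `target`,
    // panicking if it overshoots or if the 15s timeout elapses first.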
let wait_for_wakeups = |target| {
let start = Instant::now();
let timeout = Duration::from_secs(15);
loop {
let w = wakeups.load(Ordering::SeqCst);
if w == target {
break;
} else if w > target {
panic!("wakeups {} exceeds target {}", w, target);
}
thread::sleep(Duration::from_millis(100));
if start.elapsed() > timeout {
panic!("timeout exceeded while waiting for wakeup");
}
}
};
// Initiate connection.
assert!(consumer.poll(Duration::from_secs(0)).is_none());
// Expect no wakeups for 1s.
thread::sleep(Duration::from_secs(1));
assert_eq!(wakeups.load(Ordering::SeqCst), 0);
// Verify there are no messages waiting.
assert!(consumer.poll(Duration::from_secs(0)).is_none());
assert!(queue.poll(Duration::from_secs(0)).is_none());
// Populate the topic, and expect a wakeup notifying us of the new messages.
populate_topic(&topic_name, 2, &value_fn, &key_fn, None, None).await;
wait_for_wakeups(1);
// Read one of the messages.
assert!(queue.poll(Duration::from_secs(0)).is_some());
// Add more messages to the topic. Expect no additional wakeups, as the
// queue is not fully drained, for 1s.
populate_topic(&topic_name, 2, &value_fn, &key_fn, None, None).await;
thread::sleep(Duration::from_secs(1));
assert_eq!(wakeups.load(Ordering::SeqCst), 1);
// Drain the queue.
assert!(queue.poll(None).is_some());
assert!(queue.poll(None).is_some());
assert!(queue.poll(None).is_some());
// Expect no additional wakeups for 1s.
thread::sleep(Duration::from_secs(1));
assert_eq!(wakeups.load(Ordering::SeqCst), 1);
// Add another message, and expect a wakeup.
populate_topic(&topic_name, 1, &value_fn, &key_fn, None, None).await;
wait_for_wakeups(2);
// Expect no additional wakeups for 1s.
thread::sleep(Duration::from_secs(1));
assert_eq!(wakeups.load(Ordering::SeqCst), 2);
// Disable the queue and add another message.
queue.set_nonempty_callback(|| ());
populate_topic(&topic_name, 1, &value_fn, &key_fn, None, None).await;
// Expect no additional wakeups for 1s.
thread::sleep(Duration::from_secs(1));
assert_eq!(wakeups.load(Ordering::SeqCst), 2);
}
#[tokio::test]
async fn test_invalid_consumer_position() {
// Regression test for #360, in which calling `position` on a consumer which
// does not have a `group.id` configured segfaulted.
let consumer: BaseConsumer = ClientConfig::new().create().unwrap();
assert_eq!(
consumer.position(),
Err(KafkaError::MetadataFetch(RDKafkaErrorCode::UnknownGroup))
);
}
| 35.343243 | 93 | 0.616349 |
e41668d52042919c2a2f09848acd683c94d3a317
| 2,494 |
// Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_exception::Result;
use common_tracing::tracing;
use sqlparser::ast::Expr;
use sqlparser::ast::Ident;
use crate::sessions::QueryContext;
use crate::sql::statements::AnalyzableStatement;
use crate::sql::statements::AnalyzedResult;
use crate::sql::PlanParser;
#[derive(Debug, Clone, PartialEq)]
pub enum DfShowFunctions {
All,
Like(Ident),
Where(Expr),
}
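// For illustration: `DfShowFunctions::Like(name)` is analyzed by rewriting it
// into an ordinary query against the functions table, roughly
// `SELECT * FROM system.functions where name LIKE <name> ORDER BY name`.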
#[async_trait::async_trait]
impl AnalyzableStatement for DfShowFunctions {
#[tracing::instrument(level = "debug", skip(self, ctx), fields(ctx.id = ctx.get_id().as_str()))]
async fn analyze(&self, ctx: Arc<QueryContext>) -> Result<AnalyzedResult> {
let rewritten_query = self.rewritten_query(ctx.clone());
let rewritten_query_plan = PlanParser::parse(rewritten_query.as_str(), ctx);
Ok(AnalyzedResult::SimpleQuery(Box::new(
rewritten_query_plan.await?,
)))
}
}
const FUNCTIONS_TABLE: &str = "system.functions";
impl DfShowFunctions {
fn show_all_functions(&self, _ctx: Arc<QueryContext>) -> String {
format!("SELECT * FROM {} ORDER BY name", FUNCTIONS_TABLE,)
}
fn show_functions_with_like(&self, i: &Ident, _ctx: Arc<QueryContext>) -> String {
format!(
"SELECT * FROM {} where name LIKE {} ORDER BY name",
FUNCTIONS_TABLE, i,
)
}
fn show_functions_with_predicate(&self, e: &Expr, _ctx: Arc<QueryContext>) -> String {
format!(
"SELECT * FROM {} where ({}) ORDER BY name",
FUNCTIONS_TABLE, e,
)
}
fn rewritten_query(&self, ctx: Arc<QueryContext>) -> String {
match self {
DfShowFunctions::All => self.show_all_functions(ctx),
DfShowFunctions::Like(i) => self.show_functions_with_like(i, ctx),
DfShowFunctions::Where(e) => self.show_functions_with_predicate(e, ctx),
}
}
}
| 33.253333 | 100 | 0.668404 |
5dad8b23d8b60a48e4f17955cc30a350f2eebedb
| 2,098 |
//! A simple, non-interactive widget for drawing a single **Oval**.
use {Color, Colorable, Dimensions, Sizeable, Widget};
use super::Style;
use widget;
/// A simple, non-interactive widget for drawing a single **Oval**.
#[derive(Copy, Clone, Debug)]
pub struct Oval {
/// Data necessary and common for all widget builder types.
pub common: widget::CommonBuilder,
/// Unique styling.
pub style: Style,
}
/// Unique state for the **Oval**.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct State;
impl Oval {
/// Build an **Oval** with the given dimensions and style.
pub fn styled(dim: Dimensions, style: Style) -> Self {
Oval {
common: widget::CommonBuilder::new(),
style: style,
}.wh(dim)
}
/// Build a new **Fill**ed **Oval**.
pub fn fill(dim: Dimensions) -> Self {
Oval::styled(dim, Style::fill())
}
/// Build a new **Oval** **Fill**ed with the given color.
pub fn fill_with(dim: Dimensions, color: Color) -> Self {
Oval::styled(dim, Style::fill_with(color))
}
/// Build a new **Outline**d **Oval** widget.
pub fn outline(dim: Dimensions) -> Self {
Oval::styled(dim, Style::outline())
}
    /// Build a new **Oval** **Outline**d with the given line style.
pub fn outline_styled(dim: Dimensions, line_style: widget::line::Style) -> Self {
Oval::styled(dim, Style::outline_styled(line_style))
}
}
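// For illustration (hypothetical values; assumes conrod's usual `color`
// constants are in scope): a filled red oval could be built with something
// like `Oval::fill_with([40.0, 30.0], color::RED)` and then positioned and
// `set` like any other widget.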
impl Widget for Oval {
type State = State;
type Style = Style;
type Event = ();
fn common(&self) -> &widget::CommonBuilder {
&self.common
}
fn common_mut(&mut self) -> &mut widget::CommonBuilder {
&mut self.common
}
fn init_state(&self) -> State {
State
}
fn style(&self) -> Style {
self.style.clone()
}
fn update(self, _args: widget::UpdateArgs<Self>) -> Self::Event {
// Nothing to be updated here.
}
}
impl Colorable for Oval {
fn color(mut self, color: Color) -> Self {
self.style.set_color(color);
self
}
}
| 23.573034 | 85 | 0.592469 |
0eb0dd9e201c6f43f2fa3a27f8c3aa654a52fc95
| 526 |
use std::fmt::Display;
use std::path::{self, Path, PathBuf};
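// These helper traits let display-oriented macros call `.as_display()`
// uniformly: `Display` values pass through unchanged, while `Path`-like
// values (which are not `Display`) are rendered via `Path::display()`.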
pub trait DisplayAsDisplay {
fn as_display(&self) -> Self;
}
impl<T: Display> DisplayAsDisplay for &T {
fn as_display(&self) -> Self {
self
}
}
pub trait PathAsDisplay {
fn as_display(&self) -> path::Display<'_>;
}
impl PathAsDisplay for Path {
fn as_display(&self) -> path::Display<'_> {
self.display()
}
}
impl PathAsDisplay for PathBuf {
fn as_display(&self) -> path::Display<'_> {
self.display()
}
}
| 18.137931 | 47 | 0.606464 |
c143bcb9859c1a2700f333552af634ce3e6ff99a
| 1,028 |
use crate::{Message, Subscription};
use std::{
fmt::Debug,
sync::mpsc::{SendError, Sender},
};
#[derive(Debug)]
/// A generic implementation of bus subscription using `std::sync::mpsc`
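///
/// `sender` is the sending half of the channel over which matching events are
/// delivered; `discriminant_set` lists the message discriminants this
/// subscriber is interested in.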
pub struct Subscriber<M>
where
M: Message + Debug,
{
pub(crate) sender: Sender<<Self as Subscription>::Event>,
pub(crate) discriminant_set: Vec<<M as Message>::Discriminant>,
}
impl<M> PartialEq for Subscriber<M>
where
M: Message + Debug,
{
fn eq(&self, other: &Self) -> bool {
self.discriminant_set.eq(&other.discriminant_set)
}
}
impl<M> Subscription for Subscriber<M>
where
M: Message + Debug,
{
type Event = M;
fn subscribed_to(&self, message: &Self::Event) -> bool {
self.discriminant_set().contains(&message.discriminant())
}
fn discriminant_set(&self) -> &[<Self::Event as Message>::Discriminant] {
&self.discriminant_set
}
fn send_event(&self, message: Self::Event) -> Result<(), SendError<Self::Event>> {
self.sender.send(message)
}
}
| 25.073171 | 86 | 0.645914 |
1d45eb5de7a3cd5b02d5d30aad7567aea55d5136
| 21,904 |
// Internal
use crate::util::{Id, Key};
#[cfg(feature = "yaml")]
use yaml_rust::Yaml;
/// Family of related [arguments].
///
/// By placing arguments in a logical group, you can create easier requirement and
/// exclusion rules instead of having to list each argument individually, or when you want a rule
/// to apply to "any but not all" arguments.
///
/// For instance, you can make an entire `ArgGroup` required. If [`ArgGroup::multiple(true)`] is
/// set, this means that at least one argument from that group must be present. If
/// [`ArgGroup::multiple(false)`] is set (the default), one and *only* one must be present.
///
/// You can also do things such as name an entire `ArgGroup` as a [conflict] or [requirement] for
/// another argument, meaning any of the arguments that belong to that group will cause a failure
/// if present, or must be present, respectively.
///
/// Perhaps the most common use of `ArgGroup`s is to require one and *only* one argument to be
/// present out of a given set. Imagine that you had multiple arguments, and you want one of them
/// to be required, but making all of them required isn't feasible because perhaps they conflict
/// with each other. For example, lets say that you were building an application where one could
/// set a given version number by supplying a string with an option argument, i.e.
/// `--set-ver v1.2.3`, you also wanted to support automatically using a previous version number
/// and simply incrementing one of the three numbers. So you create three flags `--major`,
/// `--minor`, and `--patch`. All of these arguments shouldn't be used at one time but you want to
/// specify that *at least one* of them is used. For this, you can create a group.
///
/// Finally, you may use `ArgGroup`s to pull a value from a group of arguments when you don't care
/// exactly which argument was actually used at runtime.
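///
/// For example (a sketch of that last use case): with the `vers` group shown
/// below, the chosen value can be read via the group name, e.g.
/// `matches.value_of("vers")`, without knowing which member argument supplied it.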
///
/// # Examples
///
/// The following example demonstrates using an `ArgGroup` to ensure that one, and only one, of
/// the arguments from the specified group is present at runtime.
///
/// ```rust
/// # use clap::{App, arg, ArgGroup, ErrorKind};
/// let result = App::new("app")
/// .arg(arg!(--"set-ver" <ver> "set the version manually").required(false))
/// .arg(arg!(--major "auto increase major"))
/// .arg(arg!(--minor "auto increase minor"))
/// .arg(arg!(--patch "auto increase patch"))
/// .group(ArgGroup::new("vers")
/// .args(&["set-ver", "major", "minor", "patch"])
/// .required(true))
/// .try_get_matches_from(vec!["app", "--major", "--patch"]);
/// // Because we used two args in the group it's an error
/// assert!(result.is_err());
/// let err = result.unwrap_err();
/// assert_eq!(err.kind, ErrorKind::ArgumentConflict);
/// ```
/// This next example shows a passing parse of the same scenario
///
/// ```rust
/// # use clap::{App, arg, ArgGroup};
/// let result = App::new("app")
/// .arg(arg!(--"set-ver" <ver> "set the version manually").required(false))
/// .arg(arg!(--major "auto increase major"))
/// .arg(arg!(--minor "auto increase minor"))
/// .arg(arg!(--patch "auto increase patch"))
/// .group(ArgGroup::new("vers")
/// .args(&["set-ver", "major", "minor","patch"])
/// .required(true))
/// .try_get_matches_from(vec!["app", "--major"]);
/// assert!(result.is_ok());
/// let matches = result.unwrap();
/// // We may not know which of the args was used, so we can test for the group...
/// assert!(matches.is_present("vers"));
/// // we could also alternatively check each arg individually (not shown here)
/// ```
/// [`ArgGroup::multiple(true)`]: ArgGroup::multiple()
///
/// [`ArgGroup::multiple(false)`]: ArgGroup::multiple()
/// [arguments]: crate::Arg
/// [conflict]: crate::Arg::conflicts_with()
/// [requirement]: crate::Arg::requires()
#[derive(Default, Debug, PartialEq, Eq)]
pub struct ArgGroup<'help> {
pub(crate) id: Id,
pub(crate) name: &'help str,
pub(crate) args: Vec<Id>,
pub(crate) required: bool,
pub(crate) requires: Vec<Id>,
pub(crate) conflicts: Vec<Id>,
pub(crate) multiple: bool,
}
impl<'help> ArgGroup<'help> {
pub(crate) fn with_id(id: Id) -> Self {
ArgGroup {
id,
..ArgGroup::default()
}
}
/// Create a `ArgGroup` using a unique name.
///
/// The name will be used to get values from the group or refer to the group inside of conflict
/// and requirement rules.
///
/// # Examples
///
/// ```rust
/// # use clap::{App, ArgGroup};
/// ArgGroup::new("config")
/// # ;
/// ```
pub fn new<S: Into<&'help str>>(n: S) -> Self {
ArgGroup::default().name(n)
}
/// Sets the group name.
///
/// # Examples
///
/// ```rust
/// # use clap::{App, ArgGroup};
/// ArgGroup::default().name("config")
/// # ;
/// ```
pub fn name<S: Into<&'help str>>(mut self, n: S) -> Self {
self.name = n.into();
self.id = Id::from(&self.name);
self
}
/// Adds an [argument] to this group by name
///
/// # Examples
///
/// ```rust
/// # use clap::{App, Arg, ArgGroup};
/// let m = App::new("myprog")
/// .arg(Arg::new("flag")
/// .short('f'))
/// .arg(Arg::new("color")
/// .short('c'))
/// .group(ArgGroup::new("req_flags")
/// .arg("flag")
/// .arg("color"))
/// .get_matches_from(vec!["myprog", "-f"]);
/// // maybe we don't know which of the two flags was used...
/// assert!(m.is_present("req_flags"));
/// // but we can also check individually if needed
/// assert!(m.is_present("flag"));
/// ```
/// [argument]: crate::Arg
pub fn arg<T: Key>(mut self, arg_id: T) -> Self {
self.args.push(arg_id.into());
self
}
/// Adds multiple [arguments] to this group by name
///
/// # Examples
///
/// ```rust
/// # use clap::{App, Arg, ArgGroup};
/// let m = App::new("myprog")
/// .arg(Arg::new("flag")
/// .short('f'))
/// .arg(Arg::new("color")
/// .short('c'))
/// .group(ArgGroup::new("req_flags")
/// .args(&["flag", "color"]))
/// .get_matches_from(vec!["myprog", "-f"]);
/// // maybe we don't know which of the two flags was used...
/// assert!(m.is_present("req_flags"));
/// // but we can also check individually if needed
/// assert!(m.is_present("flag"));
/// ```
/// [arguments]: crate::Arg
pub fn args<T: Key>(mut self, ns: &[T]) -> Self {
for n in ns {
self = self.arg(n);
}
self
}
/// Allows more than one of the [`Arg`]s in this group to be used. (Default: `false`)
///
/// # Examples
///
/// Notice in this example we use *both* the `-f` and `-c` flags which are both part of the
/// group
///
/// ```rust
/// # use clap::{App, Arg, ArgGroup};
/// let m = App::new("myprog")
/// .arg(Arg::new("flag")
/// .short('f'))
/// .arg(Arg::new("color")
/// .short('c'))
/// .group(ArgGroup::new("req_flags")
/// .args(&["flag", "color"])
/// .multiple(true))
/// .get_matches_from(vec!["myprog", "-f", "-c"]);
/// // maybe we don't know which of the two flags was used...
/// assert!(m.is_present("req_flags"));
/// ```
    /// In this next example, we show the default behavior (i.e. `multiple(false)`) which will return
/// an error if more than one of the args in the group was used.
///
/// ```rust
/// # use clap::{App, Arg, ArgGroup, ErrorKind};
/// let result = App::new("myprog")
/// .arg(Arg::new("flag")
/// .short('f'))
/// .arg(Arg::new("color")
/// .short('c'))
/// .group(ArgGroup::new("req_flags")
/// .args(&["flag", "color"]))
/// .try_get_matches_from(vec!["myprog", "-f", "-c"]);
/// // Because we used both args in the group it's an error
/// assert!(result.is_err());
/// let err = result.unwrap_err();
/// assert_eq!(err.kind, ErrorKind::ArgumentConflict);
/// ```
///
/// [`Arg`]: crate::Arg
#[inline]
pub fn multiple(mut self, yes: bool) -> Self {
self.multiple = yes;
self
}
/// Require an argument from the group to be present when parsing.
///
    /// This is unless it conflicts with another argument. A required group will be displayed in
/// the usage string of the application in the format `<arg|arg2|arg3>`.
///
/// **NOTE:** This setting only applies to the current [`App`] / [`Subcommand`]s, and not
/// globally.
///
/// **NOTE:** By default, [`ArgGroup::multiple`] is set to `false` which when combined with
/// `ArgGroup::required(true)` states, "One and *only one* arg must be used from this group.
    /// Use of more than one arg is an error." Whereas setting `ArgGroup::multiple(true)`
    /// states, "*At least* one arg from this group must be used. Using multiple is OK."
///
/// # Examples
///
/// ```rust
/// # use clap::{App, Arg, ArgGroup, ErrorKind};
/// let result = App::new("myprog")
/// .arg(Arg::new("flag")
/// .short('f'))
/// .arg(Arg::new("color")
/// .short('c'))
/// .group(ArgGroup::new("req_flags")
/// .args(&["flag", "color"])
/// .required(true))
/// .try_get_matches_from(vec!["myprog"]);
/// // Because we didn't use any of the args in the group, it's an error
/// assert!(result.is_err());
/// let err = result.unwrap_err();
/// assert_eq!(err.kind, ErrorKind::MissingRequiredArgument);
/// ```
///
/// [`Subcommand`]: crate::Subcommand
/// [`ArgGroup::multiple`]: ArgGroup::multiple()
/// [`App`]: crate::App
#[inline]
pub fn required(mut self, yes: bool) -> Self {
self.required = yes;
self
}
/// Specify an argument or group that must be present when this group is.
///
/// This is not to be confused with a [required group]. Requirement rules function just like
    /// [argument requirement rules]: you can name other arguments or groups that must be present
/// when any one of the arguments from this group is used.
///
/// **NOTE:** The name provided may be an argument or group name
///
/// # Examples
///
/// ```rust
/// # use clap::{App, Arg, ArgGroup, ErrorKind};
/// let result = App::new("myprog")
/// .arg(Arg::new("flag")
/// .short('f'))
/// .arg(Arg::new("color")
/// .short('c'))
/// .arg(Arg::new("debug")
/// .short('d'))
/// .group(ArgGroup::new("req_flags")
/// .args(&["flag", "color"])
/// .requires("debug"))
/// .try_get_matches_from(vec!["myprog", "-c"]);
/// // because we used an arg from the group, and the group requires "-d" to be used, it's an
/// // error
/// assert!(result.is_err());
/// let err = result.unwrap_err();
/// assert_eq!(err.kind, ErrorKind::MissingRequiredArgument);
/// ```
/// [required group]: ArgGroup::required()
/// [argument requirement rules]: crate::Arg::requires()
pub fn requires<T: Key>(mut self, id: T) -> Self {
self.requires.push(id.into());
self
}
/// Specify arguments or groups that must be present when this group is.
///
/// This is not to be confused with a [required group]. Requirement rules function just like
    /// [argument requirement rules]: you can name other arguments or groups that must be present
/// when one of the arguments from this group is used.
///
    /// **NOTE:** The names provided may be argument or group names
///
/// # Examples
///
/// ```rust
/// # use clap::{App, Arg, ArgGroup, ErrorKind};
/// let result = App::new("myprog")
/// .arg(Arg::new("flag")
/// .short('f'))
/// .arg(Arg::new("color")
/// .short('c'))
/// .arg(Arg::new("debug")
/// .short('d'))
/// .arg(Arg::new("verb")
/// .short('v'))
/// .group(ArgGroup::new("req_flags")
/// .args(&["flag", "color"])
/// .requires_all(&["debug", "verb"]))
/// .try_get_matches_from(vec!["myprog", "-c", "-d"]);
/// // because we used an arg from the group, and the group requires "-d" and "-v" to be used,
/// // yet we only used "-d" it's an error
/// assert!(result.is_err());
/// let err = result.unwrap_err();
/// assert_eq!(err.kind, ErrorKind::MissingRequiredArgument);
/// ```
/// [required group]: ArgGroup::required()
/// [argument requirement rules]: crate::Arg::requires_all()
pub fn requires_all(mut self, ns: &[&'help str]) -> Self {
for n in ns {
self = self.requires(n);
}
self
}
/// Specify an argument or group that must **not** be present when this group is.
///
    /// Exclusion (aka conflict) rules function just like [argument exclusion rules]: you can name
/// other arguments or groups that must *not* be present when one of the arguments from this
/// group are used.
///
    /// **NOTE:** The name provided may be an argument or group name
///
/// # Examples
///
/// ```rust
/// # use clap::{App, Arg, ArgGroup, ErrorKind};
/// let result = App::new("myprog")
/// .arg(Arg::new("flag")
/// .short('f'))
/// .arg(Arg::new("color")
/// .short('c'))
/// .arg(Arg::new("debug")
/// .short('d'))
/// .group(ArgGroup::new("req_flags")
/// .args(&["flag", "color"])
/// .conflicts_with("debug"))
/// .try_get_matches_from(vec!["myprog", "-c", "-d"]);
/// // because we used an arg from the group, and the group conflicts with "-d", it's an error
/// assert!(result.is_err());
/// let err = result.unwrap_err();
/// assert_eq!(err.kind, ErrorKind::ArgumentConflict);
/// ```
/// [argument exclusion rules]: crate::Arg::conflicts_with()
pub fn conflicts_with<T: Key>(mut self, id: T) -> Self {
self.conflicts.push(id.into());
self
}
/// Specify arguments or groups that must **not** be present when this group is.
///
    /// Exclusion rules function just like [argument exclusion rules]: you can name other arguments
/// or groups that must *not* be present when one of the arguments from this group are used.
///
    /// **NOTE:** The names provided may be argument or group names
///
/// # Examples
///
/// ```rust
/// # use clap::{App, Arg, ArgGroup, ErrorKind};
/// let result = App::new("myprog")
/// .arg(Arg::new("flag")
/// .short('f'))
/// .arg(Arg::new("color")
/// .short('c'))
/// .arg(Arg::new("debug")
/// .short('d'))
/// .arg(Arg::new("verb")
/// .short('v'))
/// .group(ArgGroup::new("req_flags")
/// .args(&["flag", "color"])
/// .conflicts_with_all(&["debug", "verb"]))
/// .try_get_matches_from(vec!["myprog", "-c", "-v"]);
/// // because we used an arg from the group, and the group conflicts with either "-v" or "-d"
/// // it's an error
/// assert!(result.is_err());
/// let err = result.unwrap_err();
/// assert_eq!(err.kind, ErrorKind::ArgumentConflict);
/// ```
///
/// [argument exclusion rules]: crate::Arg::conflicts_with_all()
pub fn conflicts_with_all(mut self, ns: &[&'help str]) -> Self {
for n in ns {
self = self.conflicts_with(n);
}
self
}
/// Deprecated, replaced with [`ArgGroup::new`]
#[deprecated(since = "3.0.0", note = "Replaced with `ArgGroup::new`")]
pub fn with_name<S: Into<&'help str>>(n: S) -> Self {
Self::new(n)
}
/// Deprecated in [Issue #9](https://github.com/epage/clapng/issues/9), maybe [`clap::Parser`][crate::Parser] would fit your use case?
#[cfg(feature = "yaml")]
#[deprecated(
since = "3.0.0",
note = "Maybe clap::Parser would fit your use case? (Issue #9)"
)]
pub fn from_yaml(yaml: &'help Yaml) -> Self {
Self::from(yaml)
}
}
impl<'help> From<&'_ ArgGroup<'help>> for ArgGroup<'help> {
fn from(g: &ArgGroup<'help>) -> Self {
ArgGroup {
id: g.id.clone(),
name: g.name,
required: g.required,
args: g.args.clone(),
requires: g.requires.clone(),
conflicts: g.conflicts.clone(),
multiple: g.multiple,
}
}
}
/// Deprecated in [Issue #9](https://github.com/epage/clapng/issues/9), maybe [`clap::Parser`][crate::Parser] would fit your use case?
#[cfg(feature = "yaml")]
impl<'help> From<&'help Yaml> for ArgGroup<'help> {
/// Deprecated in [Issue #9](https://github.com/epage/clapng/issues/9), maybe [`clap::Parser`][crate::Parser] would fit your use case?
fn from(y: &'help Yaml) -> Self {
let b = y.as_hash().expect("ArgGroup::from::<Yaml> expects a table");
// We WANT this to panic on error...so expect() is good.
let mut a = ArgGroup::default();
let group_settings = if b.len() == 1 {
let name_yaml = b.keys().next().expect("failed to get name");
let name_str = name_yaml
.as_str()
.expect("failed to convert arg YAML name to str");
a.name = name_str;
a.id = Id::from(&a.name);
b.get(name_yaml)
.expect("failed to get name_str")
.as_hash()
.expect("failed to convert to a hash")
} else {
b
};
for (k, v) in group_settings {
a = match k.as_str().unwrap() {
"required" => a.required(v.as_bool().unwrap()),
"multiple" => a.multiple(v.as_bool().unwrap()),
"args" => yaml_vec_or_str!(a, v, arg),
"arg" => {
if let Some(ys) = v.as_str() {
a = a.arg(ys);
}
a
}
"requires" => yaml_vec_or_str!(a, v, requires),
"conflicts_with" => yaml_vec_or_str!(a, v, conflicts_with),
"name" => {
if let Some(ys) = v.as_str() {
a = a.name(ys);
}
a
}
s => panic!(
"Unknown ArgGroup setting '{}' in YAML file for \
ArgGroup '{}'",
s, a.name
),
}
}
a
}
}
#[cfg(test)]
mod test {
use super::ArgGroup;
#[cfg(feature = "yaml")]
use yaml_rust::YamlLoader;
#[test]
fn groups() {
let g = ArgGroup::new("test")
.arg("a1")
.arg("a4")
.args(&["a2", "a3"])
.required(true)
.conflicts_with("c1")
.conflicts_with_all(&["c2", "c3"])
.conflicts_with("c4")
.requires("r1")
.requires_all(&["r2", "r3"])
.requires("r4");
let args = vec!["a1".into(), "a4".into(), "a2".into(), "a3".into()];
let reqs = vec!["r1".into(), "r2".into(), "r3".into(), "r4".into()];
let confs = vec!["c1".into(), "c2".into(), "c3".into(), "c4".into()];
assert_eq!(g.args, args);
assert_eq!(g.requires, reqs);
assert_eq!(g.conflicts, confs);
}
#[test]
fn test_from() {
let g = ArgGroup::new("test")
.arg("a1")
.arg("a4")
.args(&["a2", "a3"])
.required(true)
.conflicts_with("c1")
.conflicts_with_all(&["c2", "c3"])
.conflicts_with("c4")
.requires("r1")
.requires_all(&["r2", "r3"])
.requires("r4");
let args = vec!["a1".into(), "a4".into(), "a2".into(), "a3".into()];
let reqs = vec!["r1".into(), "r2".into(), "r3".into(), "r4".into()];
let confs = vec!["c1".into(), "c2".into(), "c3".into(), "c4".into()];
let g2 = ArgGroup::from(&g);
assert_eq!(g2.args, args);
assert_eq!(g2.requires, reqs);
assert_eq!(g2.conflicts, confs);
}
#[cfg(feature = "yaml")]
#[test]
fn test_yaml() {
let g_yaml = "name: test
args:
- a1
- a4
- a2
- a3
conflicts_with:
- c1
- c2
- c3
- c4
requires:
- r1
- r2
- r3
- r4";
let yaml = &YamlLoader::load_from_str(g_yaml).expect("failed to load YAML file")[0];
let g = ArgGroup::from(yaml);
let args = vec!["a1".into(), "a4".into(), "a2".into(), "a3".into()];
let reqs = vec!["r1".into(), "r2".into(), "r3".into(), "r4".into()];
let confs = vec!["c1".into(), "c2".into(), "c3".into(), "c4".into()];
assert_eq!(g.args, args);
assert_eq!(g.requires, reqs);
assert_eq!(g.conflicts, confs);
}
// This test will *fail to compile* if ArgGroup is not Send + Sync
#[test]
fn arg_group_send_sync() {
fn foo<T: Send + Sync>(_: T) {}
foo(ArgGroup::new("test"))
}
}
impl Clone for ArgGroup<'_> {
fn clone(&self) -> Self {
ArgGroup {
id: self.id.clone(),
name: self.name,
required: self.required,
args: self.args.clone(),
requires: self.requires.clone(),
conflicts: self.conflicts.clone(),
multiple: self.multiple,
}
}
}
| 36.026316 | 138 | 0.527301 |
3a50b0681e8b89abd5bc2921f7d61de3dae23057
| 30,683 |
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Coherence phase
//
// The job of the coherence phase of typechecking is to ensure that each trait
// has at most one implementation for each type. Then we build a mapping from
// each trait in the system to its implementations.
use metadata::csearch::{each_impl, get_impl_trait, each_implementation_for_trait};
use metadata::csearch;
use middle::ty::get;
use middle::ty::{ImplContainer, lookup_item_type, subst};
use middle::ty::{substs, t, ty_bool, ty_char, ty_bot, ty_box, ty_enum, ty_err};
use middle::ty::{ty_str, ty_vec, ty_float, ty_infer, ty_int, ty_nil};
use middle::ty::{ty_param, ty_param_bounds_and_ty, ty_ptr};
use middle::ty::{ty_rptr, ty_self, ty_struct, ty_trait, ty_tup};
use middle::ty::{ty_uint, ty_uniq, ty_bare_fn, ty_closure};
use middle::ty::type_is_ty_var;
use middle::subst::Subst;
use middle::ty;
use middle::typeck::CrateCtxt;
use middle::typeck::infer::combine::Combine;
use middle::typeck::infer::InferCtxt;
use middle::typeck::infer::{new_infer_ctxt, resolve_ivar, resolve_type};
use middle::typeck::infer;
use util::ppaux::Repr;
use syntax::ast::{Crate, DefId, DefStruct, DefTy};
use syntax::ast::{Item, ItemEnum, ItemImpl, ItemMod, ItemStruct};
use syntax::ast::{LOCAL_CRATE, TraitRef, TyPath};
use syntax::ast;
use syntax::ast_map::NodeItem;
use syntax::ast_map;
use syntax::ast_util::{def_id_of_def, local_def};
use syntax::codemap::Span;
use syntax::owned_slice::OwnedSlice;
use syntax::parse::token;
use syntax::visit;
use collections::HashSet;
use std::cell::RefCell;
use std::rc::Rc;
struct UniversalQuantificationResult {
monotype: t,
type_variables: Vec<ty::t> ,
type_param_defs: Rc<Vec<ty::TypeParameterDef> >
}
fn get_base_type(inference_context: &InferCtxt,
span: Span,
original_type: t)
-> Option<t> {
let resolved_type;
match resolve_type(inference_context,
original_type,
resolve_ivar) {
Ok(resulting_type) if !type_is_ty_var(resulting_type) => {
resolved_type = resulting_type;
}
_ => {
inference_context.tcx.sess.span_fatal(span,
"the type of this value must be known in order \
to determine the base type");
}
}
match get(resolved_type).sty {
ty_enum(..) | ty_trait(..) | ty_struct(..) => {
debug!("(getting base type) found base type");
Some(resolved_type)
}
ty_nil | ty_bot | ty_bool | ty_char | ty_int(..) | ty_uint(..) | ty_float(..) |
ty_str(..) | ty_vec(..) | ty_bare_fn(..) | ty_closure(..) | ty_tup(..) |
ty_infer(..) | ty_param(..) | ty_self(..) | ty_err |
ty_box(_) | ty_uniq(_) | ty_ptr(_) | ty_rptr(_, _) => {
debug!("(getting base type) no base type; found {:?}",
get(original_type).sty);
None
}
}
}
fn type_is_defined_in_local_crate(original_type: t) -> bool {
/*!
*
* For coherence, when we have `impl Trait for Type`, we need to
* guarantee that `Type` is "local" to the
* crate. For our purposes, this means that it must contain
* some nominal type defined in this crate.
*/
let mut found_nominal = false;
ty::walk_ty(original_type, |t| {
match get(t).sty {
ty_enum(def_id, _) |
ty_trait(~ty::TyTrait { def_id, .. }) |
ty_struct(def_id, _) => {
if def_id.krate == ast::LOCAL_CRATE {
found_nominal = true;
}
}
_ => { }
}
});
return found_nominal;
}
// Returns the def ID of the base type, if there is one.
fn get_base_type_def_id(inference_context: &InferCtxt,
span: Span,
original_type: t)
-> Option<DefId> {
match get_base_type(inference_context, span, original_type) {
None => {
return None;
}
Some(base_type) => {
match get(base_type).sty {
ty_enum(def_id, _) |
ty_struct(def_id, _) |
ty_trait(~ty::TyTrait { def_id, .. }) => {
return Some(def_id);
}
_ => {
fail!("get_base_type() returned a type that wasn't an \
enum, struct, or trait");
}
}
}
}
}
struct CoherenceChecker<'a> {
crate_context: &'a CrateCtxt<'a>,
inference_context: InferCtxt<'a>,
}
struct CoherenceCheckVisitor<'a> {
cc: &'a CoherenceChecker<'a>
}
impl<'a> visit::Visitor<()> for CoherenceCheckVisitor<'a> {
fn visit_item(&mut self, item: &Item, _: ()) {
//debug!("(checking coherence) item '{}'", token::get_ident(item.ident));
match item.node {
ItemImpl(_, ref opt_trait, _, _) => {
match opt_trait.clone() {
Some(opt_trait) => {
self.cc.check_implementation(item, [opt_trait]);
}
None => self.cc.check_implementation(item, [])
}
}
_ => {
// Nothing to do.
}
};
visit::walk_item(self, item, ());
}
}
struct PrivilegedScopeVisitor<'a> { cc: &'a CoherenceChecker<'a> }
impl<'a> visit::Visitor<()> for PrivilegedScopeVisitor<'a> {
fn visit_item(&mut self, item: &Item, _: ()) {
match item.node {
ItemMod(ref module_) => {
// Then visit the module items.
visit::walk_mod(self, module_, ());
}
ItemImpl(_, None, ast_ty, _) => {
if !self.cc.ast_type_is_defined_in_local_crate(ast_ty) {
// This is an error.
let session = &self.cc.crate_context.tcx.sess;
session.span_err(item.span,
"cannot associate methods with a type outside the \
crate the type is defined in; define and implement \
a trait or new type instead");
}
}
ItemImpl(_, Some(ref trait_ref), _, _) => {
// `for_ty` is `Type` in `impl Trait for Type`
let for_ty =
ty::node_id_to_type(self.cc.crate_context.tcx,
item.id);
if !type_is_defined_in_local_crate(for_ty) {
// This implementation is not in scope of its base
// type. This still might be OK if the trait is
// defined in the same crate.
let trait_def_id =
self.cc.trait_ref_to_trait_def_id(trait_ref);
if trait_def_id.krate != LOCAL_CRATE {
let session = &self.cc.crate_context.tcx.sess;
session.span_err(item.span,
"cannot provide an extension implementation \
where both trait and type are not defined in this crate");
}
}
visit::walk_item(self, item, ());
}
_ => {
visit::walk_item(self, item, ());
}
}
}
}
impl<'a> CoherenceChecker<'a> {
fn check(&self, krate: &Crate) {
// Check implementations and traits. This populates the tables
// containing the inherent methods and extension methods. It also
// builds up the trait inheritance table.
let mut visitor = CoherenceCheckVisitor { cc: self };
visit::walk_crate(&mut visitor, krate, ());
// Check that there are no overlapping trait instances
self.check_implementation_coherence();
// Check whether traits with base types are in privileged scopes.
self.check_privileged_scopes(krate);
// Bring in external crates. It's fine for this to happen after the
// coherence checks, because we ensure by construction that no errors
// can happen at link time.
self.add_external_crates();
// Populate the table of destructors. It might seem a bit strange to
// do this here, but it's actually the most convenient place, since
// the coherence tables contain the trait -> type mappings.
self.populate_destructor_table();
}
fn check_implementation(&self, item: &Item,
associated_traits: &[TraitRef]) {
let tcx = self.crate_context.tcx;
let impl_did = local_def(item.id);
let self_type = ty::lookup_item_type(tcx, impl_did);
// If there are no traits, then this implementation must have a
// base type.
if associated_traits.len() == 0 {
debug!("(checking implementation) no associated traits for item '{}'",
token::get_ident(item.ident));
match get_base_type_def_id(&self.inference_context,
item.span,
self_type.ty) {
None => {
let session = &self.crate_context.tcx.sess;
session.span_err(item.span,
"no base type found for inherent implementation; \
implement a trait or new type instead");
}
Some(_) => {
// Nothing to do.
}
}
}
let impl_methods = self.create_impl_from_item(item);
for associated_trait in associated_traits.iter() {
let trait_ref = ty::node_id_to_trait_ref(
self.crate_context.tcx, associated_trait.ref_id);
debug!("(checking implementation) adding impl for trait '{}', item '{}'",
trait_ref.repr(self.crate_context.tcx),
token::get_ident(item.ident));
self.add_trait_impl(trait_ref.def_id, impl_did);
}
// Add the implementation to the mapping from implementation to base
// type def ID, if there is a base type for this implementation and
// the implementation does not have any associated traits.
match get_base_type_def_id(&self.inference_context,
item.span,
self_type.ty) {
None => {
// Nothing to do.
}
Some(base_type_def_id) => {
// FIXME: Gather up default methods?
if associated_traits.len() == 0 {
self.add_inherent_impl(base_type_def_id, impl_did);
}
}
}
tcx.impl_methods.borrow_mut().insert(impl_did, impl_methods);
}
// Creates default method IDs and performs type substitutions for an impl
// and trait pair. Then, for each provided method in the trait, inserts a
// `ProvidedMethodInfo` instance into the `provided_method_sources` map.
fn instantiate_default_methods(&self, impl_id: DefId,
trait_ref: &ty::TraitRef,
all_methods: &mut Vec<DefId>) {
let tcx = self.crate_context.tcx;
debug!("instantiate_default_methods(impl_id={:?}, trait_ref={})",
impl_id, trait_ref.repr(tcx));
let impl_poly_type = ty::lookup_item_type(tcx, impl_id);
let prov = ty::provided_trait_methods(tcx, trait_ref.def_id);
for trait_method in prov.iter() {
// Synthesize an ID.
let new_id = tcx.sess.next_node_id();
let new_did = local_def(new_id);
debug!("new_did={:?} trait_method={}", new_did, trait_method.repr(tcx));
// Create substitutions for the various trait parameters.
let new_method_ty =
Rc::new(subst_receiver_types_in_method_ty(
tcx,
impl_id,
trait_ref,
new_did,
&**trait_method,
Some(trait_method.def_id)));
debug!("new_method_ty={}", new_method_ty.repr(tcx));
all_methods.push(new_did);
// construct the polytype for the method based on the method_ty
let new_generics = ty::Generics {
type_param_defs:
Rc::new(Vec::from_slice(impl_poly_type.generics.type_param_defs()).append(
new_method_ty.generics.type_param_defs())),
region_param_defs:
Rc::new(Vec::from_slice(impl_poly_type.generics.region_param_defs()).append(
new_method_ty.generics.region_param_defs()))
};
let new_polytype = ty::ty_param_bounds_and_ty {
generics: new_generics,
ty: ty::mk_bare_fn(tcx, new_method_ty.fty.clone())
};
debug!("new_polytype={}", new_polytype.repr(tcx));
tcx.tcache.borrow_mut().insert(new_did, new_polytype);
tcx.methods.borrow_mut().insert(new_did, new_method_ty);
// Pair the new synthesized ID up with the
// ID of the method.
self.crate_context.tcx.provided_method_sources.borrow_mut()
.insert(new_did, trait_method.def_id);
}
}
fn add_inherent_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
let tcx = self.crate_context.tcx;
match tcx.inherent_impls.borrow().find(&base_def_id) {
Some(implementation_list) => {
implementation_list.borrow_mut().push(impl_def_id);
return;
}
None => {}
}
tcx.inherent_impls.borrow_mut().insert(base_def_id,
Rc::new(RefCell::new(vec!(impl_def_id))));
}
fn add_trait_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
ty::record_trait_implementation(self.crate_context.tcx,
base_def_id,
impl_def_id);
}
fn check_implementation_coherence(&self) {
for &trait_id in self.crate_context.tcx.trait_impls.borrow().keys() {
self.check_implementation_coherence_of(trait_id);
}
}
fn check_implementation_coherence_of(&self, trait_def_id: DefId) {
// Unify pairs of polytypes.
self.iter_impls_of_trait_local(trait_def_id, |impl_a| {
let polytype_a =
self.get_self_type_for_implementation(impl_a);
// "We have an impl of trait <trait_def_id> for type <polytype_a>,
// and that impl is <impl_a>"
self.iter_impls_of_trait(trait_def_id, |impl_b| {
// An impl is coherent with itself
if impl_a != impl_b {
let polytype_b = self.get_self_type_for_implementation(
impl_b);
if self.polytypes_unify(polytype_a.clone(), polytype_b) {
let session = &self.crate_context.tcx.sess;
session.span_err(
self.span_of_impl(impl_a),
format!("conflicting implementations for trait `{}`",
ty::item_path_str(self.crate_context.tcx,
trait_def_id)));
if impl_b.krate == LOCAL_CRATE {
session.span_note(self.span_of_impl(impl_b),
"note conflicting implementation here");
} else {
let crate_store = &self.crate_context.tcx.sess.cstore;
let cdata = crate_store.get_crate_data(impl_b.krate);
session.note(
"conflicting implementation in crate `" + cdata.name + "`");
}
}
}
})
})
}
fn iter_impls_of_trait(&self, trait_def_id: DefId, f: |DefId|) {
self.iter_impls_of_trait_local(trait_def_id, |x| f(x));
if trait_def_id.krate == LOCAL_CRATE {
return;
}
let crate_store = &self.crate_context.tcx.sess.cstore;
csearch::each_implementation_for_trait(crate_store, trait_def_id, |impl_def_id| {
// Is this actually necessary?
let _ = lookup_item_type(self.crate_context.tcx, impl_def_id);
f(impl_def_id);
});
}
fn iter_impls_of_trait_local(&self, trait_def_id: DefId, f: |DefId|) {
match self.crate_context.tcx.trait_impls.borrow().find(&trait_def_id) {
Some(impls) => {
for &impl_did in impls.borrow().iter() {
f(impl_did);
}
}
None => { /* no impls? */ }
}
}
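    // Two impls overlap if the universally quantified self type of either one
    // can be unified with the other's; both directions are checked below.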
fn polytypes_unify(&self,
polytype_a: ty_param_bounds_and_ty,
polytype_b: ty_param_bounds_and_ty)
-> bool {
let universally_quantified_a =
self.universally_quantify_polytype(polytype_a);
let universally_quantified_b =
self.universally_quantify_polytype(polytype_b);
return self.can_unify_universally_quantified(
&universally_quantified_a, &universally_quantified_b) ||
self.can_unify_universally_quantified(
&universally_quantified_b, &universally_quantified_a);
}
// Converts a polytype to a monotype by replacing all parameters with
// type variables. Returns the monotype and the type variables created.
fn universally_quantify_polytype(&self, polytype: ty_param_bounds_and_ty)
-> UniversalQuantificationResult {
let region_parameters =
polytype.generics.region_param_defs().iter()
.map(|d| self.inference_context.next_region_var(
infer::BoundRegionInCoherence(d.name)))
.collect();
let bounds_count = polytype.generics.type_param_defs().len();
let type_parameters = self.inference_context.next_ty_vars(bounds_count);
let substitutions = substs {
regions: ty::NonerasedRegions(region_parameters),
self_ty: None,
tps: type_parameters
};
let monotype = subst(self.crate_context.tcx,
&substitutions,
polytype.ty);
UniversalQuantificationResult {
monotype: monotype,
type_variables: substitutions.tps,
type_param_defs: polytype.generics.type_param_defs.clone()
}
}
fn can_unify_universally_quantified<'a>(&self,
a: &'a UniversalQuantificationResult,
b: &'a UniversalQuantificationResult)
-> bool {
infer::can_mk_subty(&self.inference_context,
a.monotype,
b.monotype).is_ok()
}
fn get_self_type_for_implementation(&self, impl_did: DefId)
-> ty_param_bounds_and_ty {
self.crate_context.tcx.tcache.borrow().get_copy(&impl_did)
}
// Privileged scope checking
fn check_privileged_scopes(&self, krate: &Crate) {
let mut visitor = PrivilegedScopeVisitor{ cc: self };
visit::walk_crate(&mut visitor, krate, ());
}
fn trait_ref_to_trait_def_id(&self, trait_ref: &TraitRef) -> DefId {
let def_map = &self.crate_context.tcx.def_map;
let trait_def = def_map.borrow().get_copy(&trait_ref.ref_id);
let trait_id = def_id_of_def(trait_def);
return trait_id;
}
/// For coherence, when we have `impl Type`, we need to guarantee that
/// `Type` is "local" to the crate. For our purposes, this means that it
/// must precisely name some nominal type defined in this crate.
fn ast_type_is_defined_in_local_crate(&self, original_type: &ast::Ty) -> bool {
match original_type.node {
TyPath(_, _, path_id) => {
match self.crate_context.tcx.def_map.borrow().get_copy(&path_id) {
DefTy(def_id) | DefStruct(def_id) => {
if def_id.krate != LOCAL_CRATE {
return false;
}
// Make sure that this type precisely names a nominal
// type.
match self.crate_context.tcx.map.find(def_id.node) {
None => {
self.crate_context.tcx.sess.span_bug(
original_type.span,
"resolve didn't resolve this type?!");
}
Some(NodeItem(item)) => {
match item.node {
ItemStruct(..) | ItemEnum(..) => true,
_ => false,
}
}
Some(_) => false,
}
}
_ => false
}
}
_ => false
}
}
// Converts an implementation in the AST to a vector of methods.
fn create_impl_from_item(&self, item: &Item) -> Vec<DefId> {
match item.node {
ItemImpl(_, ref trait_refs, _, ref ast_methods) => {
let mut methods: Vec<DefId> = ast_methods.iter().map(|ast_method| {
local_def(ast_method.id)
}).collect();
for trait_ref in trait_refs.iter() {
let ty_trait_ref = ty::node_id_to_trait_ref(
self.crate_context.tcx,
trait_ref.ref_id);
self.instantiate_default_methods(local_def(item.id),
&*ty_trait_ref,
&mut methods);
}
methods
}
_ => {
self.crate_context.tcx.sess.span_bug(item.span,
"can't convert a non-impl to an impl");
}
}
}
fn span_of_impl(&self, impl_did: DefId) -> Span {
assert_eq!(impl_did.krate, LOCAL_CRATE);
self.crate_context.tcx.map.span(impl_did.node)
}
// External crate handling
fn add_external_impl(&self,
impls_seen: &mut HashSet<DefId>,
impl_def_id: DefId) {
let tcx = self.crate_context.tcx;
let methods = csearch::get_impl_methods(&tcx.sess.cstore, impl_def_id);
// Make sure we don't visit the same implementation multiple times.
if !impls_seen.insert(impl_def_id) {
// Skip this one.
return
}
// Good. Continue.
let _ = lookup_item_type(tcx, impl_def_id);
let associated_traits = get_impl_trait(tcx, impl_def_id);
// Do a sanity check.
assert!(associated_traits.is_some());
// Record all the trait methods.
for trait_ref in associated_traits.iter() {
self.add_trait_impl(trait_ref.def_id, impl_def_id);
}
// For any methods that use a default implementation, add them to
// the map. This is a bit unfortunate.
for &method_def_id in methods.iter() {
for &source in ty::method(tcx, method_def_id).provided_source.iter() {
tcx.provided_method_sources.borrow_mut().insert(method_def_id, source);
}
}
tcx.impl_methods.borrow_mut().insert(impl_def_id, methods);
}
// Adds implementations and traits from external crates to the coherence
// info.
fn add_external_crates(&self) {
let mut impls_seen = HashSet::new();
let crate_store = &self.crate_context.tcx.sess.cstore;
crate_store.iter_crate_data(|crate_number, _crate_metadata| {
each_impl(crate_store, crate_number, |def_id| {
assert_eq!(crate_number, def_id.krate);
self.add_external_impl(&mut impls_seen, def_id)
})
})
}
//
// Destructors
//
fn populate_destructor_table(&self) {
let tcx = self.crate_context.tcx;
let drop_trait = match tcx.lang_items.drop_trait() {
Some(id) => id, None => { return }
};
let impl_methods = tcx.impl_methods.borrow();
let trait_impls = match tcx.trait_impls.borrow().find_copy(&drop_trait) {
None => return, // No types with (new-style) dtors present.
Some(found_impls) => found_impls
};
for &impl_did in trait_impls.borrow().iter() {
let methods = impl_methods.get(&impl_did);
            if methods.is_empty() {
// We'll error out later. For now, just don't ICE.
continue;
}
let method_def_id = *methods.get(0);
let self_type = self.get_self_type_for_implementation(impl_did);
match ty::get(self_type.ty).sty {
ty::ty_enum(type_def_id, _) |
ty::ty_struct(type_def_id, _) => {
tcx.destructor_for_type.borrow_mut().insert(type_def_id,
method_def_id);
tcx.destructors.borrow_mut().insert(method_def_id);
}
_ => {
// Destructors only work on nominal types.
if impl_did.krate == ast::LOCAL_CRATE {
{
match tcx.map.find(impl_did.node) {
Some(ast_map::NodeItem(item)) => {
tcx.sess.span_err((*item).span,
"the Drop trait may \
only be implemented \
on structures");
}
_ => {
tcx.sess.bug("didn't find impl in ast \
map");
}
}
}
} else {
tcx.sess.bug("found external impl of Drop trait on \
something other than a struct");
}
}
}
}
}
}
pub fn make_substs_for_receiver_types(tcx: &ty::ctxt,
impl_id: ast::DefId,
trait_ref: &ty::TraitRef,
method: &ty::Method)
-> ty::substs {
/*!
* Substitutes the values for the receiver's type parameters
* that are found in method, leaving the method's type parameters
* intact. This is in fact a mildly complex operation,
* largely because of the hokey way that we concatenate the
* receiver and method generics.
*/
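    // E.g. with two impl type params and one method type param, the method's
    // param is re-indexed to position 2 (i + num_impl_tps) in the combined
    // parameter list built below.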
let impl_polytype = ty::lookup_item_type(tcx, impl_id);
let num_impl_tps = impl_polytype.generics.type_param_defs().len();
let num_impl_regions = impl_polytype.generics.region_param_defs().len();
let meth_tps: Vec<ty::t> =
method.generics.type_param_defs().iter().enumerate()
.map(|(i, t)| ty::mk_param(tcx, i + num_impl_tps, t.def_id))
.collect();
let meth_regions: Vec<ty::Region> =
method.generics.region_param_defs().iter().enumerate()
.map(|(i, l)| ty::ReEarlyBound(l.def_id.node, i + num_impl_regions, l.name))
.collect();
let mut combined_tps = trait_ref.substs.tps.clone();
combined_tps.push_all_move(meth_tps);
let combined_regions = match &trait_ref.substs.regions {
&ty::ErasedRegions =>
fail!("make_substs_for_receiver_types: unexpected ErasedRegions"),
&ty::NonerasedRegions(ref rs) => {
let mut rs = rs.clone().into_vec();
rs.push_all_move(meth_regions);
ty::NonerasedRegions(OwnedSlice::from_vec(rs))
}
};
ty::substs {
regions: combined_regions,
self_ty: trait_ref.substs.self_ty,
tps: combined_tps
}
}
fn subst_receiver_types_in_method_ty(tcx: &ty::ctxt,
impl_id: ast::DefId,
trait_ref: &ty::TraitRef,
new_def_id: ast::DefId,
method: &ty::Method,
provided_source: Option<ast::DefId>)
-> ty::Method {
let combined_substs = make_substs_for_receiver_types(
tcx, impl_id, trait_ref, method);
ty::Method::new(
method.ident,
// method types *can* appear in the generic bounds
method.generics.subst(tcx, &combined_substs),
// method types *can* appear in the fty
method.fty.subst(tcx, &combined_substs),
method.explicit_self,
method.vis,
new_def_id,
ImplContainer(impl_id),
provided_source
)
}
pub fn check_coherence(crate_context: &CrateCtxt, krate: &Crate) {
CoherenceChecker {
crate_context: crate_context,
inference_context: new_infer_ctxt(crate_context.tcx),
}.check(krate);
}
| 39.086624 | 98 | 0.530098 |
76e40842c98707bceb178f3f940d93789073995b
| 1,489 |
pub mod backend;
mod chan;
pub mod io;
mod mpmc;
mod req_handler;
mod resp_handler;
mod response;
mod status;
mod waker;
pub use chan::*;
pub use protocol::{Request, MAX_REQUEST_SIZE};
pub use response::*;
use waker::AtomicWaker;
pub use backend::{Backend, BackendBuilder, BackendStream};
pub use mpmc::MpmcRingBufferStream as RingBufferStream;
pub(crate) use req_handler::{BridgeRequestToBackend, RequestHandler};
pub(crate) use resp_handler::{BridgeResponseToLocal, ResponseHandler};
use std::io::Result;
/// This is a marker trait. An AsyncWrite implementing it does not process the
/// buf data itself; it only hands the data to the receiving side of the chan,
/// and before the data is passed downstream it guarantees that buf holds one
/// complete request. The request format is decided by the concrete protocol,
/// which makes downstream handling easier.
/// Implementations should keep the chan's handling of buf as zero-copy as possible.
/// Input is pipelined; output is ping-pong.
pub trait AsyncPipeToPingPongChanWrite: AsyncWriteAll + Unpin {}
/// Marks an AsyncWrite-style interface for which writing buf can only end in
/// one of the following states:
/// - the whole buf is written successfully
/// - Pending
/// - a write error
/// A partially successful write must never be observed.
pub trait AsyncWriteAll {
fn poll_write(self: Pin<&mut Self>, cx: &mut Context, req: &Request) -> Poll<Result<()>>;
}
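// A minimal sketch of an `AsyncWriteAll` implementor (hypothetical type, for
// illustration only): the whole request is accepted atomically, so a partial
// write can never be observed.
//
//     struct NoopSink;
//     impl AsyncWriteAll for NoopSink {
//         fn poll_write(self: Pin<&mut Self>, _cx: &mut Context, _req: &Request) -> Poll<Result<()>> {
//             Poll::Ready(Ok(()))
//         }
//     }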
/// Ensures that while reading a response, payloads such as NotFound or Stored
/// are contained in a single read buffer and never split apart, which makes
/// them easy to inspect.
use std::pin::Pin;
use std::task::{Context, Poll};
// When reading data, either all of it is read at once or none of it is read.
pub trait AsyncReadAll {
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<Response>>;
}
//impl AsyncWriteAll for tokio::net::TcpStream {}
//impl AsyncWriteAll for tokio::net::tcp::OwnedWriteHalf {}
pub const MAX_CONNECTIONS: usize = 256;
// Used to send a notification when the stream exits.
pub trait Notify {
fn notify(&self);
}
| 25.672414 | 93 | 0.744795 |
bfca4fec706eda94267e9779c79320c94d18866a
| 7,169 |
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type Names for Debug Info.
use common::CrateContext;
use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty};
use rustc::hir;
// Compute the name of the type as it should be stored in debuginfo. Does not do
// any caching, i.e. calling the function twice with the same type will also do
// the work twice. The `qualified` parameter only affects the first level of the
// type name, further levels (i.e. type parameters) are always fully qualified.
pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool)
-> String {
let mut result = String::with_capacity(64);
push_debuginfo_type_name(cx, t, qualified, &mut result);
result
}
// Pushes the name of the type as it should be stored in debuginfo on the
// `output` String. See also compute_debuginfo_type_name().
pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool,
output: &mut String) {
match t.sty {
ty::TyBool => output.push_str("bool"),
ty::TyChar => output.push_str("char"),
ty::TyStr => output.push_str("str"),
ty::TyNever => output.push_str("!"),
ty::TyInt(int_ty) => output.push_str(int_ty.ty_to_string()),
ty::TyUint(uint_ty) => output.push_str(uint_ty.ty_to_string()),
ty::TyFloat(float_ty) => output.push_str(float_ty.ty_to_string()),
ty::TyAdt(def, substs) => {
push_item_name(cx, def.did, qualified, output);
push_type_params(cx, substs, output);
},
ty::TyTuple(component_types, _) => {
output.push('(');
for &component_type in component_types {
push_debuginfo_type_name(cx, component_type, true, output);
output.push_str(", ");
}
if !component_types.is_empty() {
output.pop();
output.pop();
}
output.push(')');
},
ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
output.push('*');
match mutbl {
hir::MutImmutable => output.push_str("const "),
hir::MutMutable => output.push_str("mut "),
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => {
output.push('&');
if mutbl == hir::MutMutable {
output.push_str("mut ");
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::TyArray(inner_type, len) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push_str(&format!("; {}", len));
output.push(']');
},
ty::TySlice(inner_type) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push(']');
},
ty::TyDynamic(ref trait_data, ..) => {
if let Some(principal) = trait_data.principal() {
let principal = cx.tcx().erase_late_bound_regions_and_normalize(
&principal);
push_item_name(cx, principal.def_id, false, output);
push_type_params(cx, principal.substs, output);
}
},
ty::TyFnDef(..) | ty::TyFnPtr(_) => {
let sig = t.fn_sig(cx.tcx());
if sig.unsafety() == hir::Unsafety::Unsafe {
output.push_str("unsafe ");
}
let abi = sig.abi();
if abi != ::abi::Abi::Rust {
output.push_str("extern \"");
output.push_str(abi.name());
output.push_str("\" ");
}
output.push_str("fn(");
let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig);
if !sig.inputs().is_empty() {
                for &parameter_type in sig.inputs() {
push_debuginfo_type_name(cx, parameter_type, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
}
if sig.variadic {
if !sig.inputs().is_empty() {
output.push_str(", ...");
} else {
output.push_str("...");
}
}
output.push(')');
if !sig.output().is_nil() {
output.push_str(" -> ");
push_debuginfo_type_name(cx, sig.output(), true, output);
}
},
ty::TyClosure(..) => {
output.push_str("closure");
}
ty::TyError |
ty::TyInfer(_) |
ty::TyProjection(..) |
ty::TyAnon(..) |
ty::TyParam(_) => {
bug!("debuginfo: Trying to create type name for \
unexpected type: {:?}", t);
}
}
fn push_item_name(cx: &CrateContext,
def_id: DefId,
qualified: bool,
output: &mut String) {
if qualified {
output.push_str(&cx.tcx().crate_name(def_id.krate).as_str());
for path_element in cx.tcx().def_path(def_id).data {
output.push_str("::");
output.push_str(&path_element.data.as_interned_str());
}
} else {
output.push_str(&cx.tcx().item_name(def_id).as_str());
}
}
// Pushes the type parameters in the given `Substs` to the output string.
// This ignores region parameters, since they can't reliably be
// reconstructed for items from non-local crates. For local crates, this
// would be possible but with inlining and LTO we have to use the least
// common denominator - otherwise we would run into conflicts.
fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
substs: &Substs<'tcx>,
output: &mut String) {
if substs.types().next().is_none() {
return;
}
output.push('<');
for type_parameter in substs.types() {
push_debuginfo_type_name(cx, type_parameter, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
output.push('>');
}
}
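// Added illustration (not in the original source): walking the match above,
// this renders e.g. `*const [i32; 4]` for a raw pointer to an array,
// `&mut [u8]` for a mutable slice reference, and
// `unsafe extern "C" fn(i32, ...) -> u32` for a variadic unsafe foreign fn.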
| 37.338542 | 80 | 0.515274 |
ef63837831dfdb506a8ef5dcbe4dac9ec416a598
| 10,519 |
#![feature(test)]
extern crate test;
use pretty_assertions::assert_eq;
use serde::Deserialize;
use serde_json::Value;
use std::{
cmp::Ordering,
collections::HashMap,
env,
fs::File,
io,
io::Read,
path::{Path, PathBuf},
};
use swc_common::{comments::SingleThreadedComments, input::StringInput, FromVariant, Mark};
use swc_ecma_ast::*;
use swc_ecma_codegen::Emitter;
use swc_ecma_parser::{EsConfig, Parser, Syntax};
use swc_ecma_preset_env::{preset_env, Config, FeatureOrModule, Mode, Targets, Version};
use swc_ecma_utils::drop_span;
use swc_ecma_visit::as_folder;
use swc_ecma_visit::FoldWith;
use swc_ecma_visit::VisitMut;
use test::{test_main, ShouldPanic, TestDesc, TestDescAndFn, TestFn, TestName, TestType};
use testing::NormalizedOutput;
use testing::Tester;
use walkdir::WalkDir;
/// options.json file
#[derive(Debug, Deserialize)]
struct BabelOptions {
presets: Vec<(String, PresetConfig)>,
}
#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
struct PresetConfig {
#[serde(default)]
pub use_built_ins: UseBuiltIns,
#[serde(default)]
pub corejs: CoreJs,
#[serde(default)]
pub modules: ModulesConfig,
#[serde(default)]
pub targets: Option<Targets>,
#[serde(default)]
pub include: Vec<FeatureOrModule>,
#[serde(default)]
pub exclude: Vec<FeatureOrModule>,
#[serde(default)]
pub force_all_transforms: bool,
#[serde(default)]
pub shipped_proposals: bool,
#[serde(default)]
pub config_path: String,
#[serde(default)]
pub debug: bool,
}
#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
#[serde(untagged)]
pub enum CoreJs {
Ver(Version),
Val(HashMap<String, Value>),
}
impl Default for CoreJs {
fn default() -> Self {
Self::Ver(Version {
major: 2,
minor: 0,
patch: 0,
})
}
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum ModulesConfig {
Bool(bool),
}
impl Default for ModulesConfig {
fn default() -> Self {
ModulesConfig::Bool(false)
}
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum UseBuiltIns {
Bool(bool),
Str(String),
}
impl Default for UseBuiltIns {
fn default() -> Self {
UseBuiltIns::Bool(false)
}
}
#[derive(Debug, FromVariant)]
enum Error {
Io(io::Error),
Var(env::VarError),
WalkDir(walkdir::Error),
Json(serde_json::Error),
Msg(String),
}
fn load() -> Result<Vec<TestDescAndFn>, Error> {
let mut tests = vec![];
let mut dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?);
dir.push("tests");
dir.push("fixtures");
for entry in WalkDir::new(&dir) {
let e = entry?;
println!("File: {}", e.path().display());
if e.metadata()?.is_file() {
continue;
}
match e.path().join("input.mjs").metadata() {
Ok(e) if e.is_file() => {}
_ => continue,
}
let cfg: BabelOptions = serde_json::from_reader(File::open(e.path().join("options.json"))?)
.map_err(|err| Error::Msg(format!("failed to parse options.json: {}", err)))?;
assert_eq!(cfg.presets.len(), 1);
let cfg = cfg.presets.into_iter().map(|v| v.1).next().unwrap();
let name = e
.path()
.strip_prefix(&dir)
.expect("failed to strip prefix")
.display()
.to_string();
tests.push(TestDescAndFn {
desc: TestDesc {
test_type: TestType::IntegrationTest,
ignore: e.path().to_string_lossy().contains(".")
|| env::var("TEST")
.map(|s| !name.contains(&s))
.unwrap_or(false),
name: TestName::DynTestName(name),
allow_fail: false,
should_panic: ShouldPanic::No,
},
testfn: TestFn::DynTestFn(Box::new(move || {
//
exec(cfg, e.path().to_path_buf()).expect("failed to run test")
})),
});
}
Ok(tests)
}
fn exec(c: PresetConfig, dir: PathBuf) -> Result<(), Error> {
println!("Config: {:?}", c);
Tester::new()
.print_errors(|cm, handler| {
let mut pass = preset_env(
Mark::fresh(Mark::root()),
Some(SingleThreadedComments::default()),
Config {
debug: c.debug,
mode: match c.use_built_ins {
UseBuiltIns::Bool(false) => None,
UseBuiltIns::Str(ref s) if s == "usage" => Some(Mode::Usage),
UseBuiltIns::Str(ref s) if s == "entry" => Some(Mode::Entry),
v => unreachable!("invalid: {:?}", v),
},
skip: vec![],
// TODO
loose: true,
// TODO
dynamic_import: true,
bugfixes: false,
include: c.include,
exclude: c.exclude,
core_js: match c.corejs {
CoreJs::Ver(v) => Some(v),
ref s => unimplemented!("Unknown core js version: {:?}", s),
},
force_all_transforms: c.force_all_transforms,
shipped_proposals: c.shipped_proposals,
targets: c.targets,
path: std::env::current_dir().unwrap(),
},
);
let print = |m: &Module| {
let mut buf = vec![];
{
let mut emitter = Emitter {
cfg: swc_ecma_codegen::Config { minify: false },
comments: None,
cm: cm.clone(),
wr: Box::new(swc_ecma_codegen::text_writer::JsWriter::new(
cm.clone(),
"\n",
&mut buf,
None,
)),
};
emitter.emit_module(m).expect("failed to emit module");
}
String::from_utf8(buf).expect("invalid utf8 character detected")
};
let fm = cm
.load_file(&dir.join("input.mjs"))
.expect("failed to load file");
let mut p = Parser::new(
Syntax::Es(EsConfig {
dynamic_import: true,
..Default::default()
}),
StringInput::from(&*fm),
None,
);
let module = p
.parse_module()
.map_err(|e| e.into_diagnostic(&handler).emit())?;
for e in p.take_errors() {
e.into_diagnostic(&handler).emit()
}
let actual = module.fold_with(&mut pass);
            // Debug-mode fixture: stdout/stderr snapshots are present; read them
            // to make sure they exist, then skip the output comparison.
if dir.join("stdout.txt").exists() {
let mut out = read(&dir.join("stdout.txt"));
if dir.join("stderr.txt").exists() {
out.push_str("\n\n");
out.push_str(&read(&dir.join("stderr.txt")));
}
return Ok(());
};
let actual_src = print(&actual);
if let Ok(..) = env::var("UPDATE") {
NormalizedOutput::from(actual_src.clone())
.compare_to_file(dir.join("output.mjs"))
.unwrap();
}
// It's normal transform test.
let expected = {
let fm = cm
.load_file(&dir.join("output.mjs"))
.expect("failed to load output file");
let mut p = Parser::new(
Syntax::Es(EsConfig {
dynamic_import: true,
..Default::default()
}),
StringInput::from(&*fm),
None,
);
let mut m = p
.parse_module()
.map_err(|e| e.into_diagnostic(&handler).emit())?;
for e in p.take_errors() {
e.into_diagnostic(&handler).emit()
}
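            // Added note: this comparator is `Ordering::Equal` except when both
            // items are bare `core-js/modules` imports, so the stable sort only
            // reorders runs of adjacent core-js imports (by specifier) and keeps
            // every other module item in its original order.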
m.body.sort_by(|a, b| match *a {
ModuleItem::ModuleDecl(ModuleDecl::Import(ImportDecl {
ref specifiers,
ref src,
..
})) if specifiers.is_empty() && src.value.starts_with("core-js/modules") => {
match *b {
ModuleItem::ModuleDecl(ModuleDecl::Import(ImportDecl {
specifiers: ref rs,
src: ref rsrc,
..
})) if rs.is_empty() && rsrc.value.starts_with("core-js/modules") => {
src.value.cmp(&rsrc.value)
}
_ => Ordering::Equal,
}
}
_ => Ordering::Equal,
});
m
};
let expected_src = print(&expected);
if drop_span(actual.fold_with(&mut as_folder(Normalizer)))
== drop_span(expected.fold_with(&mut as_folder(Normalizer)))
{
return Ok(());
}
if actual_src != expected_src {
panic!(
r#"assertion failed: `(left == right)`
{}"#,
::testing::diff(&actual_src, &expected_src),
);
}
Ok(())
})
.expect("failed to execute");
Ok(())
}
fn read(p: &Path) -> String {
let mut buf = String::new();
let mut f = File::open(p).expect("failed to open file");
f.read_to_string(&mut buf).expect("failed to read file");
buf
}
#[test]
fn fixtures() {
let tests = load().expect("failed to load fixtures");
let args: Vec<_> = env::args().collect();
test_main(&args, tests, Some(test::Options::new()));
}
struct Normalizer;
impl VisitMut for Normalizer {
fn visit_mut_str(&mut self, n: &mut Str) {
n.kind = Default::default();
}
}
| 28.977961 | 99 | 0.473619 |
79e00c8d35efcc1197bac084e242389f55a89761
| 3,133 |
use anyhow::{Result, anyhow};
use super::AtCoderProblem;
use scraper::{Html, Selector};
pub(super) fn scrape(html: &str, contest_id: &str) -> Result<Vec<AtCoderProblem>> {
Html::parse_document(html)
.select(&Selector::parse("tbody").unwrap())
.next()
.ok_or_else(|| anyhow!("Failed to parse html."))?
.select(&Selector::parse("tr").unwrap())
.map(|tr| {
let selector = Selector::parse("td").unwrap();
let mut tds = tr.select(&selector);
let position = tds
.next()
.ok_or_else(|| anyhow!("Failed to parse html."))?
.text()
.next()
.ok_or_else(|| anyhow!("Failed to parse html."))?
.to_owned();
let problem = tds.next().ok_or_else(|| anyhow!("Failed to parse html."))?;
let id = problem
.select(&Selector::parse("a").unwrap())
.next()
.ok_or_else(|| anyhow!("Failed to parse html."))?
.value()
.attr("href")
.ok_or_else(|| anyhow!("Failed to parse html."))?
.rsplit('/')
.next()
.ok_or_else(|| anyhow!("Failed to parse html."))?
.to_owned();
let title = problem
.text()
.next()
.ok_or_else(|| anyhow!("Failed to parse html."))?
.to_owned();
Ok(AtCoderProblem {
id,
contest_id: contest_id.to_owned(),
title,
position,
})
})
.collect()
}
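// Added note: the selectors above assume AtCoder's tasks-page markup,
// `<tbody><tr><td>{position}</td><td><a href=".../tasks/{id}">{title}</a>…`;
// any change to that layout surfaces as the generic "Failed to parse html." error.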
#[cfg(test)]
mod tests {
use super::*;
use std::fs::File;
use std::io::prelude::*;
#[test]
fn test_scrape() {
let mut file = File::open("test_resources/abc107_tasks").unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
let problems = scrape(&contents, "abc107").unwrap();
assert_eq!(
problems,
vec![
AtCoderProblem {
id: "abc107_a".to_owned(),
contest_id: "abc107".to_owned(),
title: "Train".to_owned(),
position: "A".to_owned()
},
AtCoderProblem {
id: "abc107_b".to_owned(),
contest_id: "abc107".to_owned(),
title: "Grid Compression".to_owned(),
position: "B".to_owned()
},
AtCoderProblem {
id: "arc101_a".to_owned(),
contest_id: "abc107".to_owned(),
title: "Candles".to_owned(),
position: "C".to_owned()
},
AtCoderProblem {
id: "arc101_b".to_owned(),
contest_id: "abc107".to_owned(),
title: "Median of Medians".to_owned(),
position: "D".to_owned()
}
]
);
}
}
| 33.688172 | 86 | 0.437919 |
dd9d535b1bf13ecfb9225202dd8a2a17d7f68c32
| 31,892 |
// This is a part of Chrono.
// See README.md and LICENSE.txt for details.
//! The internal implementation of the calendar and ordinal date.
//!
//! The current implementation is optimized for determining year, month, day and day of week.
//! 4-bit `YearFlags` map to one of 14 possible classes of year in the Gregorian calendar,
//! which are included in every packed `NaiveDate` instance.
//! The conversion between the packed calendar date (`Mdf`) and the ordinal date (`Of`) is
//! based on a moderately-sized lookup table (~1.5KB),
//! and the packed representation is chosen for efficient lookup.
//! The internal data structures do not validate their inputs,
//! but the conversions keep valid values valid and invalid values invalid,
//! so that the user-facing `NaiveDate` can validate the input as late as possible.
#![allow(dead_code)] // some internal methods have been left for consistency
use std::{i32, fmt};
use num_traits::FromPrimitive;
use Weekday;
use div::{div_rem, mod_floor};
/// The internal date representation. This also includes the packed `Mdf` value.
pub type DateImpl = i32;
pub const MAX_YEAR: DateImpl = i32::MAX >> 13;
pub const MIN_YEAR: DateImpl = i32::MIN >> 13;
/// The year flags (aka the dominical letter).
///
/// There are 14 possible classes of year in the Gregorian calendar:
/// common and leap years starting with Monday through Sunday.
/// The `YearFlags` stores this information into 4 bits `abbb`,
/// where `a` is `1` for the common year (simplifies the `Of` validation)
/// and `bbb` is a non-zero `Weekday` (mapping `Mon` to 7) of the last day in the past year
/// (simplifies the day of week calculation from the 1-based ordinal).
#[derive(PartialEq, Eq, Copy, Clone)]
pub struct YearFlags(pub u8);
pub const A: YearFlags = YearFlags(0o15); pub const AG: YearFlags = YearFlags(0o05);
pub const B: YearFlags = YearFlags(0o14); pub const BA: YearFlags = YearFlags(0o04);
pub const C: YearFlags = YearFlags(0o13); pub const CB: YearFlags = YearFlags(0o03);
pub const D: YearFlags = YearFlags(0o12); pub const DC: YearFlags = YearFlags(0o02);
pub const E: YearFlags = YearFlags(0o11); pub const ED: YearFlags = YearFlags(0o01);
pub const F: YearFlags = YearFlags(0o17); pub const FE: YearFlags = YearFlags(0o07);
pub const G: YearFlags = YearFlags(0o16); pub const GF: YearFlags = YearFlags(0o06);
static YEAR_TO_FLAGS: [YearFlags; 400] = [
BA, G, F, E, DC, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F,
ED, C, B, A, GF, E, D, C, BA, G, F, E, DC, B, A, G, FE, D, C, B,
AG, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C, BA, G, F, E,
DC, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A,
GF, E, D, C, BA, G, F, E, DC, B, A, G, FE, D, C, B, AG, F, E, D, // 100
C, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A,
GF, E, D, C, BA, G, F, E, DC, B, A, G, FE, D, C, B, AG, F, E, D,
CB, A, G, F, ED, C, B, A, GF, E, D, C, BA, G, F, E, DC, B, A, G,
FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C,
BA, G, F, E, DC, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F, // 200
E, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C,
BA, G, F, E, DC, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F,
ED, C, B, A, GF, E, D, C, BA, G, F, E, DC, B, A, G, FE, D, C, B,
AG, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C, BA, G, F, E,
DC, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A, // 300
G, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C, BA, G, F, E,
DC, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A,
GF, E, D, C, BA, G, F, E, DC, B, A, G, FE, D, C, B, AG, F, E, D,
CB, A, G, F, ED, C, B, A, GF, E, D, C, BA, G, F, E, DC, B, A, G,
FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C, // 400
];
static YEAR_DELTAS: [u8; 401] = [
0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5,
5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10,
10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15,
15, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20,
20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, // 100
25, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27, 28, 28, 28, 28, 29, 29, 29,
29, 30, 30, 30, 30, 31, 31, 31, 31, 32, 32, 32, 32, 33, 33, 33, 33, 34, 34, 34,
34, 35, 35, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 39, 39, 39,
39, 40, 40, 40, 40, 41, 41, 41, 41, 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44,
44, 45, 45, 45, 45, 46, 46, 46, 46, 47, 47, 47, 47, 48, 48, 48, 48, 49, 49, 49, // 200
49, 49, 49, 49, 49, 50, 50, 50, 50, 51, 51, 51, 51, 52, 52, 52, 52, 53, 53, 53,
53, 54, 54, 54, 54, 55, 55, 55, 55, 56, 56, 56, 56, 57, 57, 57, 57, 58, 58, 58,
58, 59, 59, 59, 59, 60, 60, 60, 60, 61, 61, 61, 61, 62, 62, 62, 62, 63, 63, 63,
63, 64, 64, 64, 64, 65, 65, 65, 65, 66, 66, 66, 66, 67, 67, 67, 67, 68, 68, 68,
68, 69, 69, 69, 69, 70, 70, 70, 70, 71, 71, 71, 71, 72, 72, 72, 72, 73, 73, 73, // 300
73, 73, 73, 73, 73, 74, 74, 74, 74, 75, 75, 75, 75, 76, 76, 76, 76, 77, 77, 77,
77, 78, 78, 78, 78, 79, 79, 79, 79, 80, 80, 80, 80, 81, 81, 81, 81, 82, 82, 82,
82, 83, 83, 83, 83, 84, 84, 84, 84, 85, 85, 85, 85, 86, 86, 86, 86, 87, 87, 87,
87, 88, 88, 88, 88, 89, 89, 89, 89, 90, 90, 90, 90, 91, 91, 91, 91, 92, 92, 92,
92, 93, 93, 93, 93, 94, 94, 94, 94, 95, 95, 95, 95, 96, 96, 96, 96, 97, 97, 97, 97 // 400+1
];
pub fn cycle_to_yo(cycle: u32) -> (u32, u32) {
let (mut year_mod_400, mut ordinal0) = div_rem(cycle, 365);
let delta = u32::from(YEAR_DELTAS[year_mod_400 as usize]);
if ordinal0 < delta {
year_mod_400 -= 1;
ordinal0 += 365 - u32::from(YEAR_DELTAS[year_mod_400 as usize]);
} else {
ordinal0 -= delta;
}
(year_mod_400, ordinal0 + 1)
}
pub fn yo_to_cycle(year_mod_400: u32, ordinal: u32) -> u32 {
year_mod_400 * 365 + u32::from(YEAR_DELTAS[year_mod_400 as usize]) + ordinal - 1
}
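// Added worked example: within the 400-year cycle, day 0 is Jan 1 of year 0
// (a leap year), so `cycle_to_yo(0) == (0, 1)` and `cycle_to_yo(365) == (0, 366)`
// (Dec 31); `yo_to_cycle` inverts both, e.g. `yo_to_cycle(0, 366) == 365`.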
impl YearFlags {
#[inline]
pub fn from_year(year: i32) -> YearFlags {
let year = mod_floor(year, 400);
YearFlags::from_year_mod_400(year)
}
#[inline]
pub fn from_year_mod_400(year: i32) -> YearFlags {
YEAR_TO_FLAGS[year as usize]
}
#[inline]
pub fn ndays(&self) -> u32 {
let YearFlags(flags) = *self;
366 - u32::from(flags >> 3)
}
#[inline]
pub fn isoweek_delta(&self) -> u32 {
let YearFlags(flags) = *self;
let mut delta = u32::from(flags) & 0b0111;
if delta < 3 { delta += 7; }
delta
}
#[inline]
pub fn nisoweeks(&self) -> u32 {
let YearFlags(flags) = *self;
52 + ((0b0000_0100_0000_0110 >> flags as usize) & 1)
}
}
impl fmt::Debug for YearFlags {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let YearFlags(flags) = *self;
match flags {
0o15 => "A".fmt(f), 0o05 => "AG".fmt(f),
0o14 => "B".fmt(f), 0o04 => "BA".fmt(f),
0o13 => "C".fmt(f), 0o03 => "CB".fmt(f),
0o12 => "D".fmt(f), 0o02 => "DC".fmt(f),
0o11 => "E".fmt(f), 0o01 => "ED".fmt(f),
0o10 => "F?".fmt(f), 0o00 => "FE?".fmt(f), // non-canonical
0o17 => "F".fmt(f), 0o07 => "FE".fmt(f),
0o16 => "G".fmt(f), 0o06 => "GF".fmt(f),
_ => write!(f, "YearFlags({})", flags),
}
}
}
pub const MIN_OL: u32 = 1 << 1;
pub const MAX_OL: u32 = 366 << 1; // larger than the non-leap last day `(365 << 1) | 1`
pub const MIN_MDL: u32 = (1 << 6) | (1 << 1);
pub const MAX_MDL: u32 = (12 << 6) | (31 << 1) | 1;
const XX: i8 = -128;
static MDL_TO_OL: [i8; (MAX_MDL as usize + 1)] = [
XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0
XX, XX, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, // 1
XX, XX, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, XX, XX, XX, XX, XX, // 2
XX, XX, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74,
72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74,
72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74,
72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, // 3
XX, XX, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76,
74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76,
74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76,
74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, XX, XX, // 4
XX, XX, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80,
78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80,
78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80,
78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, // 5
XX, XX, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82,
80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82,
80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82,
80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, XX, XX, // 6
XX, XX, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86,
84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86,
84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86,
84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, // 7
XX, XX, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88,
86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88,
86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88,
86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, // 8
XX, XX, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90,
88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90,
88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90,
88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, XX, XX, // 9
XX, XX, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94,
92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94,
92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94,
92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, // 10
XX, XX, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96,
94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96,
94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96,
94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, XX, XX, // 11
XX, XX, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100,
98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100,
98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100,
98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, // 12
];
static OL_TO_MDL: [u8; (MAX_OL as usize + 1)] = [
0, 0, // 0
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, // 1
66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
66, 66, 66, 66, 66, 66, 66, 66, 66, // 2
74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74,
72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74,
72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74,
72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, // 3
76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76,
74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76,
74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76,
74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, // 4
80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80,
78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80,
78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80,
78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, // 5
82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82,
80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82,
80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82,
80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, // 6
86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86,
84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86,
84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86,
84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, // 7
88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88,
86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88,
86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88,
86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, // 8
90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90,
88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90,
88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90,
88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, // 9
94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94,
92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94,
92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94,
92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, // 10
96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96,
94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96,
94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96,
94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, // 11
100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100,
98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100,
98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100,
98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98, // 12
];
/// Ordinal (day of year) and year flags: `(ordinal << 4) | flags`.
///
/// The whole bits except for the least 3 bits are referred as `Ol` (ordinal and leap flag),
/// which is an index to the `OL_TO_MDL` lookup table.
#[derive(PartialEq, PartialOrd, Copy, Clone)]
pub struct Of(pub u32);
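// Added packing sketch: `Of::new(60, BA)` stores `(60 << 4) | 0o04`, so
// `ordinal()` recovers 60 and `flags()` recovers `BA` (a leap-year flag).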
impl Of {
#[inline]
fn clamp_ordinal(ordinal: u32) -> u32 {
if ordinal > 366 {0} else {ordinal}
}
#[inline]
pub fn new(ordinal: u32, YearFlags(flags): YearFlags) -> Of {
let ordinal = Of::clamp_ordinal(ordinal);
Of((ordinal << 4) | u32::from(flags))
}
#[inline]
pub fn from_mdf(Mdf(mdf): Mdf) -> Of {
let mdl = mdf >> 3;
match MDL_TO_OL.get(mdl as usize) {
Some(&v) => Of(mdf.wrapping_sub((i32::from(v) as u32 & 0x3ff) << 3)),
None => Of(0)
}
}
#[inline]
pub fn valid(&self) -> bool {
let Of(of) = *self;
let ol = of >> 3;
MIN_OL <= ol && ol <= MAX_OL
}
#[inline]
pub fn ordinal(&self) -> u32 {
let Of(of) = *self;
of >> 4
}
#[inline]
pub fn with_ordinal(&self, ordinal: u32) -> Of {
let ordinal = Of::clamp_ordinal(ordinal);
let Of(of) = *self;
Of((of & 0b1111) | (ordinal << 4))
}
#[inline]
pub fn flags(&self) -> YearFlags {
let Of(of) = *self;
YearFlags((of & 0b1111) as u8)
}
#[inline]
pub fn with_flags(&self, YearFlags(flags): YearFlags) -> Of {
let Of(of) = *self;
Of((of & !0b1111) | u32::from(flags))
}
#[inline]
pub fn weekday(&self) -> Weekday {
let Of(of) = *self;
Weekday::from_u32(((of >> 4) + (of & 0b111)) % 7).unwrap()
}
#[inline]
pub fn isoweekdate_raw(&self) -> (u32, Weekday) {
// week ordinal = ordinal + delta
let Of(of) = *self;
let weekord = (of >> 4).wrapping_add(self.flags().isoweek_delta());
(weekord / 7, Weekday::from_u32(weekord % 7).unwrap())
}
#[inline]
pub fn to_mdf(&self) -> Mdf {
Mdf::from_of(*self)
}
#[inline]
pub fn succ(&self) -> Of {
let Of(of) = *self;
Of(of + (1 << 4))
}
#[inline]
pub fn pred(&self) -> Of {
let Of(of) = *self;
Of(of - (1 << 4))
}
}
impl fmt::Debug for Of {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let Of(of) = *self;
write!(f, "Of(({} << 4) | {:#04o} /*{:?}*/)",
of >> 4, of & 0b1111, YearFlags((of & 0b1111) as u8))
}
}
/// Month, day of month and year flags: `(month << 9) | (day << 4) | flags`
///
/// The whole bits except for the least 3 bits are referred as `Mdl`
/// (month, day of month and leap flag),
/// which is an index to the `MDL_TO_OL` lookup table.
#[derive(PartialEq, PartialOrd, Copy, Clone)]
pub struct Mdf(pub u32);
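// Added packing sketch: `Mdf::new(2, 29, BA)` stores `(2 << 9) | (29 << 4) | 0o04`;
// Feb 29 is accepted by `valid()` here only because `BA` is a leap-year flag
// (the common-year bit in the flags selects the `XX` column of `MDL_TO_OL`).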
impl Mdf {
#[inline]
fn clamp_month(month: u32) -> u32 {
if month > 12 {0} else {month}
}
#[inline]
fn clamp_day(day: u32) -> u32 {
if day > 31 {0} else {day}
}
#[inline]
pub fn new(month: u32, day: u32, YearFlags(flags): YearFlags) -> Mdf {
let month = Mdf::clamp_month(month);
let day = Mdf::clamp_day(day);
Mdf((month << 9) | (day << 4) | u32::from(flags))
}
#[inline]
pub fn from_of(Of(of): Of) -> Mdf {
let ol = of >> 3;
match OL_TO_MDL.get(ol as usize) {
Some(&v) => Mdf(of + (u32::from(v) << 3)),
None => Mdf(0)
}
}
#[inline]
pub fn valid(&self) -> bool {
let Mdf(mdf) = *self;
let mdl = mdf >> 3;
match MDL_TO_OL.get(mdl as usize) {
Some(&v) => v >= 0,
None => false
}
}
#[inline]
pub fn month(&self) -> u32 {
let Mdf(mdf) = *self;
mdf >> 9
}
#[inline]
pub fn with_month(&self, month: u32) -> Mdf {
let month = Mdf::clamp_month(month);
let Mdf(mdf) = *self;
Mdf((mdf & 0b1_1111_1111) | (month << 9))
}
#[inline]
pub fn day(&self) -> u32 {
let Mdf(mdf) = *self;
(mdf >> 4) & 0b1_1111
}
#[inline]
pub fn with_day(&self, day: u32) -> Mdf {
let day = Mdf::clamp_day(day);
let Mdf(mdf) = *self;
Mdf((mdf & !0b1_1111_0000) | (day << 4))
}
#[inline]
pub fn flags(&self) -> YearFlags {
let Mdf(mdf) = *self;
YearFlags((mdf & 0b1111) as u8)
}
#[inline]
pub fn with_flags(&self, YearFlags(flags): YearFlags) -> Mdf {
let Mdf(mdf) = *self;
Mdf((mdf & !0b1111) | u32::from(flags))
}
#[inline]
pub fn to_of(&self) -> Of {
Of::from_mdf(*self)
}
}
impl fmt::Debug for Mdf {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let Mdf(mdf) = *self;
write!(f, "Mdf(({} << 9) | ({} << 4) | {:#04o} /*{:?}*/)",
mdf >> 9, (mdf >> 4) & 0b1_1111, mdf & 0b1111, YearFlags((mdf & 0b1111) as u8))
}
}
#[cfg(test)]
mod tests {
#[cfg(test)] extern crate num_iter;
#[cfg(bench)] extern crate test;
use Weekday;
use super::{Of, Mdf};
use super::{YearFlags, A, B, C, D, E, F, G, AG, BA, CB, DC, ED, FE, GF};
use self::num_iter::range_inclusive;
use std::u32;
const NONLEAP_FLAGS: [YearFlags; 7] = [A, B, C, D, E, F, G];
const LEAP_FLAGS: [YearFlags; 7] = [AG, BA, CB, DC, ED, FE, GF];
const FLAGS: [YearFlags; 14] = [A, B, C, D, E, F, G, AG, BA, CB, DC, ED, FE, GF];
#[test]
fn test_year_flags_ndays_from_year() {
assert_eq!(YearFlags::from_year(2014).ndays(), 365);
assert_eq!(YearFlags::from_year(2012).ndays(), 366);
assert_eq!(YearFlags::from_year(2000).ndays(), 366);
assert_eq!(YearFlags::from_year(1900).ndays(), 365);
assert_eq!(YearFlags::from_year(1600).ndays(), 366);
assert_eq!(YearFlags::from_year( 1).ndays(), 365);
assert_eq!(YearFlags::from_year( 0).ndays(), 366); // 1 BCE (proleptic Gregorian)
assert_eq!(YearFlags::from_year( -1).ndays(), 365); // 2 BCE
assert_eq!(YearFlags::from_year( -4).ndays(), 366); // 5 BCE
assert_eq!(YearFlags::from_year( -99).ndays(), 365); // 100 BCE
assert_eq!(YearFlags::from_year(-100).ndays(), 365); // 101 BCE
assert_eq!(YearFlags::from_year(-399).ndays(), 365); // 400 BCE
assert_eq!(YearFlags::from_year(-400).ndays(), 366); // 401 BCE
}
#[test]
fn test_year_flags_nisoweeks() {
assert_eq!(A.nisoweeks(), 52);
assert_eq!(B.nisoweeks(), 52);
assert_eq!(C.nisoweeks(), 52);
assert_eq!(D.nisoweeks(), 53);
assert_eq!(E.nisoweeks(), 52);
assert_eq!(F.nisoweeks(), 52);
assert_eq!(G.nisoweeks(), 52);
assert_eq!(AG.nisoweeks(), 52);
assert_eq!(BA.nisoweeks(), 52);
assert_eq!(CB.nisoweeks(), 52);
assert_eq!(DC.nisoweeks(), 53);
assert_eq!(ED.nisoweeks(), 53);
assert_eq!(FE.nisoweeks(), 52);
assert_eq!(GF.nisoweeks(), 52);
}
#[cfg(bench)]
#[bench]
fn bench_year_flags_from_year(bh: &mut test::Bencher) {
bh.iter(|| {
for year in -999i32..1000 {
YearFlags::from_year(year);
}
});
}
#[test]
fn test_of() {
fn check(expected: bool, flags: YearFlags, ordinal1: u32, ordinal2: u32) {
for ordinal in range_inclusive(ordinal1, ordinal2) {
let of = Of::new(ordinal, flags);
assert!(of.valid() == expected,
"ordinal {} = {:?} should be {} for dominical year {:?}",
ordinal, of, if expected {"valid"} else {"invalid"}, flags);
}
}
for &flags in NONLEAP_FLAGS.iter() {
check(false, flags, 0, 0);
check(true, flags, 1, 365);
check(false, flags, 366, 1024);
check(false, flags, u32::MAX, u32::MAX);
}
for &flags in LEAP_FLAGS.iter() {
check(false, flags, 0, 0);
check(true, flags, 1, 366);
check(false, flags, 367, 1024);
check(false, flags, u32::MAX, u32::MAX);
}
}
#[test]
fn test_mdf_valid() {
fn check(expected: bool, flags: YearFlags, month1: u32, day1: u32,
month2: u32, day2: u32) {
for month in range_inclusive(month1, month2) {
for day in range_inclusive(day1, day2) {
let mdf = Mdf::new(month, day, flags);
assert!(mdf.valid() == expected,
"month {} day {} = {:?} should be {} for dominical year {:?}",
month, day, mdf, if expected {"valid"} else {"invalid"}, flags);
}
}
}
for &flags in NONLEAP_FLAGS.iter() {
check(false, flags, 0, 0, 0, 1024);
check(false, flags, 0, 0, 16, 0);
check(true, flags, 1, 1, 1, 31); check(false, flags, 1, 32, 1, 1024);
check(true, flags, 2, 1, 2, 28); check(false, flags, 2, 29, 2, 1024);
check(true, flags, 3, 1, 3, 31); check(false, flags, 3, 32, 3, 1024);
check(true, flags, 4, 1, 4, 30); check(false, flags, 4, 31, 4, 1024);
check(true, flags, 5, 1, 5, 31); check(false, flags, 5, 32, 5, 1024);
check(true, flags, 6, 1, 6, 30); check(false, flags, 6, 31, 6, 1024);
check(true, flags, 7, 1, 7, 31); check(false, flags, 7, 32, 7, 1024);
check(true, flags, 8, 1, 8, 31); check(false, flags, 8, 32, 8, 1024);
check(true, flags, 9, 1, 9, 30); check(false, flags, 9, 31, 9, 1024);
check(true, flags, 10, 1, 10, 31); check(false, flags, 10, 32, 10, 1024);
check(true, flags, 11, 1, 11, 30); check(false, flags, 11, 31, 11, 1024);
check(true, flags, 12, 1, 12, 31); check(false, flags, 12, 32, 12, 1024);
check(false, flags, 13, 0, 16, 1024);
check(false, flags, u32::MAX, 0, u32::MAX, 1024);
check(false, flags, 0, u32::MAX, 16, u32::MAX);
check(false, flags, u32::MAX, u32::MAX, u32::MAX, u32::MAX);
}
for &flags in LEAP_FLAGS.iter() {
check(false, flags, 0, 0, 0, 1024);
check(false, flags, 0, 0, 16, 0);
check(true, flags, 1, 1, 1, 31); check(false, flags, 1, 32, 1, 1024);
check(true, flags, 2, 1, 2, 29); check(false, flags, 2, 30, 2, 1024);
check(true, flags, 3, 1, 3, 31); check(false, flags, 3, 32, 3, 1024);
check(true, flags, 4, 1, 4, 30); check(false, flags, 4, 31, 4, 1024);
check(true, flags, 5, 1, 5, 31); check(false, flags, 5, 32, 5, 1024);
check(true, flags, 6, 1, 6, 30); check(false, flags, 6, 31, 6, 1024);
check(true, flags, 7, 1, 7, 31); check(false, flags, 7, 32, 7, 1024);
check(true, flags, 8, 1, 8, 31); check(false, flags, 8, 32, 8, 1024);
check(true, flags, 9, 1, 9, 30); check(false, flags, 9, 31, 9, 1024);
check(true, flags, 10, 1, 10, 31); check(false, flags, 10, 32, 10, 1024);
check(true, flags, 11, 1, 11, 30); check(false, flags, 11, 31, 11, 1024);
check(true, flags, 12, 1, 12, 31); check(false, flags, 12, 32, 12, 1024);
check(false, flags, 13, 0, 16, 1024);
check(false, flags, u32::MAX, 0, u32::MAX, 1024);
check(false, flags, 0, u32::MAX, 16, u32::MAX);
check(false, flags, u32::MAX, u32::MAX, u32::MAX, u32::MAX);
}
}
#[test]
fn test_of_fields() {
for &flags in FLAGS.iter() {
for ordinal in range_inclusive(1u32, 366) {
let of = Of::new(ordinal, flags);
if of.valid() {
assert_eq!(of.ordinal(), ordinal);
}
}
}
}
#[test]
fn test_of_with_fields() {
fn check(flags: YearFlags, ordinal: u32) {
let of = Of::new(ordinal, flags);
for ordinal in range_inclusive(0u32, 1024) {
let of = of.with_ordinal(ordinal);
assert_eq!(of.valid(), Of::new(ordinal, flags).valid());
if of.valid() {
assert_eq!(of.ordinal(), ordinal);
}
}
}
for &flags in NONLEAP_FLAGS.iter() {
check(flags, 1);
check(flags, 365);
}
for &flags in LEAP_FLAGS.iter() {
check(flags, 1);
check(flags, 366);
}
}
#[test]
fn test_of_weekday() {
assert_eq!(Of::new(1, A).weekday(), Weekday::Sun);
assert_eq!(Of::new(1, B).weekday(), Weekday::Sat);
assert_eq!(Of::new(1, C).weekday(), Weekday::Fri);
assert_eq!(Of::new(1, D).weekday(), Weekday::Thu);
assert_eq!(Of::new(1, E).weekday(), Weekday::Wed);
assert_eq!(Of::new(1, F).weekday(), Weekday::Tue);
assert_eq!(Of::new(1, G).weekday(), Weekday::Mon);
assert_eq!(Of::new(1, AG).weekday(), Weekday::Sun);
assert_eq!(Of::new(1, BA).weekday(), Weekday::Sat);
assert_eq!(Of::new(1, CB).weekday(), Weekday::Fri);
assert_eq!(Of::new(1, DC).weekday(), Weekday::Thu);
assert_eq!(Of::new(1, ED).weekday(), Weekday::Wed);
assert_eq!(Of::new(1, FE).weekday(), Weekday::Tue);
assert_eq!(Of::new(1, GF).weekday(), Weekday::Mon);
for &flags in FLAGS.iter() {
let mut prev = Of::new(1, flags).weekday();
for ordinal in range_inclusive(2u32, flags.ndays()) {
let of = Of::new(ordinal, flags);
let expected = prev.succ();
assert_eq!(of.weekday(), expected);
prev = expected;
}
}
}
#[test]
fn test_mdf_fields() {
for &flags in FLAGS.iter() {
for month in range_inclusive(1u32, 12) {
for day in range_inclusive(1u32, 31) {
let mdf = Mdf::new(month, day, flags);
if mdf.valid() {
assert_eq!(mdf.month(), month);
assert_eq!(mdf.day(), day);
}
}
}
}
}
#[test]
fn test_mdf_with_fields() {
fn check(flags: YearFlags, month: u32, day: u32) {
let mdf = Mdf::new(month, day, flags);
for month in range_inclusive(0u32, 16) {
let mdf = mdf.with_month(month);
assert_eq!(mdf.valid(), Mdf::new(month, day, flags).valid());
if mdf.valid() {
assert_eq!(mdf.month(), month);
assert_eq!(mdf.day(), day);
}
}
for day in range_inclusive(0u32, 1024) {
let mdf = mdf.with_day(day);
assert_eq!(mdf.valid(), Mdf::new(month, day, flags).valid());
if mdf.valid() {
assert_eq!(mdf.month(), month);
assert_eq!(mdf.day(), day);
}
}
}
for &flags in NONLEAP_FLAGS.iter() {
check(flags, 1, 1);
check(flags, 1, 31);
check(flags, 2, 1);
check(flags, 2, 28);
check(flags, 2, 29);
check(flags, 12, 31);
}
for &flags in LEAP_FLAGS.iter() {
check(flags, 1, 1);
check(flags, 1, 31);
check(flags, 2, 1);
check(flags, 2, 29);
check(flags, 2, 30);
check(flags, 12, 31);
}
}
#[test]
fn test_of_isoweekdate_raw() {
for &flags in FLAGS.iter() {
// January 4 should be in the first week
let (week, _) = Of::new(4 /* January 4 */, flags).isoweekdate_raw();
assert_eq!(week, 1);
}
}
#[test]
fn test_of_to_mdf() {
for i in range_inclusive(0u32, 8192) {
let of = Of(i);
assert_eq!(of.valid(), of.to_mdf().valid());
}
}
#[test]
fn test_mdf_to_of() {
for i in range_inclusive(0u32, 8192) {
let mdf = Mdf(i);
assert_eq!(mdf.valid(), mdf.to_of().valid());
}
}
#[test]
fn test_of_to_mdf_to_of() {
for i in range_inclusive(0u32, 8192) {
let of = Of(i);
if of.valid() {
assert_eq!(of, of.to_mdf().to_of());
}
}
}
#[test]
fn test_mdf_to_of_to_mdf() {
for i in range_inclusive(0u32, 8192) {
let mdf = Mdf(i);
if mdf.valid() {
assert_eq!(mdf, mdf.to_of().to_mdf());
}
}
}
}
| 40.887179 | 95 | 0.494764 |
d961abe3e14b5eb1fc9f7ee6ddb39023e80ce1ec
| 3,259 |
//! Convenience wrapper for streams to switch between plain TCP and TLS at runtime.
//!
//! There is no dependency on actual TLS implementations. Everything like
//! `native_tls` or `openssl` will work as long as there is a TLS stream supporting standard
//! `Read + Write` traits.
use std::io::{Error as IoError, Read, Result as IoResult, Write};
use std::net::SocketAddr;
use bytes::{Buf, BufMut};
use futures::Poll;
use tokio_io::{AsyncRead, AsyncWrite};
/// Trait to switch TCP_NODELAY.
pub trait NoDelay {
/// Set the TCP_NODELAY option to the given value.
fn set_nodelay(&mut self, nodelay: bool) -> IoResult<()>;
}
/// Trait to get the remote address from the underlying stream.
pub trait PeerAddr {
/// Returns the remote address that this stream is connected to.
fn peer_addr(&self) -> IoResult<SocketAddr>;
}
/// Stream, either plain TCP or TLS.
pub enum Stream<S, T> {
/// Unencrypted socket stream.
Plain(S),
/// Encrypted socket stream.
Tls(T),
}
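// Added usage sketch (hypothetical type names): callers typically pick the
// variant at connect time, e.g.
// `let s: Stream<TcpStream, TlsStream<TcpStream>> = if use_tls { Stream::Tls(tls) } else { Stream::Plain(tcp) };`
// after which both variants share the trait impls below.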
impl<S: Read, T: Read> Read for Stream<S, T> {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
match *self {
Stream::Plain(ref mut s) => s.read(buf),
Stream::Tls(ref mut s) => s.read(buf),
}
}
}
impl<S: Write, T: Write> Write for Stream<S, T> {
fn write(&mut self, buf: &[u8]) -> IoResult<usize> {
match *self {
Stream::Plain(ref mut s) => s.write(buf),
Stream::Tls(ref mut s) => s.write(buf),
}
}
fn flush(&mut self) -> IoResult<()> {
match *self {
Stream::Plain(ref mut s) => s.flush(),
Stream::Tls(ref mut s) => s.flush(),
}
}
}
impl<S: NoDelay, T: NoDelay> NoDelay for Stream<S, T> {
fn set_nodelay(&mut self, nodelay: bool) -> IoResult<()> {
match *self {
Stream::Plain(ref mut s) => s.set_nodelay(nodelay),
Stream::Tls(ref mut s) => s.set_nodelay(nodelay),
}
}
}
impl<S: PeerAddr, T: PeerAddr> PeerAddr for Stream<S, T> {
fn peer_addr(&self) -> IoResult<SocketAddr> {
match *self {
Stream::Plain(ref s) => s.peer_addr(),
Stream::Tls(ref s) => s.peer_addr(),
}
}
}
impl<S: AsyncRead, T: AsyncRead> AsyncRead for Stream<S, T> {
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
match *self {
Stream::Plain(ref s) => s.prepare_uninitialized_buffer(buf),
Stream::Tls(ref s) => s.prepare_uninitialized_buffer(buf),
}
}
fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> Poll<usize, IoError> {
match *self {
Stream::Plain(ref mut s) => s.read_buf(buf),
Stream::Tls(ref mut s) => s.read_buf(buf),
}
}
}
impl<S: AsyncWrite, T: AsyncWrite> AsyncWrite for Stream<S, T> {
fn shutdown(&mut self) -> Poll<(), IoError> {
match *self {
Stream::Plain(ref mut s) => s.shutdown(),
Stream::Tls(ref mut s) => s.shutdown(),
}
}
fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, IoError> {
match *self {
Stream::Plain(ref mut s) => s.write_buf(buf),
Stream::Tls(ref mut s) => s.write_buf(buf),
}
}
}
| 31.038095 | 92 | 0.573489 |
e5740b52e233571ddc4d38e6e5729166d2080b99
| 542 |
extern crate bigdecimal;
#[macro_use]
extern crate diesel;
extern crate diesel_dynamic_schema;
#[macro_use]
extern crate diesel_migrations;
#[macro_use]
extern crate failure;
extern crate fallible_iterator;
extern crate futures;
extern crate graph;
extern crate postgres;
extern crate serde;
extern crate uuid;
mod chain_head_listener;
pub mod db_schema;
mod entity_changes;
mod filter;
pub mod functions;
pub mod models;
pub mod store;
pub use self::chain_head_listener::ChainHeadUpdateListener;
pub use self::store::{Store, StoreConfig};
| 20.846154 | 59 | 0.809963 |
d6727750314eb33e7e4479043fcf43128c788953
| 2,337 |
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use lazy_static::lazy_static;
use crate::dilbert::search::SearchResult;
use crate::dilbert::tags::Tag;
#[derive(Default)]
struct CacheData {
results: HashMap<SearchResultID, SearchResult>,
tags: HashMap<Tag, Vec<SearchResultID>>,
}
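// Added note: `DefaultHasher::new()` is deterministic within a single compiled
// binary, but its algorithm is unspecified across Rust releases, so these IDs
// are only meaningful in-process and should never be persisted.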
#[derive(Hash, Eq, PartialEq, Copy, Clone)]
struct SearchResultID(u64);
impl SearchResult {
fn id(&self) -> SearchResultID {
let mut h = DefaultHasher::new();
self.page.hash(&mut h);
SearchResultID(h.finish())
}
}
pub struct TagBasedCache {
data: std::sync::RwLock<CacheData>,
}
lazy_static! {
pub static ref DILBERT_CACHE: TagBasedCache = TagBasedCache {
data: std::sync::RwLock::new(CacheData::default())
};
}
impl TagBasedCache {
pub fn add(&self, tags: &[Tag], result: &SearchResult) {
let mut cache_data = self.data.write().unwrap();
log::info!(
"Adding an item to cache. Cache contains ${} pages and ${} tags.",
cache_data.results.len(),
cache_data.tags.len()
);
let result_id = result.id();
if cache_data.results.contains_key(&result_id) {
return;
}
cache_data.results.insert(result_id, result.clone());
for tag in tags {
cache_data
.tags
.entry(tag.clone())
.or_default()
.push(result_id)
}
}
    pub fn find(&self, tags: &[Tag], limit: usize) -> Vec<SearchResult> {
        let cache_data = self.data.read().unwrap();
        // Count how many of the requested tags each cached result matches.
        let mut occurrences = HashMap::<SearchResultID, usize>::default();
        for tag in tags {
            if let Some(results) = cache_data.tags.get(tag) {
                for result_id in results {
                    *occurrences.entry(*result_id).or_insert(0) += 1;
                }
            }
        }
        let mut occurrences = occurrences
            .into_iter()
            .collect::<Vec<(SearchResultID, usize)>>();
        // Best matches first: sort by match count, descending.
        occurrences.sort_unstable_by_key(|(_, count)| std::cmp::Reverse(*count));
        occurrences
            .iter()
            .map(|(id, _)| cache_data.results[id].clone())
            .take(limit)
            .collect()
    }
}
| 25.966667 | 78 | 0.556269 |
38d7bd3300851b6d891ef0cf5d52b217bf9b99d9
| 779 |
use super::*;
use assert2::assert;
#[test]
fn on_empty_container_builder_increases_len_to_1() {
// Given an empty list
let item = 42_i32;
let expected_len = 1;
let sut = ContainerBuilder::new();
assert!(sut.is_empty());
// When
let result = sut.register_instance(item);
// Then
assert!(result.len() == expected_len);
}
#[test]
fn on_container_with_len_1_increases_len_to_2() {
    // Given a list containing one item
let given_item = 42_i32;
let item = Foo { _bar: -7, _baz: "test" };
let expected_len = 2;
let sut = ContainerBuilder::new().register_instance(given_item);
assert!(sut.len() == 1);
// And When
let result = sut.register_instance(item);
// Then
assert!(result.len() == expected_len);
}
| 22.911765 | 68 | 0.641849 |
71a7b010515e3fd2b17871ddcf17515cfdd76041
| 6,481 |
use crate::utils::{
match_def_path, match_qpath, paths, snippet_with_applicability, span_help_and_lint, span_lint_and_sugg,
};
use if_chain::if_chain;
use rustc::declare_lint_pass;
use rustc::hir::{BorrowKind, Expr, ExprKind, Mutability, QPath};
use rustc::lint::{LateContext, LateLintPass, LintArray, LintPass};
use rustc_errors::Applicability;
use rustc_session::declare_tool_lint;
declare_clippy_lint! {
/// **What it does:** Checks for `mem::replace()` on an `Option` with
/// `None`.
///
/// **Why is this bad?** `Option` already has the method `take()` for
/// taking its current value (Some(..) or None) and replacing it with
/// `None`.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// use std::mem;
///
/// let mut an_option = Some(0);
/// let replaced = mem::replace(&mut an_option, None);
/// ```
/// Is better expressed with:
/// ```rust
/// let mut an_option = Some(0);
/// let taken = an_option.take();
/// ```
pub MEM_REPLACE_OPTION_WITH_NONE,
style,
"replacing an `Option` with `None` instead of `take()`"
}
declare_clippy_lint! {
/// **What it does:** Checks for `mem::replace(&mut _, mem::uninitialized())`
/// and `mem::replace(&mut _, mem::zeroed())`.
///
/// **Why is this bad?** This will lead to undefined behavior even if the
/// value is overwritten later, because the uninitialized value may be
/// observed in the case of a panic.
///
/// **Known problems:** None.
///
/// **Example:**
///
/// ```
/// use std::mem;
///# fn may_panic(v: Vec<i32>) -> Vec<i32> { v }
///
/// #[allow(deprecated, invalid_value)]
/// fn myfunc (v: &mut Vec<i32>) {
/// let taken_v = unsafe { mem::replace(v, mem::uninitialized()) };
/// let new_v = may_panic(taken_v); // undefined behavior on panic
/// mem::forget(mem::replace(v, new_v));
/// }
/// ```
///
/// The [take_mut](https://docs.rs/take_mut) crate offers a sound solution,
/// at the cost of either lazily creating a replacement value or aborting
/// on panic, to ensure that the uninitialized value cannot be observed.
pub MEM_REPLACE_WITH_UNINIT,
correctness,
"`mem::replace(&mut _, mem::uninitialized())` or `mem::replace(&mut _, mem::zeroed())`"
}
declare_lint_pass!(MemReplace =>
[MEM_REPLACE_OPTION_WITH_NONE, MEM_REPLACE_WITH_UNINIT]);
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MemReplace {
fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
if_chain! {
// Check that `expr` is a call to `mem::replace()`
if let ExprKind::Call(ref func, ref func_args) = expr.kind;
if func_args.len() == 2;
if let ExprKind::Path(ref func_qpath) = func.kind;
if let Some(def_id) = cx.tables.qpath_res(func_qpath, func.hir_id).opt_def_id();
if match_def_path(cx, def_id, &paths::MEM_REPLACE);
// Check that second argument is `Option::None`
then {
if let ExprKind::Path(ref replacement_qpath) = func_args[1].kind {
if match_qpath(replacement_qpath, &paths::OPTION_NONE) {
// Since this is a late pass (already type-checked),
// and we already know that the second argument is an
// `Option`, we do not need to check the first
// argument's type. All that's left is to get
// replacee's path.
let replaced_path = match func_args[0].kind {
ExprKind::AddrOf(BorrowKind::Ref, Mutability::Mutable, ref replaced) => {
if let ExprKind::Path(QPath::Resolved(None, ref replaced_path)) = replaced.kind {
replaced_path
} else {
return
}
},
ExprKind::Path(QPath::Resolved(None, ref replaced_path)) => replaced_path,
_ => return,
};
let mut applicability = Applicability::MachineApplicable;
span_lint_and_sugg(
cx,
MEM_REPLACE_OPTION_WITH_NONE,
expr.span,
"replacing an `Option` with `None`",
"consider `Option::take()` instead",
format!("{}.take()", snippet_with_applicability(cx, replaced_path.span, "", &mut applicability)),
applicability,
);
}
}
if let ExprKind::Call(ref repl_func, ref repl_args) = func_args[1].kind {
if_chain! {
if repl_args.is_empty();
if let ExprKind::Path(ref repl_func_qpath) = repl_func.kind;
if let Some(repl_def_id) = cx.tables.qpath_res(repl_func_qpath, repl_func.hir_id).opt_def_id();
then {
if match_def_path(cx, repl_def_id, &paths::MEM_UNINITIALIZED) {
span_help_and_lint(
cx,
MEM_REPLACE_WITH_UNINIT,
expr.span,
"replacing with `mem::uninitialized()`",
"consider using the `take_mut` crate instead",
);
} else if match_def_path(cx, repl_def_id, &paths::MEM_ZEROED) &&
!cx.tables.expr_ty(&func_args[1]).is_primitive() {
span_help_and_lint(
cx,
MEM_REPLACE_WITH_UNINIT,
expr.span,
"replacing with `mem::zeroed()`",
"consider using a default value or the `take_mut` crate instead",
);
}
}
}
}
}
}
}
}
| 43.790541 | 125 | 0.489739 |
fe826c8a51cc645baecac3f1df962b9f05881619
| 1,037 |
#GLOBAL
struct VSOutPSIn
{
float4 Position_VSPS : SV_POSITION0;
float3 Normal_VSPS : TEXCOORD0;
float2 UV_VSPS : TEXCOORD1;
};
#END
#VS
struct VSIn
{
float3 Position_VS : POSITION0;
float3 Normal_VS : NORMAL0;
float2 UV_VS : TEXCOORD0;
};
float4x4 Camera;
float4x4 Transform;
VSOutPSIn main(VSIn In)
{
VSOutPSIn Out;
float4 loc = mul(Transform, float4(In.Position_VS, 1));
Out.Position_VSPS = mul(Camera, loc);
Out.Normal_VSPS = mul(Transform, float4(In.Normal_VS, 0)).xyz;
Out.UV_VSPS = float2(In.UV_VS.x, 1.0-In.UV_VS.y);
return Out;
}
#END
#PS
struct PSOut
{
float4 Color_PS : SV_TARGET0;
};
float3 LightDirection;
float3 LightDirection2;
float4 LightColor;
float4 LightColor2;
sampler2D Diffuse;
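// Added note: the pixel shader below does two-light Lambert shading,
// max(dot(-L, N), 0) per light, with the normal renormalized after
// interpolation; V was already flipped in the VS (1.0 - uv.y).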
PSOut main(VSOutPSIn In)
{
PSOut Out;
float3 normal = normalize(In.Normal_VSPS);
float light = max(dot(-LightDirection, normal), 0.0);
float light2 = max(dot(-LightDirection2, normal), 0.0);
Out.Color_PS = tex2D(Diffuse, In.UV_VSPS) * ((light * LightColor) + (light2 * LightColor2));
return Out;
}
#END
| 17 | 93 | 0.72324 |
61b49adff14479c670037409afa37c8403abc83b
| 3,639 |
use std;
use std::result;
#[allow(unused_imports)]
use log::{debug, info, warn, trace, error};
use mio_extras::channel::{TrySendError};
/// This is a specialized Result, similar to std::io::Result
pub type Result<T> = result::Result<T, Error>;
/// This roughly corresponds to "Return codes" in DDS spec 2.2.1.1 Format and Conventions
///
/// Deviations from the DDS spec:
/// * `OK` is not included. It is not an error. Ok/Error should be distinguished with the `Result` type.
/// * `Error` is too unspecific.
/// * `AlreadyDeleted` We should use Rust type system to avoid these, so no need for run-time error.
/// * `Timeout` This is normal operation and should be encoded as `Option` or `Result`
/// * `NoData` This should be encoded as `Option<SomeData>`, not an error code.
#[derive(Debug)]
pub enum Error {
/// Illegal parameter value.
BadParameter { reason: String },
/// Unsupported operation. Can only be returned by operations that are optional.
Unsupported,
/// Service ran out of the resources needed to complete the operation.
OutOfResources,
/// Operation invoked on an Entity that is not yet enabled.
NotEnabled,
/// Application attempted to modify an immutable QosPolicy.
ImmutablePolicy, // can we check this statically?
/// Application specified a set of policies that are not consistent with each other.
InconsistentPolicy { reason: String },
/// A pre-condition for the operation was not met.
PreconditionNotMet { precondition: String },
/// An operation was invoked on an inappropriate object or at
/// an inappropriate time (as determined by policies set by the
/// specification or the Service implementation). There is no
/// precondition that could be changed to make the operation
/// succeed.
IllegalOperation { reason: String },
// Our own additions to the DDS spec below:
/// Synchronization with another thread failed because the [other thread
/// has exited while holding a lock.](https://doc.rust-lang.org/std/sync/struct.PoisonError.html)
/// Does not exist in the DDS spec.
LockPoisoned,
/// Something that should not go wrong went wrong anyway.
/// This is usually a bug in RustDDS
Internal { reason: String },
Io { inner: std::io::Error },
Serialization { reason: String },
  Discovery { reason: String },
}
impl Error {
pub fn bad_parameter<T>(reason: &str) -> Result<T>
{
Err( Error::BadParameter{ reason: reason.to_string() })
}
pub fn precondition_not_met<T>(precondition: &str) -> Result<T>
{
Err( Error::PreconditionNotMet{ precondition: precondition.to_string() })
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! log_and_err_precondition_not_met {
($err_msg:literal) => (
{ error!($err_msg);
Error::precondition_not_met($err_msg)
}
)
}
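// Added usage sketch: `log_and_err_precondition_not_met!("Reader not enabled")`
// logs the message via `error!` and evaluates to
// `Err(Error::PreconditionNotMet { precondition: "Reader not enabled".to_string() })`.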
#[doc(hidden)]
#[macro_export]
macro_rules! log_and_err_internal {
($($arg:tt)*) => (
{ error!($($arg)*);
Err( Error::Internal{ reason: format!($($arg)*) } )
}
)
}
#[doc(hidden)]
#[macro_export]
macro_rules! log_and_err_discovery {
($($arg:tt)*) => (
{ error!($($arg)*);
        Error::Discovery { reason: format!($($arg)*) }
}
)
}
impl From<std::io::Error> for Error {
fn from(e:std::io::Error) -> Error {
Error::Io { inner: e }
}
}
impl<T> From<std::sync::PoisonError<T>> for Error {
fn from(_e : std::sync::PoisonError<T>) -> Error {
Error::LockPoisoned
}
}
impl<T> From<TrySendError<T>> for Error
where TrySendError<T> : std::error::Error
{
fn from(e : TrySendError<T>) -> Error {
Error::Internal{reason: format!("Cannot send to internal mio channel: {:?}",e) }
}
}
| 29.827869 | 104 | 0.670789 |
79e7410c3a8381183f256da06d11fb7a446db435
| 5,794 |
use clippy_utils::consts::{constant, Constant};
use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::higher;
use clippy_utils::source::snippet_with_applicability;
use clippy_utils::ty::is_copy;
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir::{BorrowKind, Expr, ExprKind, Mutability};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::{self, Ty};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::source_map::Span;
#[allow(clippy::module_name_repetitions)]
#[derive(Copy, Clone)]
pub struct UselessVec {
pub too_large_for_stack: u64,
}
declare_clippy_lint! {
/// ### What it does
/// Checks for usage of `&vec![..]` when using `&[..]` would
/// be possible.
///
/// ### Why is this bad?
/// This is less efficient.
///
/// ### Example
/// ```rust
/// # fn foo(my_vec: &[u8]) {}
///
/// // Bad
/// foo(&vec![1, 2]);
///
/// // Good
/// foo(&[1, 2]);
/// ```
#[clippy::version = "pre 1.29.0"]
pub USELESS_VEC,
perf,
"useless `vec!`"
}
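// Added note: the lint is capped by `too_large_for_stack` (clippy's
// `too-large-for-stack` configuration; 512 bytes by default, assuming the
// usual default applies): `check_vec_macro` below bails out once
// `len * size_of(elem)` exceeds it, so e.g. `&vec![0u8; 1024]` is left alone.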
impl_lint_pass!(UselessVec => [USELESS_VEC]);
impl<'tcx> LateLintPass<'tcx> for UselessVec {
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
// search for `&vec![_]` expressions where the adjusted type is `&[_]`
if_chain! {
if let ty::Ref(_, ty, _) = cx.typeck_results().expr_ty_adjusted(expr).kind();
if let ty::Slice(..) = ty.kind();
if let ExprKind::AddrOf(BorrowKind::Ref, mutability, addressee) = expr.kind;
if let Some(vec_args) = higher::VecArgs::hir(cx, addressee);
then {
self.check_vec_macro(cx, &vec_args, mutability, expr.span);
}
}
// search for `for _ in vec![…]`
if_chain! {
if let Some(higher::ForLoop { arg, .. }) = higher::ForLoop::hir(expr);
if let Some(vec_args) = higher::VecArgs::hir(cx, arg);
if is_copy(cx, vec_type(cx.typeck_results().expr_ty_adjusted(arg)));
then {
                // report the error around the `vec!`, not inside `<std macros>:`
let span = arg.span.ctxt().outer_expn_data().call_site;
self.check_vec_macro(cx, &vec_args, Mutability::Not, span);
}
}
}
}
impl UselessVec {
fn check_vec_macro<'tcx>(
self,
cx: &LateContext<'tcx>,
vec_args: &higher::VecArgs<'tcx>,
mutability: Mutability,
span: Span,
) {
let mut applicability = Applicability::MachineApplicable;
let snippet = match *vec_args {
higher::VecArgs::Repeat(elem, len) => {
if let Some((Constant::Int(len_constant), _)) = constant(cx, cx.typeck_results(), len) {
#[allow(clippy::cast_possible_truncation)]
if len_constant as u64 * size_of(cx, elem) > self.too_large_for_stack {
return;
}
match mutability {
Mutability::Mut => {
format!(
"&mut [{}; {}]",
snippet_with_applicability(cx, elem.span, "elem", &mut applicability),
snippet_with_applicability(cx, len.span, "len", &mut applicability)
)
},
Mutability::Not => {
format!(
"&[{}; {}]",
snippet_with_applicability(cx, elem.span, "elem", &mut applicability),
snippet_with_applicability(cx, len.span, "len", &mut applicability)
)
},
}
} else {
return;
}
},
higher::VecArgs::Vec(args) => {
if let Some(last) = args.iter().last() {
#[allow(clippy::cast_possible_truncation)]
if args.len() as u64 * size_of(cx, last) > self.too_large_for_stack {
return;
}
let span = args[0].span.to(last.span);
match mutability {
Mutability::Mut => {
format!(
"&mut [{}]",
snippet_with_applicability(cx, span, "..", &mut applicability)
)
},
Mutability::Not => {
format!("&[{}]", snippet_with_applicability(cx, span, "..", &mut applicability))
},
}
} else {
match mutability {
Mutability::Mut => "&mut []".into(),
Mutability::Not => "&[]".into(),
}
}
},
};
span_lint_and_sugg(
cx,
USELESS_VEC,
span,
"useless use of `vec!`",
"you can use a slice directly",
snippet,
applicability,
);
}
}
fn size_of(cx: &LateContext<'_>, expr: &Expr<'_>) -> u64 {
let ty = cx.typeck_results().expr_ty_adjusted(expr);
cx.layout_of(ty).map_or(0, |l| l.size.bytes())
}
/// Returns the item type of the vector (i.e., the `T` in `Vec<T>`).
fn vec_type(ty: Ty<'_>) -> Ty<'_> {
if let ty::Adt(_, substs) = ty.kind() {
substs.type_at(0)
} else {
panic!("The type of `vec!` is a not a struct?");
}
}
| 35.329268 | 108 | 0.476527 |
4a11ac11680906c076346e0728e1e5ee06e1b819
| 34,793 |
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt;
use rustc::hir;
use rustc::mir::*;
use rustc::middle::const_val::{ConstInt, ConstVal};
use rustc::middle::lang_items;
use rustc::ty::{self, Ty};
use rustc::ty::subst::{Kind, Substs};
use rustc::ty::util::IntTypeExt;
use rustc_data_structures::indexed_vec::Idx;
use util::patch::MirPatch;
use std::iter;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum DropFlagState {
Present, // i.e. initialized
Absent, // i.e. deinitialized or "moved"
}
impl DropFlagState {
pub fn value(self) -> bool {
match self {
DropFlagState::Present => true,
DropFlagState::Absent => false
}
}
}
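// A minimal sketch of the boolean lowering used for drop flags: `Present`
// becomes `true` in the generated MIR and `Absent` becomes `false`.
#[cfg(test)]
mod drop_flag_state_sketch {
    use super::DropFlagState;

    #[test]
    fn value_mirrors_initialization() {
        assert!(DropFlagState::Present.value());
        assert!(!DropFlagState::Absent.value());
    }
}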
#[derive(Debug)]
pub enum DropStyle {
Dead,
Static,
Conditional,
Open,
}
#[derive(Debug)]
pub enum DropFlagMode {
Shallow,
Deep
}
#[derive(Copy, Clone, Debug)]
pub enum Unwind {
To(BasicBlock),
InCleanup
}
impl Unwind {
fn is_cleanup(self) -> bool {
match self {
Unwind::To(..) => false,
Unwind::InCleanup => true
}
}
fn into_option(self) -> Option<BasicBlock> {
match self {
Unwind::To(bb) => Some(bb),
Unwind::InCleanup => None,
}
}
fn map<F>(self, f: F) -> Self where F: FnOnce(BasicBlock) -> BasicBlock {
match self {
Unwind::To(bb) => Unwind::To(f(bb)),
Unwind::InCleanup => Unwind::InCleanup
}
}
}
pub trait DropElaborator<'a, 'tcx: 'a> : fmt::Debug {
type Path : Copy + fmt::Debug;
fn patch(&mut self) -> &mut MirPatch<'tcx>;
fn mir(&self) -> &'a Mir<'tcx>;
fn tcx(&self) -> ty::TyCtxt<'a, 'tcx, 'tcx>;
fn param_env(&self) -> ty::ParamEnv<'tcx>;
fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle;
fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>>;
fn clear_drop_flag(&mut self, location: Location, path: Self::Path, mode: DropFlagMode);
fn field_subpath(&self, path: Self::Path, field: Field) -> Option<Self::Path>;
fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path>;
fn downcast_subpath(&self, path: Self::Path, variant: usize) -> Option<Self::Path>;
}
#[derive(Debug)]
struct DropCtxt<'l, 'b: 'l, 'tcx: 'b, D>
where D : DropElaborator<'b, 'tcx> + 'l
{
elaborator: &'l mut D,
source_info: SourceInfo,
lvalue: &'l Lvalue<'tcx>,
path: D::Path,
succ: BasicBlock,
unwind: Unwind,
}
pub fn elaborate_drop<'b, 'tcx, D>(
elaborator: &mut D,
source_info: SourceInfo,
lvalue: &Lvalue<'tcx>,
path: D::Path,
succ: BasicBlock,
unwind: Unwind,
bb: BasicBlock)
where D: DropElaborator<'b, 'tcx>
{
DropCtxt {
elaborator, source_info, lvalue, path, succ, unwind
}.elaborate_drop(bb)
}
impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
where D: DropElaborator<'b, 'tcx>
{
fn lvalue_ty(&self, lvalue: &Lvalue<'tcx>) -> Ty<'tcx> {
lvalue.ty(self.elaborator.mir(), self.tcx()).to_ty(self.tcx())
}
fn tcx(&self) -> ty::TyCtxt<'b, 'tcx, 'tcx> {
self.elaborator.tcx()
}
/// This elaborates a single drop instruction, located at `bb`, and
/// patches over it.
///
/// The elaborated drop checks the drop flags to only drop what
/// is initialized.
///
/// In addition, the relevant drop flags also need to be cleared
/// to avoid double-drops. However, in the middle of a complex
/// drop, one must avoid clearing some of the flags before they
/// are read, as that would cause a memory leak.
///
/// In particular, when dropping an ADT, multiple fields may be
/// joined together under the `rest` subpath. They are all controlled
/// by the primary drop flag, but only the last rest-field dropped
/// should clear it (and it must also not clear anything else).
///
/// FIXME: I think we should just control the flags externally
/// and then we do not need this machinery.
pub fn elaborate_drop<'a>(&mut self, bb: BasicBlock) {
debug!("elaborate_drop({:?})", self);
let style = self.elaborator.drop_style(self.path, DropFlagMode::Deep);
debug!("elaborate_drop({:?}): live - {:?}", self, style);
match style {
DropStyle::Dead => {
self.elaborator.patch().patch_terminator(bb, TerminatorKind::Goto {
target: self.succ
});
}
DropStyle::Static => {
let loc = self.terminator_loc(bb);
self.elaborator.clear_drop_flag(loc, self.path, DropFlagMode::Deep);
self.elaborator.patch().patch_terminator(bb, TerminatorKind::Drop {
location: self.lvalue.clone(),
target: self.succ,
unwind: self.unwind.into_option(),
});
}
DropStyle::Conditional => {
let unwind = self.unwind; // FIXME(#6393)
let succ = self.succ;
let drop_bb = self.complete_drop(Some(DropFlagMode::Deep), succ, unwind);
self.elaborator.patch().patch_terminator(bb, TerminatorKind::Goto {
target: drop_bb
});
}
DropStyle::Open => {
let drop_bb = self.open_drop();
self.elaborator.patch().patch_terminator(bb, TerminatorKind::Goto {
target: drop_bb
});
}
}
}
/// Return the lvalue and move path for each field of `variant`,
/// (the move path is `None` if the field is a rest field).
fn move_paths_for_fields(&self,
base_lv: &Lvalue<'tcx>,
variant_path: D::Path,
variant: &'tcx ty::VariantDef,
substs: &'tcx Substs<'tcx>)
-> Vec<(Lvalue<'tcx>, Option<D::Path>)>
{
variant.fields.iter().enumerate().map(|(i, f)| {
let field = Field::new(i);
let subpath = self.elaborator.field_subpath(variant_path, field);
let field_ty =
self.tcx().normalize_associated_type_in_env(
&f.ty(self.tcx(), substs),
self.elaborator.param_env()
);
(base_lv.clone().field(field, field_ty), subpath)
}).collect()
}
fn drop_subpath(&mut self,
lvalue: &Lvalue<'tcx>,
path: Option<D::Path>,
succ: BasicBlock,
unwind: Unwind)
-> BasicBlock
{
if let Some(path) = path {
debug!("drop_subpath: for std field {:?}", lvalue);
DropCtxt {
elaborator: self.elaborator,
source_info: self.source_info,
path, lvalue, succ, unwind,
}.elaborated_drop_block()
} else {
debug!("drop_subpath: for rest field {:?}", lvalue);
DropCtxt {
elaborator: self.elaborator,
source_info: self.source_info,
lvalue, succ, unwind,
// Using `self.path` here to condition the drop on
// our own drop flag.
path: self.path
}.complete_drop(None, succ, unwind)
}
}
/// Create one-half of the drop ladder for a list of fields, and return
/// the list of steps in it in reverse order, with the first step
/// dropping 0 fields and so on.
///
/// `unwind_ladder` is such a list of steps in reverse order,
/// which is called if the matching step of the drop glue panics.
fn drop_halfladder(&mut self,
unwind_ladder: &[Unwind],
mut succ: BasicBlock,
fields: &[(Lvalue<'tcx>, Option<D::Path>)])
-> Vec<BasicBlock>
{
Some(succ).into_iter().chain(
fields.iter().rev().zip(unwind_ladder)
.map(|(&(ref lv, path), &unwind_succ)| {
succ = self.drop_subpath(lv, path, succ, unwind_succ);
succ
})
).collect()
}
fn drop_ladder_bottom(&mut self) -> (BasicBlock, Unwind) {
// Clear the "master" drop flag at the end. This is needed
// because the "master" drop protects the ADT's discriminant,
// which is invalidated after the ADT is dropped.
let (succ, unwind) = (self.succ, self.unwind); // FIXME(#6393)
(
self.drop_flag_reset_block(DropFlagMode::Shallow, succ, unwind),
unwind.map(|unwind| {
self.drop_flag_reset_block(DropFlagMode::Shallow, unwind, Unwind::InCleanup)
})
)
}
/// Create a full drop ladder, consisting of 2 connected half-drop-ladders
///
/// For example, with 3 fields, the drop ladder is
///
/// .d0:
/// ELAB(drop location.0 [target=.d1, unwind=.c1])
/// .d1:
/// ELAB(drop location.1 [target=.d2, unwind=.c2])
/// .d2:
/// ELAB(drop location.2 [target=`self.succ`, unwind=`self.unwind`])
/// .c1:
/// ELAB(drop location.1 [target=.c2])
/// .c2:
/// ELAB(drop location.2 [target=`self.unwind`])
///
/// NOTE: this does not clear the master drop flag, so you need
/// to point succ/unwind on a `drop_ladder_bottom`.
fn drop_ladder<'a>(&mut self,
fields: Vec<(Lvalue<'tcx>, Option<D::Path>)>,
succ: BasicBlock,
unwind: Unwind)
-> (BasicBlock, Unwind)
{
debug!("drop_ladder({:?}, {:?})", self, fields);
let mut fields = fields;
fields.retain(|&(ref lvalue, _)| {
self.lvalue_ty(lvalue).needs_drop(self.tcx(), self.elaborator.param_env())
});
debug!("drop_ladder - fields needing drop: {:?}", fields);
let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
let unwind_ladder: Vec<_> = if let Unwind::To(target) = unwind {
let halfladder = self.drop_halfladder(&unwind_ladder, target, &fields);
halfladder.into_iter().map(Unwind::To).collect()
} else {
unwind_ladder
};
let normal_ladder =
self.drop_halfladder(&unwind_ladder, succ, &fields);
(*normal_ladder.last().unwrap(), *unwind_ladder.last().unwrap())
}
fn open_drop_for_tuple<'a>(&mut self, tys: &[Ty<'tcx>])
-> BasicBlock
{
debug!("open_drop_for_tuple({:?}, {:?})", self, tys);
let fields = tys.iter().enumerate().map(|(i, &ty)| {
(self.lvalue.clone().field(Field::new(i), ty),
self.elaborator.field_subpath(self.path, Field::new(i)))
}).collect();
let (succ, unwind) = self.drop_ladder_bottom();
self.drop_ladder(fields, succ, unwind).0
}
fn open_drop_for_box<'a>(&mut self, ty: Ty<'tcx>) -> BasicBlock
{
debug!("open_drop_for_box({:?}, {:?})", self, ty);
let interior = self.lvalue.clone().deref();
let interior_path = self.elaborator.deref_subpath(self.path);
let succ = self.succ; // FIXME(#6393)
let unwind = self.unwind;
let succ = self.box_free_block(ty, succ, unwind);
let unwind_succ = self.unwind.map(|unwind| {
self.box_free_block(ty, unwind, Unwind::InCleanup)
});
self.drop_subpath(&interior, interior_path, succ, unwind_succ)
}
fn open_drop_for_adt<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>)
-> BasicBlock {
debug!("open_drop_for_adt({:?}, {:?}, {:?})", self, adt, substs);
if adt.variants.len() == 0 {
return self.elaborator.patch().new_block(BasicBlockData {
statements: vec![],
terminator: Some(Terminator {
source_info: self.source_info,
kind: TerminatorKind::Unreachable
}),
is_cleanup: self.unwind.is_cleanup()
});
}
let contents_drop = if adt.is_union() {
(self.succ, self.unwind)
} else {
self.open_drop_for_adt_contents(adt, substs)
};
if adt.has_dtor(self.tcx()) {
self.destructor_call_block(contents_drop)
} else {
contents_drop.0
}
}
fn open_drop_for_adt_contents(&mut self, adt: &'tcx ty::AdtDef,
substs: &'tcx Substs<'tcx>)
-> (BasicBlock, Unwind) {
let (succ, unwind) = self.drop_ladder_bottom();
if adt.variants.len() == 1 {
let fields = self.move_paths_for_fields(
self.lvalue,
self.path,
&adt.variants[0],
substs
);
self.drop_ladder(fields, succ, unwind)
} else {
self.open_drop_for_multivariant(adt, substs, succ, unwind)
}
}
fn open_drop_for_multivariant(&mut self, adt: &'tcx ty::AdtDef,
substs: &'tcx Substs<'tcx>,
succ: BasicBlock,
unwind: Unwind)
-> (BasicBlock, Unwind) {
let mut values = Vec::with_capacity(adt.variants.len());
let mut normal_blocks = Vec::with_capacity(adt.variants.len());
let mut unwind_blocks = if unwind.is_cleanup() {
None
} else {
Some(Vec::with_capacity(adt.variants.len()))
};
let mut have_otherwise = false;
for (variant_index, discr) in adt.discriminants(self.tcx()).enumerate() {
let subpath = self.elaborator.downcast_subpath(
self.path, variant_index);
if let Some(variant_path) = subpath {
let base_lv = self.lvalue.clone().elem(
ProjectionElem::Downcast(adt, variant_index)
);
let fields = self.move_paths_for_fields(
&base_lv,
variant_path,
&adt.variants[variant_index],
substs);
values.push(discr);
if let Unwind::To(unwind) = unwind {
// We can't use the half-ladder from the original
// drop ladder, because this breaks the
// "funclet can't have 2 successor funclets"
// requirement from MSVC:
//
// switch unwind-switch
// / \ / \
// v1.0 v2.0 v2.0-unwind v1.0-unwind
// | | / |
// v1.1-unwind v2.1-unwind |
// ^ |
// \-------------------------------/
//
// Create a duplicate half-ladder to avoid that. We
                // could technically only do this on MSVC, but I
                // want to minimize the divergence between MSVC
// and non-MSVC.
let unwind_blocks = unwind_blocks.as_mut().unwrap();
let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
let halfladder =
self.drop_halfladder(&unwind_ladder, unwind, &fields);
unwind_blocks.push(halfladder.last().cloned().unwrap());
}
let (normal, _) = self.drop_ladder(fields, succ, unwind);
normal_blocks.push(normal);
} else {
have_otherwise = true;
}
}
if have_otherwise {
normal_blocks.push(self.drop_block(succ, unwind));
if let Unwind::To(unwind) = unwind {
unwind_blocks.as_mut().unwrap().push(
self.drop_block(unwind, Unwind::InCleanup)
);
}
} else {
values.pop();
}
(self.adt_switch_block(adt, normal_blocks, &values, succ, unwind),
unwind.map(|unwind| {
self.adt_switch_block(
adt, unwind_blocks.unwrap(), &values, unwind, Unwind::InCleanup
)
}))
}
fn adt_switch_block(&mut self,
adt: &'tcx ty::AdtDef,
blocks: Vec<BasicBlock>,
values: &[ConstInt],
succ: BasicBlock,
unwind: Unwind)
-> BasicBlock {
        // If there are multiple variants, then if something
        // is present within the enum, the discriminant, tracked
        // by the rest path, must be initialized.
//
// Additionally, we do not want to switch on the
        // discriminant after it is freed, because that
// way lies only trouble.
let discr_ty = adt.repr.discr_type().to_ty(self.tcx());
let discr = Lvalue::Local(self.new_temp(discr_ty));
let discr_rv = Rvalue::Discriminant(self.lvalue.clone());
let switch_block = BasicBlockData {
statements: vec![self.assign(&discr, discr_rv)],
terminator: Some(Terminator {
source_info: self.source_info,
kind: TerminatorKind::SwitchInt {
discr: Operand::Consume(discr),
switch_ty: discr_ty,
values: From::from(values.to_owned()),
targets: blocks,
}
}),
is_cleanup: unwind.is_cleanup(),
};
let switch_block = self.elaborator.patch().new_block(switch_block);
self.drop_flag_test_block(switch_block, succ, unwind)
}
fn destructor_call_block<'a>(&mut self, (succ, unwind): (BasicBlock, Unwind))
-> BasicBlock
{
debug!("destructor_call_block({:?}, {:?})", self, succ);
let tcx = self.tcx();
let drop_trait = tcx.lang_items().drop_trait().unwrap();
let drop_fn = tcx.associated_items(drop_trait).next().unwrap();
let ty = self.lvalue_ty(self.lvalue);
let substs = tcx.mk_substs(iter::once(Kind::from(ty)));
let ref_ty = tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
ty,
mutbl: hir::Mutability::MutMutable
});
let ref_lvalue = self.new_temp(ref_ty);
let unit_temp = Lvalue::Local(self.new_temp(tcx.mk_nil()));
let result = BasicBlockData {
statements: vec![self.assign(
&Lvalue::Local(ref_lvalue),
Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, self.lvalue.clone())
)],
terminator: Some(Terminator {
kind: TerminatorKind::Call {
func: Operand::function_handle(tcx, drop_fn.def_id, substs,
self.source_info.span),
args: vec![Operand::Consume(Lvalue::Local(ref_lvalue))],
destination: Some((unit_temp, succ)),
cleanup: unwind.into_option(),
},
source_info: self.source_info
}),
is_cleanup: unwind.is_cleanup(),
};
self.elaborator.patch().new_block(result)
}
/// create a loop that drops an array:
///
///
/// loop-block:
/// can_go = cur == length_or_end
/// if can_go then succ else drop-block
/// drop-block:
/// if ptr_based {
/// ptr = cur
/// cur = cur.offset(1)
/// } else {
/// ptr = &mut LV[cur]
/// cur = cur + 1
/// }
/// drop(ptr)
fn drop_loop(&mut self,
succ: BasicBlock,
cur: Local,
length_or_end: &Lvalue<'tcx>,
ety: Ty<'tcx>,
unwind: Unwind,
ptr_based: bool)
-> BasicBlock
{
let use_ = |lv: &Lvalue<'tcx>| Operand::Consume(lv.clone());
let tcx = self.tcx();
let ref_ty = tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
ty: ety,
mutbl: hir::Mutability::MutMutable
});
let ptr = &Lvalue::Local(self.new_temp(ref_ty));
let can_go = &Lvalue::Local(self.new_temp(tcx.types.bool));
let one = self.constant_usize(1);
let (ptr_next, cur_next) = if ptr_based {
(Rvalue::Use(use_(&Lvalue::Local(cur))),
Rvalue::BinaryOp(BinOp::Offset, use_(&Lvalue::Local(cur)), one))
} else {
(Rvalue::Ref(
tcx.types.re_erased,
BorrowKind::Mut,
self.lvalue.clone().index(cur)),
Rvalue::BinaryOp(BinOp::Add, use_(&Lvalue::Local(cur)), one))
};
let drop_block = BasicBlockData {
statements: vec![
self.assign(ptr, ptr_next),
self.assign(&Lvalue::Local(cur), cur_next)
],
is_cleanup: unwind.is_cleanup(),
terminator: Some(Terminator {
source_info: self.source_info,
// this gets overwritten by drop elaboration.
kind: TerminatorKind::Unreachable,
})
};
let drop_block = self.elaborator.patch().new_block(drop_block);
let loop_block = BasicBlockData {
statements: vec![
self.assign(can_go, Rvalue::BinaryOp(BinOp::Eq,
use_(&Lvalue::Local(cur)),
use_(length_or_end)))
],
is_cleanup: unwind.is_cleanup(),
terminator: Some(Terminator {
source_info: self.source_info,
kind: TerminatorKind::if_(tcx, use_(can_go), succ, drop_block)
})
};
let loop_block = self.elaborator.patch().new_block(loop_block);
self.elaborator.patch().patch_terminator(drop_block, TerminatorKind::Drop {
location: ptr.clone().deref(),
target: loop_block,
unwind: unwind.into_option()
});
loop_block
}
fn open_drop_for_array(&mut self, ety: Ty<'tcx>) -> BasicBlock {
debug!("open_drop_for_array({:?})", ety);
// if size_of::<ety>() == 0 {
// index_based_loop
// } else {
// ptr_based_loop
// }
let tcx = self.tcx();
let use_ = |lv: &Lvalue<'tcx>| Operand::Consume(lv.clone());
let size = &Lvalue::Local(self.new_temp(tcx.types.usize));
let size_is_zero = &Lvalue::Local(self.new_temp(tcx.types.bool));
let base_block = BasicBlockData {
statements: vec![
self.assign(size, Rvalue::NullaryOp(NullOp::SizeOf, ety)),
self.assign(size_is_zero, Rvalue::BinaryOp(BinOp::Eq,
use_(size),
self.constant_usize(0)))
],
is_cleanup: self.unwind.is_cleanup(),
terminator: Some(Terminator {
source_info: self.source_info,
kind: TerminatorKind::if_(
tcx,
use_(size_is_zero),
self.drop_loop_pair(ety, false),
self.drop_loop_pair(ety, true)
)
})
};
self.elaborator.patch().new_block(base_block)
}
// create a pair of drop-loops of `lvalue`, which drops its contents
// even in the case of 1 panic. If `ptr_based`, create a pointer loop,
// otherwise create an index loop.
fn drop_loop_pair(&mut self, ety: Ty<'tcx>, ptr_based: bool) -> BasicBlock {
debug!("drop_loop_pair({:?}, {:?})", ety, ptr_based);
let tcx = self.tcx();
let iter_ty = if ptr_based {
tcx.mk_mut_ptr(ety)
} else {
tcx.types.usize
};
let cur = self.new_temp(iter_ty);
let length = Lvalue::Local(self.new_temp(tcx.types.usize));
let length_or_end = if ptr_based {
Lvalue::Local(self.new_temp(iter_ty))
} else {
length.clone()
};
let unwind = self.unwind.map(|unwind| {
self.drop_loop(unwind,
cur,
&length_or_end,
ety,
Unwind::InCleanup,
ptr_based)
});
let succ = self.succ; // FIXME(#6393)
let loop_block = self.drop_loop(
succ,
cur,
&length_or_end,
ety,
unwind,
ptr_based);
let cur = Lvalue::Local(cur);
let zero = self.constant_usize(0);
let mut drop_block_stmts = vec![];
drop_block_stmts.push(self.assign(&length, Rvalue::Len(self.lvalue.clone())));
if ptr_based {
let tmp_ty = tcx.mk_mut_ptr(self.lvalue_ty(self.lvalue));
let tmp = Lvalue::Local(self.new_temp(tmp_ty));
// tmp = &LV;
// cur = tmp as *mut T;
// end = Offset(cur, len);
drop_block_stmts.push(self.assign(&tmp, Rvalue::Ref(
tcx.types.re_erased, BorrowKind::Mut, self.lvalue.clone()
)));
drop_block_stmts.push(self.assign(&cur, Rvalue::Cast(
CastKind::Misc, Operand::Consume(tmp.clone()), iter_ty
)));
drop_block_stmts.push(self.assign(&length_or_end,
Rvalue::BinaryOp(BinOp::Offset,
Operand::Consume(cur.clone()), Operand::Consume(length.clone())
)));
} else {
// index = 0 (length already pushed)
drop_block_stmts.push(self.assign(&cur, Rvalue::Use(zero)));
}
let drop_block = self.elaborator.patch().new_block(BasicBlockData {
statements: drop_block_stmts,
is_cleanup: unwind.is_cleanup(),
terminator: Some(Terminator {
source_info: self.source_info,
kind: TerminatorKind::Goto { target: loop_block }
})
});
// FIXME(#34708): handle partially-dropped array/slice elements.
let reset_block = self.drop_flag_reset_block(DropFlagMode::Deep, drop_block, unwind);
self.drop_flag_test_block(reset_block, succ, unwind)
}
/// The slow-path - create an "open", elaborated drop for a type
/// which is moved-out-of only partially, and patch `bb` to a jump
/// to it. This must not be called on ADTs with a destructor,
/// as these can't be moved-out-of, except for `Box<T>`, which is
/// special-cased.
///
/// This creates a "drop ladder" that drops the needed fields of the
/// ADT, both in the success case or if one of the destructors fail.
fn open_drop<'a>(&mut self) -> BasicBlock {
let ty = self.lvalue_ty(self.lvalue);
match ty.sty {
ty::TyClosure(def_id, substs) |
// Note that `elaborate_drops` only drops the upvars of a generator,
// and this is ok because `open_drop` here can only be reached
// within that own generator's resume function.
// This should only happen for the self argument on the resume function.
            // It effectively only contains upvars until the generator transformation runs.
// See librustc_mir/transform/generator.rs for more details.
ty::TyGenerator(def_id, substs, _) => {
let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx()).collect();
self.open_drop_for_tuple(&tys)
}
ty::TyTuple(tys, _) => {
self.open_drop_for_tuple(tys)
}
ty::TyAdt(def, _) if def.is_box() => {
self.open_drop_for_box(ty.boxed_ty())
}
ty::TyAdt(def, substs) => {
self.open_drop_for_adt(def, substs)
}
ty::TyDynamic(..) => {
let unwind = self.unwind; // FIXME(#6393)
let succ = self.succ;
self.complete_drop(Some(DropFlagMode::Deep), succ, unwind)
}
ty::TyArray(ety, _) | ty::TySlice(ety) => {
self.open_drop_for_array(ety)
}
_ => bug!("open drop from non-ADT `{:?}`", ty)
}
}
    /// Return a basic block that drops an lvalue using the context
    /// and path in `self`. If `drop_mode` is `Some(mode)`, also clear
    /// the drop flag for the path according to `mode`.
///
/// if FLAG(self.path)
/// if let Some(mode) = mode: FLAG(self.path)[mode] = false
/// drop(self.lv)
fn complete_drop<'a>(&mut self,
drop_mode: Option<DropFlagMode>,
succ: BasicBlock,
unwind: Unwind) -> BasicBlock
{
debug!("complete_drop({:?},{:?})", self, drop_mode);
let drop_block = self.drop_block(succ, unwind);
let drop_block = if let Some(mode) = drop_mode {
self.drop_flag_reset_block(mode, drop_block, unwind)
} else {
drop_block
};
self.drop_flag_test_block(drop_block, succ, unwind)
}
fn drop_flag_reset_block(&mut self,
mode: DropFlagMode,
succ: BasicBlock,
unwind: Unwind) -> BasicBlock
{
debug!("drop_flag_reset_block({:?},{:?})", self, mode);
let block = self.new_block(unwind, TerminatorKind::Goto { target: succ });
let block_start = Location { block: block, statement_index: 0 };
self.elaborator.clear_drop_flag(block_start, self.path, mode);
block
}
fn elaborated_drop_block<'a>(&mut self) -> BasicBlock {
debug!("elaborated_drop_block({:?})", self);
let unwind = self.unwind; // FIXME(#6393)
let succ = self.succ;
let blk = self.drop_block(succ, unwind);
self.elaborate_drop(blk);
blk
}
fn box_free_block<'a>(
&mut self,
ty: Ty<'tcx>,
target: BasicBlock,
unwind: Unwind,
) -> BasicBlock {
let block = self.unelaborated_free_block(ty, target, unwind);
self.drop_flag_test_block(block, target, unwind)
}
fn unelaborated_free_block<'a>(
&mut self,
ty: Ty<'tcx>,
target: BasicBlock,
unwind: Unwind
) -> BasicBlock {
let tcx = self.tcx();
let unit_temp = Lvalue::Local(self.new_temp(tcx.mk_nil()));
let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem);
let substs = tcx.mk_substs(iter::once(Kind::from(ty)));
let call = TerminatorKind::Call {
func: Operand::function_handle(tcx, free_func, substs, self.source_info.span),
args: vec![Operand::Consume(self.lvalue.clone())],
destination: Some((unit_temp, target)),
cleanup: None
}; // FIXME(#6393)
let free_block = self.new_block(unwind, call);
let block_start = Location { block: free_block, statement_index: 0 };
self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
free_block
}
fn drop_block<'a>(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
let block = TerminatorKind::Drop {
location: self.lvalue.clone(),
target,
unwind: unwind.into_option()
};
self.new_block(unwind, block)
}
fn drop_flag_test_block(&mut self,
on_set: BasicBlock,
on_unset: BasicBlock,
unwind: Unwind)
-> BasicBlock
{
let style = self.elaborator.drop_style(self.path, DropFlagMode::Shallow);
debug!("drop_flag_test_block({:?},{:?},{:?},{:?}) - {:?}",
self, on_set, on_unset, unwind, style);
match style {
DropStyle::Dead => on_unset,
DropStyle::Static => on_set,
DropStyle::Conditional | DropStyle::Open => {
let flag = self.elaborator.get_drop_flag(self.path).unwrap();
let term = TerminatorKind::if_(self.tcx(), flag, on_set, on_unset);
self.new_block(unwind, term)
}
}
}
fn new_block<'a>(&mut self,
unwind: Unwind,
k: TerminatorKind<'tcx>)
-> BasicBlock
{
self.elaborator.patch().new_block(BasicBlockData {
statements: vec![],
terminator: Some(Terminator {
source_info: self.source_info, kind: k
}),
is_cleanup: unwind.is_cleanup()
})
}
fn new_temp(&mut self, ty: Ty<'tcx>) -> Local {
self.elaborator.patch().new_temp(ty, self.source_info.span)
}
fn terminator_loc(&mut self, bb: BasicBlock) -> Location {
let mir = self.elaborator.mir();
self.elaborator.patch().terminator_loc(mir, bb)
}
fn constant_usize(&self, val: u16) -> Operand<'tcx> {
Operand::Constant(box Constant {
span: self.source_info.span,
ty: self.tcx().types.usize,
literal: Literal::Value {
value: self.tcx().mk_const(ty::Const {
val: ConstVal::Integral(self.tcx().const_usize(val)),
ty: self.tcx().types.usize
})
}
})
}
fn assign(&self, lhs: &Lvalue<'tcx>, rhs: Rvalue<'tcx>) -> Statement<'tcx> {
Statement {
source_info: self.source_info,
kind: StatementKind::Assign(lhs.clone(), rhs)
}
}
}
| 36.974495 | 93 | 0.523065 |
14edc06bfc64c2e4ad520043371b128912b4b22f
| 864 |
#[cfg(target_os = "mac_os")]
use security_framework::os::macos::keychain::SecKeychain;
#[cfg(target_os = "mac_os")]
use security_framework::os::macos::passwords::*;
fn main() {
#[cfg(target_os = "mac_os")] {
let hostname = "example.com";
let username = "rusty";
let password = b"oxidize";
let res = SecKeychain::default().unwrap().set_internet_password(
hostname,
None,
username,
"",
None,
SecProtocolType::HTTPS,
SecAuthenticationType::HTMLForm,
password,
);
match res {
Ok(_) => {
println!(
"Password set for {}@{}. You can read it using find_internet_password example",
username, hostname
);
}
Err(err) => {
eprintln!("Could not set password: {:?}", err);
}
}
    }
}
| 25.411765 | 95 | 0.534722 |
167ca16ebb95f6425cdfb4226d6e37bb5d3a9d4e
| 6,566 |
use crate::prelude::*;
/// A `Brush` defines the fill pattern of shapes.
/// The syntax allows to express fill patterns in several ways:
///
/// * solid colors
/// * colors with alpha channel
/// * gradients of colors
/// * gradients with directions
/// * gradients with angles
///
/// The string declaration of a `Brush` is composed combining the following
/// syntax elements:
///
/// 1. The `color name`
/// 2. The `gradient` string
/// * the gradient type (linear, repeating-linear)
/// * gradient attributes (direction-identifier, angles, color names )
///
/// ## Examples
/// Here are some implementations with declarations of colors, degrees, orientations and directions.
///
/// ```text
/// .foreground("white")
/// .background("black")
/// .background("linear-gradient(0deg, #4b6cb7, #182848)")
/// .background("repeating-linear-gradient(0.25turn, rgba(255, 255, 0, 0.6), dodgerblue, deepskyblue)")
/// .background("linear-gradient(-90deg, hsv(201, 94%, 80.5%), steelblue)")
/// .background("linear-gradient(to top right, white, skyblue 60%, lightskyblue 80%, yellow 83%, yellow)")
/// ```
/// Read on to see how the syntax is composed.
///
/// ## Definition of a color name
/// With the given implementation you can choose between three methods
/// to define a color.
///
/// A. `color codes`
///
/// You can define the value of a color with a symbol "#" followed
/// by letters or numbers. These numbers are in hexadecimal numeral system.
/// The short variant will use 3 numbers , the long variant will use 6
/// numbers.
/// For example `#f00` will give you red. If you write `#0000ff`, you will
/// get blue.
/// To include an alpha channel, the short variant takes 4 numbers.
/// If you need a yellow with 50.2% opaque, you use `#ff08`.
/// In the long form you need 8 numbers. `#0000ff80` represents 50.2% opaque
/// (non-premultiplied) blue.
///
/// B. `color function`
///
/// Currently the available functions that interpret a color are
/// distinguished by the keywords `rgb`, `hsv`, `hsb`, `hsl`. There are
/// `alpha variants` as well. `hsb` is an alias to `hsv`.
/// Alpha variants are coded with the keywords `rgba`, `abgr` or `argb`.
/// Here is an example to define a color via the function method:
/// `hsl(197, 71%, 73%)` will provide you a pretty skyblue color.
/// For `rgb` and `rgba` the range of the values are 0-255.
/// Any other keyword will use floating point integers to define the color
/// value. `hsva(0.0-360.0, 0.0-1.0, 0.0-1.0, 0.0-1.0)` is such an example.
/// In addition you can choose to use percent values (`%` sign) for the given
/// parameters.
/// When appending the `%` sign to the range parameters of the `rgb` function
/// call, the values are mapped to 0.0-100.0 (percent) or 0.0-1.0 (min/max).
/// For all other keywords (`hsv`, `hsb`, `hsl`) you are not allowed to append
/// the percent sign to the first parameter. If you append `%` to the following
/// parameters, OrbTk will interpret the values in a range between `0.0-100.0`.
///
/// C. `color name`
///
/// **WIP: The given implementation is using (utils/colors.txt). This has to be adapted!!!**
///
/// OrbTK maintains color names as constants [`utils::const_colors`]. It enables
/// you, to directly choose their string value inside the code.
/// Example color names are:
///
/// * COLOR_WHITE
/// * COLOR_RED
/// * COLOR_OLIVE
/// * COLOR_LINK_WATER
/// * COLOR_SLATE_GRAY
///
/// ## Definition of a gradient
/// The syntax of a gradient definition is structured as follows:
///
/// * Optional parameters are inside brackets (`[]`).
/// * Within braces (`{}`) you define the appropriate parameter value.
/// * The pipe (`|`) is offering mutual exclusive variants
/// e.g: degrees(deg), radians(rad) or turns(turn).
/// * Three points (`...`) refer to multiple stops.
/// They are respected when a gradient is rendered.
///
/// To understand gradient directions, imagine a line or vector that
/// starts at a given point inside the entity and points to an
/// imaginary target point within the same entity. Gradients will be
/// rendered along the chosen direction to reach its target
/// point. Supported directions are:
///
/// * "to bottom"
/// * "to bottom left"
/// * "to bottom right"
/// * "to left"
/// * "to right"
/// * "to top
/// * "to top left"
/// * "to top right"
///
/// Displacement points tell the gradient algorithm to add
/// (`positive`) or subtract (`negative`) the given pixel numbers
/// from the original starting point.
///
/// Lets look at some examples. The first one shows the
/// structure of an angled gradient
///
/// ```text
/// [repeating-]linear-gradient({Gradient-angle}{deg|rad|turn}, ...) [{X Displacement}px {Y Displacement}px], {Color} [{Stop position}{%|px}]
/// ```
///
/// The next example shows the structure of a gradient that will be
/// rendered in a given direction
///
/// ```text
/// [repeating-]linear-gradient({direction-identifier}, {initial color-name}, {terminating color-name})
/// ```
///
//#[cfg(feature = "nightly")]
//#[doc(include = "../colors.md")]
#[derive(Clone, PartialEq, Debug)]
pub enum Brush {
/// Paints an area with a solid color.
SolidColor(Color),
/// Paints an area with a gradient.
Gradient(Gradient),
}
impl Brush {
pub fn is_transparent(&self) -> bool {
match self {
Brush::SolidColor(color) => color.a() == 0,
_ => false,
}
}
}
impl From<Brush> for Color {
fn from(b: Brush) -> Color {
match b {
Brush::SolidColor(color) => color,
_ => Color::rgb(0, 0, 0),
}
}
}
impl From<Brush> for Gradient {
fn from(b: Brush) -> Gradient {
match b {
Brush::Gradient(g) => g,
_ => Gradient::default(),
}
}
}
impl Default for Brush {
fn default() -> Self {
Brush::SolidColor(Color::rgba(0, 0, 0, 0))
}
}
impl From<Color> for Brush {
fn from(c: Color) -> Brush {
Brush::SolidColor(c)
}
}
impl From<Gradient> for Brush {
fn from(g: Gradient) -> Brush {
Brush::Gradient(g)
}
}
impl From<&str> for Brush {
fn from(s: &str) -> Brush {
Expression::from(s).brush().unwrap_or_default()
}
}
impl From<String> for Brush {
fn from(s: String) -> Brush {
Self::from(&s[..])
}
}
impl From<Value> for Brush {
fn from(v: Value) -> Self {
let value = v.get::<String>();
Brush::from(value)
}
}
#[cfg(test)]
mod tests {
// use crate::prelude::*;
// todo: tbd after brush struct is finished
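    // A first minimal check (sketch): the default brush is a fully
    // transparent solid color, so `is_transparent` must hold for it.
    #[test]
    fn default_brush_is_transparent() {
        assert!(super::Brush::default().is_transparent());
    }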
}
| 31.719807 | 142 | 0.63509 |
fb7b785edd333eff31793043755d40944b64518d
| 11,539 |
//! Abstractions for page tables and page table entries.
use core::fmt;
use core::ops::{Index, IndexMut};
use super::{PageSize, PhysFrame, Size4KiB};
use crate::addr::PhysAddr;
use bitflags::bitflags;
/// The error returned by the `PageTableEntry::frame` method.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum FrameError {
/// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame.
FrameNotPresent,
/// The entry does have the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame
/// as return type, so a huge frame can't be returned.
HugeFrame,
}
/// A 64-bit page table entry.
#[derive(Clone)]
#[repr(transparent)]
pub struct PageTableEntry {
entry: u64,
}
impl PageTableEntry {
/// Creates an unused page table entry.
#[inline]
pub const fn new() -> Self {
PageTableEntry { entry: 0 }
}
/// Returns whether this entry is zero.
#[inline]
pub const fn is_unused(&self) -> bool {
self.entry == 0
}
/// Sets this entry to zero.
#[inline]
pub fn set_unused(&mut self) {
self.entry = 0;
}
/// Returns the flags of this entry.
#[inline]
pub const fn flags(&self) -> PageTableFlags {
PageTableFlags::from_bits_truncate(self.entry)
}
/// Returns the physical address mapped by this entry, might be zero.
#[inline]
pub fn addr(&self) -> PhysAddr {
PhysAddr::new(self.entry & 0x000fffff_fffff000)
}
/// Returns the physical frame mapped by this entry.
///
/// Returns the following errors:
///
/// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set.
/// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
/// `addr` function must be used)
#[inline]
pub fn frame(&self) -> Result<PhysFrame, FrameError> {
if !self.flags().contains(PageTableFlags::PRESENT) {
Err(FrameError::FrameNotPresent)
} else if self.flags().contains(PageTableFlags::HUGE_PAGE) {
Err(FrameError::HugeFrame)
} else {
Ok(PhysFrame::containing_address(self.addr()))
}
}
/// Map the entry to the specified physical address with the specified flags.
#[inline]
pub fn set_addr(&mut self, addr: PhysAddr, flags: PageTableFlags) {
assert!(addr.is_aligned(Size4KiB::SIZE));
self.entry = (addr.as_u64()) | flags.bits();
}
/// Map the entry to the specified physical frame with the specified flags.
#[inline]
pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) {
assert!(!flags.contains(PageTableFlags::HUGE_PAGE));
self.set_addr(frame.start_address(), flags)
}
/// Sets the flags of this entry.
#[inline]
pub fn set_flags(&mut self, flags: PageTableFlags) {
self.entry = self.addr().as_u64() | flags.bits();
}
}
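// A minimal round-trip sketch (the address constant is illustrative; any
// 4KiB-aligned physical address works): setting an address and flags and
// reading them back through `addr`/`flags`.
#[cfg(test)]
mod page_table_entry_sketch {
    use super::*;

    #[test]
    fn addr_and_flags_round_trip() {
        let mut entry = PageTableEntry::new();
        entry.set_addr(
            PhysAddr::new(0xdead_b000),
            PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
        );
        assert_eq!(entry.addr(), PhysAddr::new(0xdead_b000));
        assert!(entry
            .flags()
            .contains(PageTableFlags::PRESENT | PageTableFlags::WRITABLE));
    }
}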
impl fmt::Debug for PageTableEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut f = f.debug_struct("PageTableEntry");
f.field("addr", &self.addr());
f.field("flags", &self.flags());
f.finish()
}
}
bitflags! {
/// Possible flags for a page table entry.
pub struct PageTableFlags: u64 {
/// Specifies whether the mapped frame or page table is loaded in memory.
const PRESENT = 1;
/// Controls whether writes to the mapped frames are allowed.
///
/// If this bit is unset in a level 1 page table entry, the mapped frame is read-only.
/// If this bit is unset in a higher level page table entry the complete range of mapped
/// pages is read-only.
const WRITABLE = 1 << 1;
/// Controls whether accesses from userspace (i.e. ring 3) are permitted.
const USER_ACCESSIBLE = 1 << 2;
/// If this bit is set, a “write-through” policy is used for the cache, else a “write-back”
/// policy is used.
const WRITE_THROUGH = 1 << 3;
        /// Disables caching for the pointed entry, if it is cacheable.
const NO_CACHE = 1 << 4;
/// Set by the CPU when the mapped frame or page table is accessed.
const ACCESSED = 1 << 5;
/// Set by the CPU on a write to the mapped frame.
const DIRTY = 1 << 6;
/// Specifies that the entry maps a huge frame instead of a page table. Only allowed in
/// P2 or P3 tables.
const HUGE_PAGE = 1 << 7;
/// Indicates that the mapping is present in all address spaces, so it isn't flushed from
/// the TLB on an address space switch.
const GLOBAL = 1 << 8;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_9 = 1 << 9;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_10 = 1 << 10;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_11 = 1 << 11;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_52 = 1 << 52;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_53 = 1 << 53;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_54 = 1 << 54;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_55 = 1 << 55;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_56 = 1 << 56;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_57 = 1 << 57;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_58 = 1 << 58;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_59 = 1 << 59;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_60 = 1 << 60;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_61 = 1 << 61;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_62 = 1 << 62;
/// Forbid code execution from the mapped frames.
///
/// Can be only used when the no-execute page protection feature is enabled in the EFER
/// register.
const NO_EXECUTE = 1 << 63;
}
}
/// The number of entries in a page table.
const ENTRY_COUNT: usize = 512;
/// Represents a page table.
///
/// Always page-sized.
///
/// This struct implements the `Index` and `IndexMut` traits, so the entries can be accessed
/// through index operations. For example, `page_table[15]` returns the 15th page table entry.
#[repr(align(4096))]
#[repr(C)]
pub struct PageTable {
entries: [PageTableEntry; ENTRY_COUNT],
}
impl PageTable {
/// Creates an empty page table.
#[cfg(feature = "const_fn")]
#[inline]
pub const fn new() -> Self {
const EMPTY: PageTableEntry = PageTableEntry::new();
PageTable {
entries: [EMPTY; ENTRY_COUNT],
}
}
/// Creates an empty page table.
#[cfg(not(feature = "const_fn"))]
#[inline]
pub fn new() -> Self {
PageTable {
entries: array_init::array_init(|_| PageTableEntry::new()),
}
}
/// Clears all entries.
#[inline]
pub fn zero(&mut self) {
for entry in self.entries.iter_mut() {
entry.set_unused();
}
}
/// Returns an iterator over the entries of the page table.
#[inline]
pub fn iter(&self) -> impl Iterator<Item = &PageTableEntry> {
self.entries.iter()
}
/// Returns an iterator that allows modifying the entries of the page table.
#[inline]
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
self.entries.iter_mut()
}
}
impl Index<usize> for PageTable {
type Output = PageTableEntry;
#[inline]
fn index(&self, index: usize) -> &Self::Output {
&self.entries[index]
}
}
impl IndexMut<usize> for PageTable {
#[inline]
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.entries[index]
}
}
impl Index<PageTableIndex> for PageTable {
type Output = PageTableEntry;
#[inline]
fn index(&self, index: PageTableIndex) -> &Self::Output {
&self.entries[usize::from(index)]
}
}
impl IndexMut<PageTableIndex> for PageTable {
#[inline]
fn index_mut(&mut self, index: PageTableIndex) -> &mut Self::Output {
&mut self.entries[usize::from(index)]
}
}
impl fmt::Debug for PageTable {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.entries[..].fmt(f)
}
}
/// A 9-bit index into a page table.
///
/// Can be used to select one of the 512 entries of a page table.
///
/// Guaranteed to only ever contain 0..512.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct PageTableIndex(u16);
impl PageTableIndex {
/// Creates a new index from the given `u16`. Panics if the given value is >=512.
#[inline]
pub fn new(index: u16) -> Self {
assert!(usize::from(index) < ENTRY_COUNT);
Self(index)
}
/// Creates a new index from the given `u16`. Throws away bits if the value is >=512.
#[inline]
pub const fn new_truncate(index: u16) -> Self {
Self(index % ENTRY_COUNT as u16)
}
}
impl From<PageTableIndex> for u16 {
#[inline]
fn from(index: PageTableIndex) -> Self {
index.0
}
}
impl From<PageTableIndex> for u32 {
#[inline]
fn from(index: PageTableIndex) -> Self {
u32::from(index.0)
}
}
impl From<PageTableIndex> for u64 {
#[inline]
fn from(index: PageTableIndex) -> Self {
u64::from(index.0)
}
}
impl From<PageTableIndex> for usize {
#[inline]
fn from(index: PageTableIndex) -> Self {
usize::from(index.0)
}
}
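// A small sketch of the truncating constructor: only the low nine bits
// survive, so out-of-range values wrap modulo `ENTRY_COUNT`.
#[cfg(test)]
mod page_table_index_sketch {
    use super::PageTableIndex;

    #[test]
    fn new_truncate_wraps_modulo_512() {
        assert_eq!(u16::from(PageTableIndex::new_truncate(512)), 0);
        assert_eq!(u16::from(PageTableIndex::new_truncate(513)), 1);
    }
}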
/// A 12-bit offset into a 4KiB Page.
///
/// This type is returned by the `VirtAddr::page_offset` method.
///
/// Guaranteed to only ever contain 0..4096.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct PageOffset(u16);
impl PageOffset {
/// Creates a new offset from the given `u16`. Panics if the passed value is >=4096.
#[inline]
pub fn new(offset: u16) -> Self {
assert!(offset < (1 << 12));
Self(offset)
}
/// Creates a new offset from the given `u16`. Throws away bits if the value is >=4096.
#[inline]
pub const fn new_truncate(offset: u16) -> Self {
Self(offset % (1 << 12))
}
}
impl From<PageOffset> for u16 {
#[inline]
fn from(offset: PageOffset) -> Self {
offset.0
}
}
impl From<PageOffset> for u32 {
#[inline]
fn from(offset: PageOffset) -> Self {
u32::from(offset.0)
}
}
impl From<PageOffset> for u64 {
#[inline]
fn from(offset: PageOffset) -> Self {
u64::from(offset.0)
}
}
impl From<PageOffset> for usize {
#[inline]
fn from(offset: PageOffset) -> Self {
usize::from(offset.0)
}
}
| 31.441417 | 99 | 0.599012 |
750bb27bb45fb47f99d15320730fb48d90ac8c9d
| 1,429 |
// Take a look at the license at the top of the repository in the LICENSE file.
use glib::subclass::prelude::*;
use glib::translate::*;
use glib::Cast;
use super::window::WindowImpl;
use crate::Plug;
pub trait PlugImpl: PlugImplExt + WindowImpl {
fn embedded(&self, plug: &Self::Type) {
self.parent_embedded(plug)
}
}
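// Illustrative implementor (sketch; `MyPlug` is hypothetical): override
// `embedded` for custom behaviour and chain up to keep the default handling.
//
// impl PlugImpl for MyPlug {
//     fn embedded(&self, plug: &Self::Type) {
//         // react to the plug being embedded into a socket …
//         self.parent_embedded(plug);
//     }
// }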
pub trait PlugImplExt: ObjectSubclass {
fn parent_embedded(&self, plug: &Self::Type);
}
impl<T: PlugImpl> PlugImplExt for T {
fn parent_embedded(&self, plug: &Self::Type) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkPlugClass;
if let Some(f) = (*parent_class).embedded {
f(plug.unsafe_cast_ref::<Plug>().to_glib_none().0)
}
}
}
}
unsafe impl<T: PlugImpl> IsSubclassable<T> for Plug {
fn class_init(class: &mut ::glib::Class<Self>) {
Self::parent_class_init::<T>(class);
if !crate::rt::is_initialized() {
panic!("GTK has to be initialized first");
}
let klass = class.as_mut();
klass.embedded = Some(plug_embedded::<T>);
}
}
unsafe extern "C" fn plug_embedded<T: PlugImpl>(ptr: *mut ffi::GtkPlug) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<Plug> = from_glib_borrow(ptr);
imp.embedded(wrap.unsafe_cast_ref())
}
| 26.962264 | 86 | 0.614416 |
11c42810eda6775d2a79c34a2ac7b39e7e2b885a
| 2,826 |
use crate::{commands::dataframe::utils::parse_polars_error, prelude::*};
use nu_engine::WholeStreamCommand;
use nu_errors::ShellError;
use nu_protocol::{dataframe::NuDataFrame, Signature, SyntaxShape};
use nu_source::Tagged;
pub struct DataFrame;
impl WholeStreamCommand for DataFrame {
fn name(&self) -> &str {
"pls sample"
}
fn usage(&self) -> &str {
"Create sample dataframe"
}
fn signature(&self) -> Signature {
Signature::build("pls load")
.named(
"n_rows",
SyntaxShape::Number,
"number of rows to be taken from dataframe",
Some('n'),
)
.named(
"fraction",
SyntaxShape::Number,
"fraction of dataframe to be taken",
Some('f'),
)
.switch("replace", "sample with replace", Some('e'))
}
fn run(&self, args: CommandArgs) -> Result<OutputStream, ShellError> {
command(args)
}
fn examples(&self) -> Vec<Example> {
vec![
Example {
description: "Sample rows from dataframe",
example: "[[a b]; [1 2] [3 4]] | pls to-df | pls sample -r 1",
result: None,
},
Example {
description: "Shows sample row using fraction and replace",
example: "[[a b]; [1 2] [3 4] [5 6]] | pls to-df | pls sample -f 0.5 -e",
result: None,
},
]
}
}
fn command(mut args: CommandArgs) -> Result<OutputStream, ShellError> {
let tag = args.call_info.name_tag.clone();
let rows: Option<Tagged<usize>> = args.get_flag("n_rows")?;
let fraction: Option<Tagged<f64>> = args.get_flag("fraction")?;
let replace: bool = args.has_flag("replace");
let df = NuDataFrame::try_from_stream(&mut args.input, &tag.span)?;
let res = match (rows, fraction) {
(Some(rows), None) => df
.as_ref()
.sample_n(rows.item, replace)
.map_err(|e| parse_polars_error::<&str>(&e, &rows.tag.span, None)),
(None, Some(frac)) => df
.as_ref()
.sample_frac(frac.item, replace)
.map_err(|e| parse_polars_error::<&str>(&e, &frac.tag.span, None)),
(Some(_), Some(_)) => Err(ShellError::labeled_error(
"Incompatible flags",
"Only one selection criterion allowed",
&tag,
)),
(None, None) => Err(ShellError::labeled_error_with_secondary(
"No selection",
"No selection criterion was found",
&tag,
"Perhaps you want to use the flag -n or -f",
&tag,
)),
}?;
Ok(OutputStream::one(NuDataFrame::dataframe_to_value(res, tag)))
}
| 31.4 | 89 | 0.528662 |
f815cb57c3e005c9ff785c99673197d90f7e8299
| 3,418 |
// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
use super::dispatch_json::{JsonOp, Value};
use crate::ops::json_op;
use crate::state::ThreadSafeState;
use deno_core::*;
#[cfg(unix)]
use super::dispatch_json::Deserialize;
#[cfg(unix)]
use crate::deno_error::bad_resource;
#[cfg(unix)]
use futures::future::{poll_fn, FutureExt};
#[cfg(unix)]
use serde_json;
#[cfg(unix)]
use std::task::Waker;
#[cfg(unix)]
use tokio::signal::unix::{signal, Signal, SignalKind};
pub fn init(i: &mut Isolate, s: &ThreadSafeState) {
i.register_op(
"signal_bind",
s.core_op(json_op(s.stateful_op(op_signal_bind))),
);
i.register_op(
"signal_unbind",
s.core_op(json_op(s.stateful_op(op_signal_unbind))),
);
i.register_op(
"signal_poll",
s.core_op(json_op(s.stateful_op(op_signal_poll))),
);
}
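// Expected call sequence from the JS side (illustrative JSON shapes):
// 1. "signal_bind" with `{ "signo": <n> }` answers `{ "rid": <rid> }`;
// 2. "signal_poll" with `{ "rid": <rid> }` resolves `{ "done": false }`
//    every time the signal fires, and `{ "done": true }` once unbound;
// 3. "signal_unbind" with `{ "rid": <rid> }` closes the resource.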
#[cfg(unix)]
/// The resource for signal stream.
/// The second element is the waker of polling future.
pub struct SignalStreamResource(pub Signal, pub Option<Waker>);
#[cfg(unix)]
#[derive(Deserialize)]
struct BindSignalArgs {
signo: i32,
}
#[cfg(unix)]
#[derive(Deserialize)]
struct SignalArgs {
rid: i32,
}
#[cfg(unix)]
fn op_signal_bind(
state: &ThreadSafeState,
args: Value,
_zero_copy: Option<ZeroCopyBuf>,
) -> Result<JsonOp, ErrBox> {
let args: BindSignalArgs = serde_json::from_value(args)?;
let mut table = state.lock_resource_table();
let rid = table.add(
"signal",
Box::new(SignalStreamResource(
      signal(SignalKind::from_raw(args.signo)).expect("failed to bind signal"),
None,
)),
);
Ok(JsonOp::Sync(json!({
"rid": rid,
})))
}
#[cfg(unix)]
fn op_signal_poll(
state: &ThreadSafeState,
args: Value,
_zero_copy: Option<ZeroCopyBuf>,
) -> Result<JsonOp, ErrBox> {
let args: SignalArgs = serde_json::from_value(args)?;
let rid = args.rid as u32;
let state_ = state.clone();
let future = poll_fn(move |cx| {
let mut table = state_.lock_resource_table();
if let Some(mut signal) = table.get_mut::<SignalStreamResource>(rid) {
signal.1 = Some(cx.waker().clone());
return signal.0.poll_recv(cx);
}
std::task::Poll::Ready(None)
})
.then(|result| async move { Ok(json!({ "done": result.is_none() })) });
Ok(JsonOp::AsyncUnref(future.boxed_local()))
}
#[cfg(unix)]
pub fn op_signal_unbind(
state: &ThreadSafeState,
args: Value,
_zero_copy: Option<ZeroCopyBuf>,
) -> Result<JsonOp, ErrBox> {
let args: SignalArgs = serde_json::from_value(args)?;
let rid = args.rid as u32;
let mut table = state.lock_resource_table();
let resource = table.get::<SignalStreamResource>(rid);
if let Some(signal) = resource {
if let Some(waker) = &signal.1 {
// Wakes up the pending poll if exists.
// This prevents the poll future from getting stuck forever.
waker.clone().wake();
}
}
table.close(rid).ok_or_else(bad_resource)?;
Ok(JsonOp::Sync(json!({})))
}
#[cfg(not(unix))]
pub fn op_signal_bind(
_state: &ThreadSafeState,
_args: Value,
_zero_copy: Option<ZeroCopyBuf>,
) -> Result<JsonOp, ErrBox> {
unimplemented!();
}
#[cfg(not(unix))]
fn op_signal_unbind(
_state: &ThreadSafeState,
_args: Value,
_zero_copy: Option<ZeroCopyBuf>,
) -> Result<JsonOp, ErrBox> {
unimplemented!();
}
#[cfg(not(unix))]
fn op_signal_poll(
_state: &ThreadSafeState,
_args: Value,
_zero_copy: Option<ZeroCopyBuf>,
) -> Result<JsonOp, ErrBox> {
unimplemented!();
}
| 24.070423 | 74 | 0.672908 |
50d6cc9b513d5de847aa0ed139a65df7334aa687
| 663 |
// Copyright 2020 Tetrate
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use self::logger::SampleAccessLogger;
mod config;
mod logger;
mod stats;
| 33.15 | 75 | 0.748115 |
221a81d512a54bfe4ba3bede4fe0921f01d0b437
| 4,983 |
/*
* Copyright 2015-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::ffi::CStr;
use std::os::raw::c_char;
use std::path::PathBuf;
use std::str::Split;
#[derive(Debug, PartialEq)]
pub struct Parameters {
pub count_threshold: usize,
pub heap_dump_path: Option<PathBuf>,
pub heap_histogram_max_entries: usize,
pub print_heap_histogram: bool,
pub print_memory_usage: bool,
pub time_threshold: usize,
}
impl Parameters {
pub fn new(options: *const c_char) -> Parameters {
let mut p = Parameters { ..Default::default() };
        if options.is_null() {
return p;
}
let s = String::from(unsafe { CStr::from_ptr(options) }
.to_string_lossy());
for o in Parameters::parse_options(&s) {
if o.is_empty() {
continue;
}
let (key, value) = Parameters::parse_option(o);
match key {
"count" => p.count_threshold = value.parse().expect("option value must be a number"),
"heapDumpPath" => p.heap_dump_path = Some(PathBuf::from(value)),
"heapHistogramMaxEntries" => p.heap_histogram_max_entries = value.parse().expect("option value must be a number"),
"printHeapHistogram" => p.print_heap_histogram = value.parse::<usize>().expect("option value must be a number") != 0,
"printMemoryUsage" => p.print_memory_usage = value.parse::<usize>().expect("option value must be a number") != 0,
"time" => p.time_threshold = value.parse().expect("option value must be a number"),
_ => assert!(false, "unknown option: {}", key),
}
}
return p;
}
fn parse_option(s: &str) -> (&str, &str) {
let v: Vec<&str> = s.splitn(2, "=").collect();
assert_eq!(v.len(), 2, "invalid option: {}", s);
assert!(!v[0].is_empty(), "invalid key: {}", s);
assert!(!v[1].is_empty(), "invalid value: {}", s);
return (v[0], v[1]);
}
fn parse_options(s: &String) -> Split<char> {
return s.split(',');
}
}
impl Default for Parameters {
fn default() -> Self {
return Self {
count_threshold: 0,
heap_dump_path: None,
heap_histogram_max_entries: 100,
print_heap_histogram: false,
print_memory_usage: true,
time_threshold: 1,
};
}
}
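// Illustrative option string (sketch):
// "printHeapHistogram=1,heapDumpPath=/tmp,count=3" enables the histogram,
// points heap dumps at /tmp, and sets the count threshold to 3.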
#[cfg(test)]
mod tests {
use std::ffi::CString;
use std::path::PathBuf;
use crate::context::Parameters;
#[test]
fn default_values() {
let p = Parameters { ..Default::default() };
assert_eq!(p, Parameters {
count_threshold: 0,
heap_dump_path: None,
heap_histogram_max_entries: 100,
print_heap_histogram: false,
print_memory_usage: true,
time_threshold: 1,
});
}
#[test]
#[should_panic(expected = "invalid key: =test-value")]
fn empty_key() {
create("=test-value");
}
#[test]
#[should_panic(expected = "invalid option: test")]
fn empty_option() {
create("test");
}
#[test]
#[should_panic(expected = "invalid value: test-key=")]
fn empty_value() {
create("test-key=");
}
#[test]
#[should_panic(expected = "unknown option: test-key")]
fn invalid_option() {
create("test-key=test-value");
}
#[test]
fn parses_count() {
assert_eq!(create("count=42").count_threshold, 42);
}
#[test]
fn parses_heap_dump_path() {
assert_eq!(create("heapDumpPath=/test").heap_dump_path, Some(PathBuf::from("/test")));
}
#[test]
fn parses_heap_histogram_max_entries() {
assert_eq!(create("heapHistogramMaxEntries=42").heap_histogram_max_entries, 42);
}
#[test]
fn parses_print_heap_histogram() {
assert_eq!(create("printHeapHistogram=0").print_heap_histogram, false);
}
#[test]
fn parses_print_memory_usage() {
assert_eq!(create("printMemoryUsage=0").print_memory_usage, false);
}
#[test]
fn parses_time() {
assert_eq!(create("time=42").time_threshold, 42);
}
fn create(s: &str) -> Parameters {
let options = CString::new(s)
.expect("cannot convert to CString");
return Parameters::new(options.as_ptr());
}
}
| 28.803468 | 133 | 0.589003 |
f8f6a4d55988feeb20555ec1f0089bed3afd3972
| 1,416 |
use crate::Error;
use rand::Rng;
use std::marker::PhantomData;
use super::{
pedersen::{PedersenCommitment, PedersenParameters, PedersenRandomness, PedersenWindow},
CommitmentScheme,
};
pub use crate::crh::injective_map::InjectiveMap;
use algebra::groups::Group;
#[cfg(feature = "r1cs")]
pub mod constraints;
pub struct PedersenCommCompressor<G: Group, I: InjectiveMap<G>, W: PedersenWindow> {
_group: PhantomData<G>,
_compressor: PhantomData<I>,
_comm: PedersenCommitment<G, W>,
}
impl<G: Group, I: InjectiveMap<G>, W: PedersenWindow> CommitmentScheme
for PedersenCommCompressor<G, I, W>
{
type Output = I::Output;
type Parameters = PedersenParameters<G>;
type Randomness = PedersenRandomness<G>;
fn setup<R: Rng>(rng: &mut R) -> Result<Self::Parameters, Error> {
        let time = start_timer!(|| "PedersenCompressor::Setup");
let params = PedersenCommitment::<G, W>::setup(rng);
end_timer!(time);
params
}
fn commit(
parameters: &Self::Parameters,
input: &[u8],
randomness: &Self::Randomness,
) -> Result<Self::Output, Error> {
let eval_time = start_timer!(|| "PedersenCompressor::Eval");
let result = I::injective_map(&PedersenCommitment::<G, W>::commit(
parameters, input, randomness,
)?)?;
end_timer!(eval_time);
Ok(result)
}
}
| 29.5 | 91 | 0.642655 |
f7821cf385754e686d15eb5430a4e1a55cbf9d62
| 3,076 |
use crate::common::*;
use std::mem;
/// Construct a `Tree` from a symbolic expression literal. This macro, and the
/// Tree type, are only used in the Parser unit tests, as a concise notation
/// representing the expected results of parsing a given string.
macro_rules! tree {
{
($($child:tt)*)
} => {
$crate::tree::Tree::List(vec![$(tree!($child),)*])
};
{
$atom:ident
} => {
$crate::tree::Tree::atom(stringify!($atom))
};
{
$atom:literal
} => {
$crate::tree::Tree::atom(format!("\"{}\"", $atom))
};
{
#
} => {
$crate::tree::Tree::atom("#")
};
{
+
} => {
$crate::tree::Tree::atom("+")
};
}
/// A `Tree` is either…
#[derive(Debug, PartialEq)]
pub(crate) enum Tree<'text> {
/// …an atom containing text, or…
Atom(Cow<'text, str>),
/// …a list containing zero or more `Tree`s.
List(Vec<Tree<'text>>),
}
impl<'text> Tree<'text> {
/// Construct an Atom from a text scalar
pub(crate) fn atom(text: impl Into<Cow<'text, str>>) -> Tree<'text> {
Tree::Atom(text.into())
}
/// Construct a List from an iterable of trees
pub(crate) fn list(children: impl IntoIterator<Item = Tree<'text>>) -> Tree<'text> {
Tree::List(children.into_iter().collect())
}
/// Convenience function to create an atom containing quoted text
pub(crate) fn string(contents: impl AsRef<str>) -> Tree<'text> {
Tree::atom(format!("\"{}\"", contents.as_ref()))
}
/// Push a child node into self, turning it into a List if it was an Atom
pub(crate) fn push(self, tree: impl Into<Tree<'text>>) -> Tree<'text> {
match self {
Tree::List(mut children) => {
children.push(tree.into());
Tree::List(children)
},
Tree::Atom(text) => Tree::List(vec![Tree::Atom(text), tree.into()]),
}
}
/// Extend a self with a tail of Trees, turning self into a List if it was an
/// Atom
pub(crate) fn extend<I, T>(self, tail: I) -> Tree<'text>
where
I: IntoIterator<Item = T>,
T: Into<Tree<'text>>,
{
let mut head = match self {
Tree::List(children) => children,
Tree::Atom(text) => vec![Tree::Atom(text)],
};
for child in tail {
head.push(child.into());
}
Tree::List(head)
}
/// Like `push`, but modify self in-place
pub(crate) fn push_mut(&mut self, tree: impl Into<Tree<'text>>) {
let tree = mem::replace(self, Tree::List(Vec::new())).push(tree.into());
    *self = tree;
}
}
impl Display for Tree<'_> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
Tree::List(children) => {
write!(f, "(")?;
for (i, child) in children.iter().enumerate() {
if i > 0 {
write!(f, " ")?;
}
write!(f, "{}", child)?;
}
write!(f, ")")
},
Tree::Atom(text) => write!(f, "{}", text),
}
}
}
impl<'text, T> From<T> for Tree<'text>
where
T: Into<Cow<'text, str>>,
{
fn from(text: T) -> Tree<'text> {
Tree::Atom(text.into())
}
}
| 23.480916 | 86 | 0.553966 |
f80016cbcff7c19432ecee51c572b89e5ab0c674
| 5,982 |
use std::io;
use std::net::{IpAddr, SocketAddr};
use futures::stream::Stream;
use futures::{self, Future, IntoFuture};
use tokio::net;
use endpoint::{Endpoint, ToEndpoint};
use resolver::{CpuPoolResolver, Resolver};
use {boxed, IoFuture};
lazy_static! {
static ref POOL: CpuPoolResolver = CpuPoolResolver::new(5);
}
/// Resolve a hostname to a sequence of ip addresses using the default resolver.
#[deprecated(since = "0.4.0", note = "used `resolve_ip_addr` instead")]
pub fn resolve(host: &str) -> IoFuture<Vec<IpAddr>> {
resolve_ip_addr(host)
}
/// Resolve a hostname to a sequence of ip addresses using the default resolver.
///
/// # Example
/// ```
/// tokio_dns::resolve_ip_addr("rust-lang.org");
/// ```
pub fn resolve_ip_addr(host: &str) -> IoFuture<Vec<IpAddr>> {
POOL.resolve(host)
}
/// Resolve a hostname to a sequence of ip addresses using a custom resolver.
///
/// # Example
/// ```
/// # use tokio_dns::CpuPoolResolver;
/// let resolver = CpuPoolResolver::new(10);
///
/// tokio_dns::resolve_ip_addr_with("rust-lang.org", resolver.clone());
/// ```
pub fn resolve_ip_addr_with<R>(host: &str, resolver: R) -> IoFuture<Vec<IpAddr>>
where
R: Resolver,
{
resolver.resolve(host)
}
/// Resolve an endpoint to a sequence of socket addresses using the default resolver.
///
/// # Example
/// ```
/// tokio_dns::resolve_sock_addr(("rust-lang.org", 80));
/// ```
pub fn resolve_sock_addr<'a, T>(endpoint: T) -> IoFuture<Vec<SocketAddr>>
where
T: ToEndpoint<'a>,
{
resolve_endpoint(endpoint, POOL.clone())
}
/// Resolve an endpoint to a sequence of socket addresses using a custom resolver.
///
/// # Example
/// ```
/// # use tokio_dns::CpuPoolResolver;
/// let resolver = CpuPoolResolver::new(10);
///
/// tokio_dns::resolve_sock_addr_with(("rust-lang.org", 80), resolver.clone());
/// ```
pub fn resolve_sock_addr_with<'a, T, R>(endpoint: T, resolver: R) -> IoFuture<Vec<SocketAddr>>
where
T: ToEndpoint<'a>,
R: Resolver,
{
resolve_endpoint(endpoint, resolver)
}
/// Shim for tokio::net::TcpStream
pub struct TcpStream;
impl TcpStream {
/// Connect to the endpoint using the default resolver.
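    ///
    /// # Example
    ///
    /// A minimal sketch (not in the original docs; the returned future must
    /// still be driven by a reactor/runtime):
    ///
    /// ```
    /// // Resolves the host, then tries each address until one connects.
    /// tokio_dns::TcpStream::connect(("rust-lang.org", 80));
    /// ```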
pub fn connect<'a, T>(ep: T) -> IoFuture<net::TcpStream>
where
T: ToEndpoint<'a>,
{
TcpStream::connect_with(ep, POOL.clone())
}
/// Connect to the endpoint using a custom resolver.
pub fn connect_with<'a, T, R>(ep: T, resolver: R) -> IoFuture<net::TcpStream>
where
T: ToEndpoint<'a>,
R: Resolver,
{
boxed(
resolve_endpoint(ep, resolver).and_then(move |addrs| {
try_until_ok(addrs, move |addr| net::TcpStream::connect(&addr))
}),
)
}
}
/// Shim for tokio::net::TcpListener
pub struct TcpListener;
impl TcpListener {
/// Bind to the endpoint using the default resolver.
pub fn bind<'a, T>(ep: T) -> IoFuture<net::TcpListener>
where
T: ToEndpoint<'a>,
{
TcpListener::bind_with(ep, POOL.clone())
}
/// Bind to the endpoint using a custom resolver.
pub fn bind_with<'a, T, R>(ep: T, resolver: R) -> IoFuture<net::TcpListener>
where
T: ToEndpoint<'a>,
R: Resolver,
{
boxed(
resolve_endpoint(ep, resolver).and_then(move |addrs| {
try_until_ok(addrs, move |addr| net::TcpListener::bind(&addr))
}),
)
}
}
/// Shim for tokio::net::UdpSocket
pub struct UdpSocket;
impl UdpSocket {
/// Bind to the endpoint using the default resolver.
pub fn bind<'a, T>(ep: T) -> IoFuture<net::UdpSocket>
where
T: ToEndpoint<'a>,
{
UdpSocket::bind_with(ep, POOL.clone())
}
/// Bind to the endpoint using a custom resolver.
pub fn bind_with<'a, T, R>(ep: T, resolver: R) -> IoFuture<net::UdpSocket>
where
T: ToEndpoint<'a>,
R: Resolver,
{
boxed(
resolve_endpoint(ep, resolver).and_then(move |addrs| {
try_until_ok(addrs, move |addr| net::UdpSocket::bind(&addr))
}),
)
}
}
/// Resolves endpoint into a vector of socket addresses.
fn resolve_endpoint<'a, T, R>(ep: T, resolver: R) -> IoFuture<Vec<SocketAddr>>
where
R: Resolver,
T: ToEndpoint<'a>,
{
let ep = match ep.to_endpoint() {
Ok(ep) => ep,
Err(e) => return boxed(futures::failed(e)),
};
match ep {
Endpoint::Host(host, port) => boxed(resolver.resolve(host).map(move |addrs| {
addrs
.into_iter()
.map(|addr| SocketAddr::new(addr, port))
.collect()
})),
Endpoint::SocketAddr(addr) => boxed(futures::finished(vec![addr])),
}
}
fn try_until_ok<F, R, I>(addrs: Vec<SocketAddr>, f: F) -> IoFuture<I>
where
F: Fn(SocketAddr) -> R + Send + 'static,
R: IntoFuture<Item = I, Error = io::Error> + 'static,
R::Future: Send + 'static,
<R::Future as Future>::Error: From<io::Error>,
I: Send + 'static,
{
let result = Err(io::Error::new(
io::ErrorKind::Other,
"could not resolve to any address",
));
boxed(
futures::stream::iter_ok(addrs.into_iter())
.fold::<_, _, Box<Future<Item = _, Error = io::Error> + Send>>(
result,
move |prev, addr| {
match prev {
Ok(i) => {
// Keep first successful result.
boxed(futures::finished(Ok(i)))
}
Err(..) => {
// Ignore previous error and try next address.
let future = f(addr).into_future();
// Lift future error into item to avoid short-circuit exit from fold.
boxed(future.then(Ok))
}
}
},
)
.and_then(|r| r),
)
}
| 28.350711 | 97 | 0.570211 |
1e99e831e58320ad722a38ae3c52c8f13b66ecca
| 13,218 |
use crate::*;
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "persistence", derive(serde::Deserialize, serde::Serialize))]
#[cfg_attr(feature = "persistence", serde(default))]
pub(crate) struct State {
/// Positive offset means scrolling down/right
offset: Vec2,
show_scroll: bool,
/// Momentum, used for kinetic scrolling
#[cfg_attr(feature = "persistence", serde(skip))]
pub vel: Vec2,
    /// Mouse offset relative to the top of the handle when the user started moving it.
scroll_start_offset_from_top: Option<f32>,
}
impl Default for State {
fn default() -> Self {
Self {
offset: Vec2::ZERO,
show_scroll: false,
vel: Vec2::ZERO,
scroll_start_offset_from_top: None,
}
}
}
// TODO: rename VScroll
/// Add vertical scrolling to a contained [`Ui`].
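///
/// A hedged usage sketch (not from the original docs; `ui` is assumed to be
/// a valid `&mut Ui`):
///
/// ```ignore
/// ScrollArea::auto_sized().show(ui, |ui| {
///     ui.label("contents that may be taller than the available space");
/// });
/// ```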
#[derive(Clone, Debug)]
pub struct ScrollArea {
max_height: f32,
always_show_scroll: bool,
id_source: Option<Id>,
offset: Option<Vec2>,
}
impl ScrollArea {
/// Will make the area be as high as it is allowed to be (i.e. fill the [`Ui`] it is in)
pub fn auto_sized() -> Self {
Self::from_max_height(f32::INFINITY)
}
/// Use `f32::INFINITY` if you want the scroll area to expand to fit the surrounding Ui
pub fn from_max_height(max_height: f32) -> Self {
Self {
max_height,
always_show_scroll: false,
id_source: None,
offset: None,
}
}
    /// If `false` (default), the scroll bar will be hidden when not needed.
/// If `true`, the scroll bar will always be displayed even if not needed.
pub fn always_show_scroll(mut self, always_show_scroll: bool) -> Self {
self.always_show_scroll = always_show_scroll;
self
}
/// A source for the unique `Id`, e.g. `.id_source("second_scroll_area")` or `.id_source(loop_index)`.
pub fn id_source(mut self, id_source: impl std::hash::Hash) -> Self {
self.id_source = Some(Id::new(id_source));
self
}
/// Set the vertical scroll offset position.
///
/// See also: [`Ui::scroll_to_cursor`](crate::ui::Ui::scroll_to_cursor) and
/// [`Response::scroll_to_me`](crate::Response::scroll_to_me)
pub fn scroll_offset(mut self, offset: f32) -> Self {
self.offset = Some(Vec2::new(0.0, offset));
self
}
}
struct Prepared {
id: Id,
state: State,
current_scroll_bar_width: f32,
always_show_scroll: bool,
inner_rect: Rect,
content_ui: Ui,
}
impl ScrollArea {
fn begin(self, ui: &mut Ui) -> Prepared {
let Self {
max_height,
always_show_scroll,
id_source,
offset,
} = self;
let ctx = ui.ctx().clone();
let id_source = id_source.unwrap_or_else(|| Id::new("scroll_area"));
let id = ui.make_persistent_id(id_source);
let mut state = *ctx.memory().id_data.get_or_default::<State>(id);
if let Some(offset) = offset {
state.offset = offset;
}
// content: size of contents (generally large; that's why we want scroll bars)
// outer: size of scroll area including scroll bar(s)
// inner: excluding scroll bar(s). The area we clip the contents to.
let max_scroll_bar_width = max_scroll_bar_width_with_margin(ui);
let current_scroll_bar_width = if always_show_scroll {
max_scroll_bar_width
} else {
max_scroll_bar_width * ui.ctx().animate_bool(id, state.show_scroll)
};
let available_outer = ui.available_rect_before_wrap();
let outer_size = vec2(
available_outer.width(),
available_outer.height().at_most(max_height),
);
let inner_size = outer_size - vec2(current_scroll_bar_width, 0.0);
let inner_rect = Rect::from_min_size(available_outer.min, inner_size);
let mut content_ui = ui.child_ui(
Rect::from_min_size(
inner_rect.min - state.offset,
vec2(inner_size.x, f32::INFINITY),
),
*ui.layout(),
);
let mut content_clip_rect = inner_rect.expand(ui.visuals().clip_rect_margin);
content_clip_rect = content_clip_rect.intersect(ui.clip_rect());
content_clip_rect.max.x = ui.clip_rect().max.x - current_scroll_bar_width; // Nice handling of forced resizing beyond the possible
content_ui.set_clip_rect(content_clip_rect);
Prepared {
id,
state,
current_scroll_bar_width,
always_show_scroll,
inner_rect,
content_ui,
}
}
pub fn show<R>(self, ui: &mut Ui, add_contents: impl FnOnce(&mut Ui) -> R) -> R {
let mut prepared = self.begin(ui);
let ret = add_contents(&mut prepared.content_ui);
prepared.end(ui);
ret
}
}
impl Prepared {
fn end(self, ui: &mut Ui) {
let Prepared {
id,
mut state,
inner_rect,
always_show_scroll,
mut current_scroll_bar_width,
content_ui,
} = self;
let content_size = content_ui.min_size();
// We take the scroll target so only this ScrollArea will use it.
let scroll_target = content_ui.ctx().frame_state().scroll_target.take();
if let Some((scroll_y, align)) = scroll_target {
let center_factor = align.to_factor();
let top = content_ui.min_rect().top();
let visible_range = top..=top + content_ui.clip_rect().height();
let offset_y = scroll_y - lerp(visible_range, center_factor);
let mut spacing = ui.spacing().item_spacing.y;
// Depending on the alignment we need to add or subtract the spacing
spacing *= remap(center_factor, 0.0..=1.0, -1.0..=1.0);
state.offset.y = offset_y + spacing;
}
let width = if inner_rect.width().is_finite() {
inner_rect.width().max(content_size.x) // Expand width to fit content
} else {
// ScrollArea is in an infinitely wide parent
content_size.x
};
let inner_rect = Rect::from_min_size(inner_rect.min, vec2(width, inner_rect.height()));
let outer_rect = Rect::from_min_size(
inner_rect.min,
inner_rect.size() + vec2(current_scroll_bar_width, 0.0),
);
let content_is_too_small = content_size.y > inner_rect.height();
if content_is_too_small {
// Drag contents to scroll (for touch screens mostly):
let content_response = ui.interact(inner_rect, id.with("area"), Sense::drag());
let input = ui.input();
if content_response.dragged() {
state.offset.y -= input.pointer.delta().y;
state.vel = input.pointer.velocity();
} else {
let stop_speed = 20.0; // Pixels per second.
let friction_coeff = 1000.0; // Pixels per second squared.
let dt = input.unstable_dt;
let friction = friction_coeff * dt;
if friction > state.vel.length() || state.vel.length() < stop_speed {
state.vel = Vec2::ZERO;
} else {
state.vel -= friction * state.vel.normalized();
// Offset has an inverted coordinate system compared to
// the velocity, so we subtract it instead of adding it
state.offset.y -= state.vel.y * dt;
ui.ctx().request_repaint();
}
}
}
let max_offset = content_size.y - inner_rect.height();
if ui.rect_contains_pointer(outer_rect) {
let mut frame_state = ui.ctx().frame_state();
let scroll_delta = frame_state.scroll_delta;
let scrolling_up = state.offset.y > 0.0 && scroll_delta.y > 0.0;
let scrolling_down = state.offset.y < max_offset && scroll_delta.y < 0.0;
if scrolling_up || scrolling_down {
state.offset.y -= scroll_delta.y;
// Clear scroll delta so no parent scroll will use it.
frame_state.scroll_delta = Vec2::ZERO;
}
}
let show_scroll_this_frame = content_is_too_small || always_show_scroll;
let max_scroll_bar_width = max_scroll_bar_width_with_margin(ui);
if show_scroll_this_frame && current_scroll_bar_width <= 0.0 {
// Avoid frame delay; start showing scroll bar right away:
current_scroll_bar_width = max_scroll_bar_width * ui.ctx().animate_bool(id, true);
}
if current_scroll_bar_width > 0.0 {
let animation_t = current_scroll_bar_width / max_scroll_bar_width;
// margin between contents and scroll bar
let margin = animation_t * ui.spacing().item_spacing.x;
let left = inner_rect.right() + margin;
let right = outer_rect.right();
let corner_radius = (right - left) / 2.0;
let top = inner_rect.top();
let bottom = inner_rect.bottom();
let outer_scroll_rect = Rect::from_min_max(
pos2(left, inner_rect.top()),
pos2(right, inner_rect.bottom()),
);
let from_content =
|content_y| remap_clamp(content_y, 0.0..=content_size.y, top..=bottom);
let handle_rect = Rect::from_min_max(
pos2(left, from_content(state.offset.y)),
pos2(right, from_content(state.offset.y + inner_rect.height())),
);
let interact_id = id.with("vertical");
let response = ui.interact(outer_scroll_rect, interact_id, Sense::click_and_drag());
if let Some(pointer_pos) = response.interact_pointer_pos() {
let scroll_start_offset_from_top =
state.scroll_start_offset_from_top.get_or_insert_with(|| {
if handle_rect.contains(pointer_pos) {
pointer_pos.y - handle_rect.top()
} else {
let handle_top_pos_at_bottom = bottom - handle_rect.height();
// Calculate the new handle top position, centering the handle on the mouse.
let new_handle_top_pos = (pointer_pos.y - handle_rect.height() / 2.0)
.clamp(top, handle_top_pos_at_bottom);
pointer_pos.y - new_handle_top_pos
}
});
let new_handle_top = pointer_pos.y - *scroll_start_offset_from_top;
state.offset.y = remap(new_handle_top, top..=bottom, 0.0..=content_size.y);
} else {
state.scroll_start_offset_from_top = None;
}
let unbounded_offset_y = state.offset.y;
state.offset.y = state.offset.y.max(0.0);
state.offset.y = state.offset.y.min(max_offset);
if state.offset.y != unbounded_offset_y {
state.vel = Vec2::ZERO;
}
// Avoid frame-delay by calculating a new handle rect:
let mut handle_rect = Rect::from_min_max(
pos2(left, from_content(state.offset.y)),
pos2(right, from_content(state.offset.y + inner_rect.height())),
);
let min_handle_height = (2.0 * corner_radius).max(8.0);
if handle_rect.size().y < min_handle_height {
handle_rect = Rect::from_center_size(
handle_rect.center(),
vec2(handle_rect.size().x, min_handle_height),
);
}
let visuals = ui.style().interact(&response);
ui.painter().add(epaint::Shape::Rect {
rect: outer_scroll_rect,
corner_radius,
fill: ui.visuals().extreme_bg_color,
stroke: Default::default(),
// fill: visuals.bg_fill,
// stroke: visuals.bg_stroke,
});
ui.painter().add(epaint::Shape::Rect {
rect: handle_rect.expand(-2.0),
corner_radius,
fill: visuals.bg_fill,
stroke: visuals.bg_stroke,
});
}
let size = vec2(
outer_rect.size().x,
outer_rect.size().y.min(content_size.y), // shrink if content is so small that we don't need scroll bars
);
ui.advance_cursor_after_rect(Rect::from_min_size(outer_rect.min, size));
if show_scroll_this_frame != state.show_scroll {
ui.ctx().request_repaint();
}
state.offset.y = state.offset.y.min(content_size.y - inner_rect.height());
state.offset.y = state.offset.y.max(0.0);
state.show_scroll = show_scroll_this_frame;
ui.memory().id_data.insert(id, state);
}
}
fn max_scroll_bar_width_with_margin(ui: &Ui) -> f32 {
ui.spacing().item_spacing.x + 16.0
}
| 36.513812 | 138 | 0.574066 |
56e96c5f3c7e6a9dfc19925efa0afc2e405a97f4
| 13,922 |
//! Module implementing custom syntax for [`Engine`].
use crate::ast::Expr;
use crate::engine::EvalContext;
use crate::func::native::SendSync;
use crate::r#unsafe::unsafe_try_cast;
use crate::tokenizer::{is_valid_identifier, Token};
use crate::types::dynamic::Variant;
use crate::{
Engine, Identifier, ImmutableString, LexError, ParseError, Position, RhaiResult, Shared,
StaticVec, INT,
};
#[cfg(feature = "no_std")]
use std::prelude::v1::*;
use std::{any::TypeId, ops::Deref};
/// Collection of special markers for custom syntax definition.
pub mod markers {
/// Special marker for matching an expression.
pub const CUSTOM_SYNTAX_MARKER_EXPR: &str = "$expr$";
/// Special marker for matching a statements block.
pub const CUSTOM_SYNTAX_MARKER_BLOCK: &str = "$block$";
/// Special marker for matching an identifier.
pub const CUSTOM_SYNTAX_MARKER_IDENT: &str = "$ident$";
/// Special marker for matching a single symbol.
pub const CUSTOM_SYNTAX_MARKER_SYMBOL: &str = "$symbol$";
/// Special marker for matching a string literal.
pub const CUSTOM_SYNTAX_MARKER_STRING: &str = "$string$";
/// Special marker for matching an integer number.
pub const CUSTOM_SYNTAX_MARKER_INT: &str = "$int$";
/// Special marker for matching a floating-point number.
#[cfg(not(feature = "no_float"))]
pub const CUSTOM_SYNTAX_MARKER_FLOAT: &str = "$float$";
/// Special marker for matching a boolean value.
pub const CUSTOM_SYNTAX_MARKER_BOOL: &str = "$bool$";
/// Special marker for identifying the custom syntax variant.
pub const CUSTOM_SYNTAX_MARKER_SYNTAX_VARIANT: &str = "$$";
}
/// A general expression evaluation trait object.
#[cfg(not(feature = "sync"))]
pub type FnCustomSyntaxEval = dyn Fn(&mut EvalContext, &[Expression]) -> RhaiResult;
/// A general expression evaluation trait object.
#[cfg(feature = "sync")]
pub type FnCustomSyntaxEval = dyn Fn(&mut EvalContext, &[Expression]) -> RhaiResult + Send + Sync;
/// A general expression parsing trait object.
#[cfg(not(feature = "sync"))]
pub type FnCustomSyntaxParse =
dyn Fn(&[ImmutableString], &str) -> Result<Option<ImmutableString>, ParseError>;
/// A general expression parsing trait object.
#[cfg(feature = "sync")]
pub type FnCustomSyntaxParse =
dyn Fn(&[ImmutableString], &str) -> Result<Option<ImmutableString>, ParseError> + Send + Sync;
/// An expression sub-tree in an [`AST`][crate::AST].
#[derive(Debug, Clone)]
pub struct Expression<'a>(&'a Expr);
impl<'a> From<&'a Expr> for Expression<'a> {
#[inline(always)]
fn from(expr: &'a Expr) -> Self {
Self(expr)
}
}
impl Expression<'_> {
/// If this expression is a variable name, return it. Otherwise [`None`].
#[inline(always)]
#[must_use]
pub fn get_variable_name(&self) -> Option<&str> {
self.0.get_variable_name(true)
}
/// Get the position of this expression.
#[inline(always)]
#[must_use]
pub const fn position(&self) -> Position {
self.0.position()
}
/// Get the value of this expression if it is a literal constant.
/// Supports [`INT`][crate::INT], [`FLOAT`][crate::FLOAT], `()`, `char`, `bool` and
/// [`ImmutableString`][crate::ImmutableString].
///
/// Returns [`None`] also if the constant is not of the specified type.
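    ///
    /// A hedged sketch (illustrative; `expr` is assumed to wrap the integer
    /// literal `42`):
    ///
    /// ```ignore
    /// let value = expr.get_literal_value::<INT>();  // Some(42)
    /// let other = expr.get_literal_value::<bool>(); // None
    /// ```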
#[inline]
#[must_use]
pub fn get_literal_value<T: Variant>(&self) -> Option<T> {
// Coded this way in order to maximally leverage potentials for dead-code removal.
if TypeId::of::<T>() == TypeId::of::<INT>() {
return match self.0 {
Expr::IntegerConstant(x, _) => unsafe_try_cast(*x).ok(),
_ => None,
};
}
#[cfg(not(feature = "no_float"))]
if TypeId::of::<T>() == TypeId::of::<crate::FLOAT>() {
return match self.0 {
Expr::FloatConstant(x, _) => unsafe_try_cast(*x).ok(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<char>() {
return match self.0 {
Expr::CharConstant(x, _) => unsafe_try_cast(*x).ok(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<ImmutableString>() {
return match self.0 {
Expr::StringConstant(x, _) => unsafe_try_cast(x.clone()).ok(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<bool>() {
return match self.0 {
Expr::BoolConstant(x, _) => unsafe_try_cast(*x).ok(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<()>() {
return match self.0 {
Expr::Unit(_) => unsafe_try_cast(()).ok(),
_ => None,
};
}
None
}
}
impl AsRef<Expr> for Expression<'_> {
#[inline(always)]
fn as_ref(&self) -> &Expr {
&self.0
}
}
impl Deref for Expression<'_> {
type Target = Expr;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl EvalContext<'_, '_, '_, '_, '_, '_, '_, '_> {
/// Evaluate an [expression tree][Expression].
///
/// # WARNING - Low Level API
///
/// This function is very low level. It evaluates an expression from an [`AST`][crate::AST].
#[inline(always)]
pub fn eval_expression_tree(&mut self, expr: &Expression) -> RhaiResult {
self.engine.eval_expr(
self.scope,
self.mods,
self.state,
self.lib,
self.this_ptr,
expr,
self.level,
)
}
}
/// Definition of a custom syntax definition.
pub struct CustomSyntax {
/// A parsing function to return the next token in a custom syntax based on the
/// symbols parsed so far.
pub parse: Box<FnCustomSyntaxParse>,
/// Custom syntax implementation function.
pub func: Shared<FnCustomSyntaxEval>,
/// Any variables added/removed in the scope?
pub scope_may_be_changed: bool,
}
impl Engine {
/// Register a custom syntax with the [`Engine`].
///
/// * `symbols` holds a slice of strings that define the custom syntax.
/// * `scope_may_be_changed` specifies variables _may_ be added/removed by this custom syntax.
/// * `func` is the implementation function.
///
/// ## Note on `symbols`
///
/// * Whitespaces around symbols are stripped.
/// * Symbols that are all-whitespace or empty are ignored.
/// * If `symbols` does not contain at least one valid token, then the custom syntax registration
/// is simply ignored.
///
/// ## Note on `scope_may_be_changed`
///
/// If `scope_may_be_changed` is `true`, then _size_ of the current [`Scope`][crate::Scope]
/// _may_ be modified by this custom syntax.
///
/// Adding new variables and/or removing variables count.
///
/// Simply modifying the values of existing variables does NOT count, as the _size_ of the
/// current [`Scope`][crate::Scope] is unchanged, so `false` should be passed.
///
/// Replacing one variable with another (i.e. adding a new variable and removing one variable at
/// the same time so that the total _size_ of the [`Scope`][crate::Scope] is unchanged) also
/// does NOT count, so `false` should be passed.
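    ///
    /// # Example
    ///
    /// A hedged sketch (not from the original docs; the `exec` syntax is
    /// illustrative and the `engine` setup is assumed):
    ///
    /// ```ignore
    /// // `exec $expr$` simply evaluates the embedded expression tree.
    /// engine.register_custom_syntax(
    ///     &["exec", "$expr$"],
    ///     false, // the scope is not changed
    ///     |context, inputs| context.eval_expression_tree(&inputs[0]),
    /// )?;
    /// ```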
pub fn register_custom_syntax<S: AsRef<str> + Into<Identifier>>(
&mut self,
symbols: &[S],
scope_may_be_changed: bool,
func: impl Fn(&mut EvalContext, &[Expression]) -> RhaiResult + SendSync + 'static,
) -> Result<&mut Self, ParseError> {
use markers::*;
let mut segments = StaticVec::<ImmutableString>::new();
for s in symbols {
let s = s.as_ref().trim();
// Skip empty symbols
if s.is_empty() {
continue;
}
let token = Token::lookup_from_syntax(s);
let seg = match s {
// Markers not in first position
CUSTOM_SYNTAX_MARKER_IDENT
| CUSTOM_SYNTAX_MARKER_SYMBOL
| CUSTOM_SYNTAX_MARKER_EXPR
| CUSTOM_SYNTAX_MARKER_BLOCK
| CUSTOM_SYNTAX_MARKER_BOOL
| CUSTOM_SYNTAX_MARKER_INT
| CUSTOM_SYNTAX_MARKER_STRING
if !segments.is_empty() =>
{
s.into()
}
// Markers not in first position
#[cfg(not(feature = "no_float"))]
CUSTOM_SYNTAX_MARKER_FLOAT if !segments.is_empty() => s.into(),
// Standard or reserved keyword/symbol not in first position
_ if !segments.is_empty() && token.is_some() => {
// Make it a custom keyword/symbol if it is disabled or reserved
if (self.disabled_symbols.contains(s)
|| token.map_or(false, |v| v.is_reserved()))
&& !self.custom_keywords.contains_key(s)
{
self.custom_keywords.insert(s.into(), None);
}
s.into()
}
// Standard keyword in first position but not disabled
_ if segments.is_empty()
&& token.as_ref().map_or(false, |v| v.is_standard_keyword())
&& !self.disabled_symbols.contains(s) =>
{
return Err(LexError::ImproperSymbol(
s.to_string(),
format!(
"Improper symbol for custom syntax at position #{}: '{}'",
segments.len() + 1,
s
),
)
.into_err(Position::NONE));
}
// Identifier in first position
_ if segments.is_empty() && is_valid_identifier(s.chars()) => {
// Make it a custom keyword/symbol if it is disabled or reserved
if self.disabled_symbols.contains(s) || token.map_or(false, |v| v.is_reserved())
{
if !self.custom_keywords.contains_key(s) {
self.custom_keywords.insert(s.into(), None);
}
}
s.into()
}
// Anything else is an error
_ => {
return Err(LexError::ImproperSymbol(
s.to_string(),
format!(
"Improper symbol for custom syntax at position #{}: '{}'",
segments.len() + 1,
s
),
)
.into_err(Position::NONE));
}
};
segments.push(seg);
}
// If the syntax has no symbols, just ignore the registration
if segments.is_empty() {
return Ok(self);
}
// The first keyword is the discriminator
let key = segments[0].clone();
self.register_custom_syntax_raw(
key,
// Construct the parsing function
move |stream, _| {
if stream.len() >= segments.len() {
Ok(None)
} else {
Ok(Some(segments[stream.len()].clone()))
}
},
scope_may_be_changed,
func,
);
Ok(self)
}
/// Register a custom syntax with the [`Engine`].
///
/// # WARNING - Low Level API
///
/// This function is very low level.
///
    /// * `scope_may_be_changed` specifies whether variables may be added/removed by this custom syntax.
/// * `parse` is the parsing function.
/// * `func` is the implementation function.
///
/// All custom keywords used as symbols must be manually registered via [`Engine::register_custom_operator`].
/// Otherwise, they won't be recognized.
///
    /// # Parsing Function Signature
    ///
    /// The parsing function has the following signature:
///
/// > `Fn(symbols: &[ImmutableString], look_ahead: &str) -> Result<Option<ImmutableString>, ParseError>`
///
/// where:
/// * `symbols`: a slice of symbols that have been parsed so far, possibly containing `$expr$` and/or `$block$`;
/// `$ident$` and other literal markers are replaced by the actual text
/// * `look_ahead`: a string slice containing the next symbol that is about to be read
///
/// ## Return value
///
/// * `Ok(None)`: parsing complete and there are no more symbols to match.
/// * `Ok(Some(symbol))`: the next symbol to match, which can also be `$expr$`, `$ident$` or `$block$`.
/// * `Err(ParseError)`: error that is reflected back to the [`Engine`], normally `ParseError(ParseErrorType::BadInput(LexError::ImproperSymbol(message)), Position::NONE)` to indicate a syntax error, but it can be any [`ParseError`].
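    ///
    /// # Example
    ///
    /// A hedged sketch (not from the original docs; the `hello` syntax is
    /// illustrative and the `engine` setup is assumed):
    ///
    /// ```ignore
    /// // `hello $expr$` — the parser returns the next expected symbol,
    /// // then `Ok(None)` once the syntax is complete.
    /// engine.register_custom_syntax_raw(
    ///     "hello",
    ///     |symbols, _look_ahead| match symbols.len() {
    ///         1 => Ok(Some("$expr$".into())),
    ///         _ => Ok(None),
    ///     },
    ///     false,
    ///     |context, inputs| context.eval_expression_tree(&inputs[0]),
    /// );
    /// ```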
pub fn register_custom_syntax_raw(
&mut self,
key: impl Into<Identifier>,
parse: impl Fn(&[ImmutableString], &str) -> Result<Option<ImmutableString>, ParseError>
+ SendSync
+ 'static,
scope_may_be_changed: bool,
func: impl Fn(&mut EvalContext, &[Expression]) -> RhaiResult + SendSync + 'static,
) -> &mut Self {
self.custom_syntax.insert(
key.into(),
CustomSyntax {
parse: Box::new(parse),
func: (Box::new(func) as Box<FnCustomSyntaxEval>).into(),
scope_may_be_changed,
}
.into(),
);
self
}
}
| 37.728997 | 237 | 0.55028 |
f800a2b360f24c34978672255bb42697ac784009
| 21,083 |
//! Swift Demangling Tests
//! All functions were compiled with Swift 4.0 in a file called mangling.swift
//! see https://github.com/apple/swift/blob/master/test/SILGen/mangling.swift
#![cfg(feature = "swift")]
#[macro_use]
mod utils;
use symbolic_common::Language;
use symbolic_demangle::DemangleOptions;
#[test]
fn test_demangle_swift_short() {
assert_demangle!(Language::Swift, DemangleOptions::name_only().parameters(true), {
// Swift < 4 (old mangling)
"_T08mangling0022egbpdajGbuEbxfgehfvwxnyyF" => "ليهمابتكلموشعربي؟()",
"_T08mangling0024ihqwcrbEcvIaIdqgAFGpqjyeyyF" => "他们为什么不说中文()",
"_T08mangling0027ihqwctvzcJBfGFJdrssDxIboAybyyF" => "他們爲什麽不說中文()",
"_T08mangling0030Proprostnemluvesky_uybCEdmaEBayyF" => "Pročprostěnemluvíčesky()",
"_T08mangling9r13757744ySaySiG1x_tF" => "r13757744(x: [Int])",
"_T08mangling9r13757744ySi1xd_tF" => "r13757744(x: Int...)",
"_T08mangling2psopyxlF" => "+- prefix<A>(A)",
"_T08mangling2psoPyxlF" => "+- postfix<A>(A)",
"_T08mangling2psoiyx_xtlF" => "+- infix<A>(A, A)",
"_T08mangling2psopyx1a_x1bt_tlF" => "+- prefix<A>((a: A, b: A))",
"_T08mangling2psoPyx1a_x1bt_tlF" => "+- postfix<A>((a: A, b: A))",
"_T08mangling007p_qcaDcoiS2i_SitF" => "«+» infix(Int, Int)",
"_T08mangling12any_protocolyypF" => "any_protocol(Any)",
"_T08mangling12one_protocolyAA3Foo_pF" => "one_protocol(Foo)",
"_T08mangling18one_protocol_twiceyAA3Foo_p_AaC_ptF" => "one_protocol_twice(Foo, Foo)",
"_T08mangling12two_protocolyAA3Bar_AA3FoopF" => "two_protocol(Bar & Foo)",
"_T08mangling3ZimC4zangyx_qd__tlF" => "Zim.zang<A>(A, A1)",
"_T08mangling3ZimC4zungyqd___xtlF" => "Zim.zung<A>(A1, A)",
"_T08mangling27single_protocol_compositionyAA3Foo_p1x_tF" => "single_protocol_composition(x: Foo)",
"_T08mangling28uses_objc_class_and_protocolySo8NSObjectC1o_So8NSAnsing_p1ptF" => "uses_objc_class_and_protocol(o: NSObject, p: NSAnsing)",
"_T08mangling17uses_clang_structySC6NSRectV1r_tF" => "uses_clang_struct(r: NSRect)",
"_T08mangling14uses_optionalss7UnicodeO6ScalarVSgSiSg1x_tF" => "uses_optionals(x: Int?)",
"_T08mangling12GenericUnionO3FooACyxGSicAEmlF" => "GenericUnion.Foo<A>(GenericUnion<A>.Type)",
"_T08mangling10HasVarInitV5stateSbvpZfiSbyKXKfu_" => "implicit closure #1 in variable initialization expression of static HasVarInit.state",
"_T08mangling19autoClosureOverloadySiyXK1f_tF" => "autoClosureOverload(f: @autoclosure ())",
"_T08mangling19autoClosureOverloadySiyc1f_tF" => "autoClosureOverload(f: ())",
"_T08mangling24autoClosureOverloadCallsyyF" => "autoClosureOverloadCalls()",
"_T08mangling4fooAyxAA12HasAssocTypeRzlF" => "fooA<A>(A)",
"_T08mangling4fooByxAA12HasAssocTypeRzAA0D4Reqt0D0RpzlF" => "fooB<A>(A)",
"_T08mangling2qqoiySi_SitF" => "?? infix(Int, Int)",
"_T08mangling24InstanceAndClassPropertyV8propertySivg" => "InstanceAndClassProperty.property.getter",
"_T08mangling24InstanceAndClassPropertyV8propertySivs" => "InstanceAndClassProperty.property.setter",
"_T08mangling24InstanceAndClassPropertyV8propertySivgZ" => "static InstanceAndClassProperty.property.getter",
"_T08mangling24InstanceAndClassPropertyV8propertySivsZ" => "static InstanceAndClassProperty.property.setter",
"_T08mangling6curry1yyF" => "curry1()",
"_T08mangling3barSiyKF" => "bar()",
"_T08mangling12curry1ThrowsyyKF" => "curry1Throws()",
"_T08mangling12curry2ThrowsyycyKF" => "curry2Throws()",
"_T08mangling6curry3yyKcyF" => "curry3()",
"_T08mangling12curry3ThrowsyyKcyKF" => "curry3Throws()",
"_T08mangling14varargsVsArrayySi3arrd_SS1ntF" => "varargsVsArray(arr: Int..., n: String)",
"_T08mangling14varargsVsArrayySaySiG3arr_SS1ntF" => "varargsVsArray(arr: [Int], n: String)",
"_T08mangling14varargsVsArrayySaySiG3arrd_SS1ntF" => "varargsVsArray(arr: [Int]..., n: String)",
// Swift 4.2
"$S8mangling0022egbpdajGbuEbxfgehfvwxnyyF" => "ليهمابتكلموشعربي؟()",
"$S8mangling0024ihqwcrbEcvIaIdqgAFGpqjyeyyF" => "他们为什么不说中文()",
"$S8mangling0027ihqwctvzcJBfGFJdrssDxIboAybyyF" => "他們爲什麽不說中文()",
"$S8mangling0030Proprostnemluvesky_uybCEdmaEBayyF" => "Pročprostěnemluvíčesky()",
"$S8mangling9r137577441xySaySiG_tF" => "r13757744(x: [Int])",
"$S8mangling9r137577441xySid_tF" => "r13757744(x: Int...)",
"$S8mangling2psopyyxlF" => "+- prefix<A>(A)",
"$S8mangling2psoPyyxlF" => "+- postfix<A>(A)",
"$S8mangling2psoiyyx_xtlF" => "+- infix<A>(A, A)",
"$S8mangling2psopyyx1a_x1bt_tlF" => "+- prefix<A>((a: A, b: A))",
"$S8mangling2psoPyyx1a_x1bt_tlF" => "+- postfix<A>((a: A, b: A))",
"$S8mangling007p_qcaDcoiyS2i_SitF" => "«+» infix(Int, Int)",
"$S8mangling12any_protocolyyypF" => "any_protocol(Any)",
"$S8mangling12one_protocolyyAA3Foo_pF" => "one_protocol(Foo)",
"$S8mangling18one_protocol_twiceyyAA3Foo_p_AaC_ptF" => "one_protocol_twice(Foo, Foo)",
"$S8mangling12two_protocolyyAA3Bar_AA3FoopF" => "two_protocol(Bar & Foo)",
"$S8mangling3ZimC4zangyyx_qd__tlF" => "Zim.zang<A>(A, A1)",
"$S8mangling3ZimC4zungyyqd___xtlF" => "Zim.zung<A>(A1, A)",
"$S8mangling28uses_objc_class_and_protocol1o1p2p2ySo8NSObjectC_So8NSAnsing_pSo14NSBetterAnsing_ptF" => "uses_objc_class_and_protocol(o: NSObject, p: NSAnsing, p2: NSBetterAnsing)",
"$S8mangling17uses_clang_struct1rySo6NSRectV_tF" => "uses_clang_struct(r: NSRect)",
"$S8mangling14uses_optionals1xs7UnicodeO6ScalarVSgSiSg_tF" => "uses_optionals(x: Int?)",
"$S8mangling12GenericUnionO3FooyACyxGSicAEmlF" => "GenericUnion.Foo<A>(GenericUnion<A>.Type)",
"$S8mangling10HasVarInitV5stateSbvpZfiSbyKXKfu_" => "implicit closure #1 in variable initialization expression of static HasVarInit.state",
"$S8mangling19autoClosureOverload1fySiyXK_tF" => "autoClosureOverload(f: @autoclosure ())",
"$S8mangling19autoClosureOverload1fySiyXE_tF" => "autoClosureOverload(f: ())",
"$S8mangling24autoClosureOverloadCallsyyF" => "autoClosureOverloadCalls()",
"$S8mangling4fooAyyxAA12HasAssocTypeRzlF" => "fooA<A>(A)",
"$S8mangling4fooByyxAA12HasAssocTypeRzAA0D4Reqt0D0RpzlF" => "fooB<A>(A)",
"$S8mangling2qqoiyySi_SitF" => "?? infix(Int, Int)",
"$S8mangling24InstanceAndClassPropertyV8propertySivg" => "InstanceAndClassProperty.property.getter",
"$S8mangling24InstanceAndClassPropertyV8propertySivs" => "InstanceAndClassProperty.property.setter",
"$S8mangling24InstanceAndClassPropertyV8propertySivgZ" => "static InstanceAndClassProperty.property.getter",
"$S8mangling24InstanceAndClassPropertyV8propertySivsZ" => "static InstanceAndClassProperty.property.setter",
"$S8mangling6curry1yyF" => "curry1()",
"$S8mangling3barSiyKF" => "bar()",
"$S8mangling12curry1ThrowsyyKF" => "curry1Throws()",
"$S8mangling12curry2ThrowsyycyKF" => "curry2Throws()",
"$S8mangling6curry3yyKcyF" => "curry3()",
"$S8mangling12curry3ThrowsyyKcyKF" => "curry3Throws()",
"$S8mangling14varargsVsArray3arr1nySid_SStF" => "varargsVsArray(arr: Int..., n: String)",
"$S8mangling14varargsVsArray3arr1nySaySiG_SStF" => "varargsVsArray(arr: [Int], n: String)",
"$S8mangling14varargsVsArray3arr1nySaySiGd_SStF" => "varargsVsArray(arr: [Int]..., n: String)",
// Swift 5
"$s8mangling0022egbpdajGbuEbxfgehfvwxnyyF" => "ليهمابتكلموشعربي؟()",
"$s8mangling0024ihqwcrbEcvIaIdqgAFGpqjyeyyF" => "他们为什么不说中文()",
"$s8mangling0027ihqwctvzcJBfGFJdrssDxIboAybyyF" => "他們爲什麽不說中文()",
"$s8mangling0030Proprostnemluvesky_uybCEdmaEBayyF" => "Pročprostěnemluvíčesky()",
"$s8mangling9r137577441xySaySiG_tF" => "r13757744(x: [Int])",
"$s8mangling9r137577441xySid_tF" => "r13757744(x: Int...)",
"$s8mangling2psopyyxlF" => "+- prefix<A>(A)",
"$s8mangling2psoPyyxlF" => "+- postfix<A>(A)",
"$s8mangling2psoiyyx_xtlF" => "+- infix<A>(A, A)",
"$s8mangling2psopyyx1a_x1bt_tlF" => "+- prefix<A>((a: A, b: A))",
"$s8mangling2psoPyyx1a_x1bt_tlF" => "+- postfix<A>((a: A, b: A))",
"$s8mangling007p_qcaDcoiyS2i_SitF" => "«+» infix(Int, Int)",
"$s8mangling12any_protocolyyypF" => "any_protocol(Any)",
"$s8mangling12one_protocolyyAA3Foo_pF" => "one_protocol(Foo)",
"$s8mangling18one_protocol_twiceyyAA3Foo_p_AaC_ptF" => "one_protocol_twice(Foo, Foo)",
"$s8mangling12two_protocolyyAA3Bar_AA3FoopF" => "two_protocol(Bar & Foo)",
"$s8mangling3ZimC4zangyyx_qd__tlF" => "Zim.zang<A>(A, A1)",
"$s8mangling3ZimC4zungyyqd___xtlF" => "Zim.zung<A>(A1, A)",
"$s8mangling28uses_objc_class_and_protocol1o1p2p2ySo8NSObjectC_So8NSAnsing_pSo14NSBetterAnsing_ptF" => "uses_objc_class_and_protocol(o: NSObject, p: NSAnsing, p2: NSBetterAnsing)",
"$s8mangling17uses_clang_struct1rySo6NSRectV_tF" => "uses_clang_struct(r: NSRect)",
"$s8mangling14uses_optionals1xs7UnicodeO6ScalarVSgSiSg_tF" => "uses_optionals(x: Int?)",
"$s8mangling12GenericUnionO3FooyACyxGSicAEmlF" => "GenericUnion.Foo<A>(GenericUnion<A>.Type)",
"$s8mangling10HasVarInitV5stateSbvpZfiSbyKXKfu_" => "implicit closure #1 in variable initialization expression of static HasVarInit.state",
"$s8mangling19autoClosureOverload1fySiyXK_tF" => "autoClosureOverload(f: @autoclosure ())",
"$s8mangling19autoClosureOverload1fySiyXE_tF" => "autoClosureOverload(f: ())",
"$s8mangling24autoClosureOverloadCallsyyF" => "autoClosureOverloadCalls()",
"$s8mangling4fooAyyxAA12HasAssocTypeRzlF" => "fooA<A>(A)",
"$s8mangling4fooByyxAA12HasAssocTypeRzAA0D4Reqt0D0RpzlF" => "fooB<A>(A)",
"$s8mangling2qqoiyySi_SitF" => "?? infix(Int, Int)",
"$s8mangling24InstanceAndClassPropertyV8propertySivg" => "InstanceAndClassProperty.property.getter",
"$s8mangling24InstanceAndClassPropertyV8propertySivs" => "InstanceAndClassProperty.property.setter",
"$s8mangling24InstanceAndClassPropertyV8propertySivgZ" => "static InstanceAndClassProperty.property.getter",
"$s8mangling24InstanceAndClassPropertyV8propertySivsZ" => "static InstanceAndClassProperty.property.setter",
"$s8mangling6curry1yyF" => "curry1()",
"$s8mangling3barSiyKF" => "bar()",
"$s8mangling12curry1ThrowsyyKF" => "curry1Throws()",
"$s8mangling12curry2ThrowsyycyKF" => "curry2Throws()",
"$s8mangling6curry3yyKcyF" => "curry3()",
"$s8mangling12curry3ThrowsyyKcyKF" => "curry3Throws()",
"$s8mangling14varargsVsArray3arr1nySid_SStF" => "varargsVsArray(arr: Int..., n: String)",
"$s8mangling14varargsVsArray3arr1nySaySiG_SStF" => "varargsVsArray(arr: [Int], n: String)",
"$s8mangling14varargsVsArray3arr1nySaySiGd_SStF" => "varargsVsArray(arr: [Int]..., n: String)",
// Swift 5.2
"$s7ranking22propertyVersusFunctionyyAA1P_p_xtAaCRzlFyAaC_pcAaC_pcfu_" => "implicit closure #1 (P) in propertyVersusFunction<A>(P, A)",
});
}
#[test]
fn test_demangle_swift_no_args() {
assert_demangle!(Language::Swift, DemangleOptions::name_only(), {
// Swift < 4 (old mangling)
"_T08mangling0022egbpdajGbuEbxfgehfvwxnyyF" => "ليهمابتكلموشعربي؟",
"_T08mangling0024ihqwcrbEcvIaIdqgAFGpqjyeyyF" => "他们为什么不说中文",
"_T08mangling0027ihqwctvzcJBfGFJdrssDxIboAybyyF" => "他們爲什麽不說中文",
"_T08mangling0030Proprostnemluvesky_uybCEdmaEBayyF" => "Pročprostěnemluvíčesky",
"_T08mangling9r13757744ySaySiG1x_tF" => "r13757744",
"_T08mangling9r13757744ySi1xd_tF" => "r13757744",
"_T08mangling2psopyxlF" => "+- prefix<A>",
"_T08mangling2psoPyxlF" => "+- postfix<A>",
"_T08mangling2psoiyx_xtlF" => "+- infix<A>",
"_T08mangling2psopyx1a_x1bt_tlF" => "+- prefix<A>",
"_T08mangling2psoPyx1a_x1bt_tlF" => "+- postfix<A>",
"_T08mangling007p_qcaDcoiS2i_SitF" => "«+» infix",
"_T08mangling12any_protocolyypF" => "any_protocol",
"_T08mangling12one_protocolyAA3Foo_pF" => "one_protocol",
"_T08mangling18one_protocol_twiceyAA3Foo_p_AaC_ptF" => "one_protocol_twice",
"_T08mangling12two_protocolyAA3Bar_AA3FoopF" => "two_protocol",
"_T08mangling3ZimC4zangyx_qd__tlF" => "Zim.zang<A>",
"_T08mangling3ZimC4zungyqd___xtlF" => "Zim.zung<A>",
"_T08mangling27single_protocol_compositionyAA3Foo_p1x_tF" => "single_protocol_composition",
"_T08mangling28uses_objc_class_and_protocolySo8NSObjectC1o_So8NSAnsing_p1ptF" => "uses_objc_class_and_protocol",
"_T08mangling17uses_clang_structySC6NSRectV1r_tF" => "uses_clang_struct",
"_T08mangling14uses_optionalss7UnicodeO6ScalarVSgSiSg1x_tF" => "uses_optionals",
"_T08mangling12GenericUnionO3FooACyxGSicAEmlF" => "GenericUnion.Foo<A>",
"_T08mangling10HasVarInitV5stateSbvpZfiSbyKXKfu_" => "implicit closure #1 in variable initialization expression of static HasVarInit.state",
"_T08mangling19autoClosureOverloadySiyXK1f_tF" => "autoClosureOverload",
"_T08mangling19autoClosureOverloadySiyc1f_tF" => "autoClosureOverload",
"_T08mangling24autoClosureOverloadCallsyyF" => "autoClosureOverloadCalls",
"_T08mangling4fooAyxAA12HasAssocTypeRzlF" => "fooA<A>",
"_T08mangling4fooByxAA12HasAssocTypeRzAA0D4Reqt0D0RpzlF" => "fooB<A>",
"_T08mangling2qqoiySi_SitF" => "?? infix",
"_T08mangling24InstanceAndClassPropertyV8propertySivg" => "InstanceAndClassProperty.property.getter",
"_T08mangling24InstanceAndClassPropertyV8propertySivs" => "InstanceAndClassProperty.property.setter",
"_T08mangling24InstanceAndClassPropertyV8propertySivgZ" => "static InstanceAndClassProperty.property.getter",
"_T08mangling24InstanceAndClassPropertyV8propertySivsZ" => "static InstanceAndClassProperty.property.setter",
"_T08mangling6curry1yyF" => "curry1",
"_T08mangling3barSiyKF" => "bar",
"_T08mangling12curry1ThrowsyyKF" => "curry1Throws",
"_T08mangling12curry2ThrowsyycyKF" => "curry2Throws",
"_T08mangling6curry3yyKcyF" => "curry3",
"_T08mangling12curry3ThrowsyyKcyKF" => "curry3Throws",
"_T08mangling14varargsVsArrayySi3arrd_SS1ntF" => "varargsVsArray",
"_T08mangling14varargsVsArrayySaySiG3arr_SS1ntF" => "varargsVsArray",
"_T08mangling14varargsVsArrayySaySiG3arrd_SS1ntF" => "varargsVsArray",
// Swift 4.2
"$S8mangling0022egbpdajGbuEbxfgehfvwxnyyF" => "ليهمابتكلموشعربي؟",
"$S8mangling0024ihqwcrbEcvIaIdqgAFGpqjyeyyF" => "他们为什么不说中文",
"$S8mangling0027ihqwctvzcJBfGFJdrssDxIboAybyyF" => "他們爲什麽不說中文",
"$S8mangling0030Proprostnemluvesky_uybCEdmaEBayyF" => "Pročprostěnemluvíčesky",
"$S8mangling9r137577441xySaySiG_tF" => "r13757744",
"$S8mangling9r137577441xySid_tF" => "r13757744",
"$S8mangling2psopyyxlF" => "+- prefix<A>",
"$S8mangling2psoPyyxlF" => "+- postfix<A>",
"$S8mangling2psoiyyx_xtlF" => "+- infix<A>",
"$S8mangling2psopyyx1a_x1bt_tlF" => "+- prefix<A>",
"$S8mangling2psoPyyx1a_x1bt_tlF" => "+- postfix<A>",
"$S8mangling007p_qcaDcoiyS2i_SitF" => "«+» infix",
"$S8mangling12any_protocolyyypF" => "any_protocol",
"$S8mangling12one_protocolyyAA3Foo_pF" => "one_protocol",
"$S8mangling18one_protocol_twiceyyAA3Foo_p_AaC_ptF" => "one_protocol_twice",
"$S8mangling12two_protocolyyAA3Bar_AA3FoopF" => "two_protocol",
"$S8mangling3ZimC4zangyyx_qd__tlF" => "Zim.zang<A>",
"$S8mangling3ZimC4zungyyqd___xtlF" => "Zim.zung<A>",
"$S8mangling28uses_objc_class_and_protocol1o1p2p2ySo8NSObjectC_So8NSAnsing_pSo14NSBetterAnsing_ptF" => "uses_objc_class_and_protocol",
"$S8mangling17uses_clang_struct1rySo6NSRectV_tF" => "uses_clang_struct",
"$S8mangling14uses_optionals1xs7UnicodeO6ScalarVSgSiSg_tF" => "uses_optionals",
"$S8mangling12GenericUnionO3FooyACyxGSicAEmlF" => "GenericUnion.Foo<A>",
"$S8mangling10HasVarInitV5stateSbvpZfiSbyKXKfu_" => "implicit closure #1 in variable initialization expression of static HasVarInit.state",
"$S8mangling19autoClosureOverload1fySiyXK_tF" => "autoClosureOverload",
"$S8mangling19autoClosureOverload1fySiyXE_tF" => "autoClosureOverload",
"$S8mangling24autoClosureOverloadCallsyyF" => "autoClosureOverloadCalls",
"$S8mangling4fooAyyxAA12HasAssocTypeRzlF" => "fooA<A>",
"$S8mangling4fooByyxAA12HasAssocTypeRzAA0D4Reqt0D0RpzlF" => "fooB<A>",
"$S8mangling2qqoiyySi_SitF" => "?? infix",
"$S8mangling24InstanceAndClassPropertyV8propertySivg" => "InstanceAndClassProperty.property.getter",
"$S8mangling24InstanceAndClassPropertyV8propertySivs" => "InstanceAndClassProperty.property.setter",
"$S8mangling24InstanceAndClassPropertyV8propertySivgZ" => "static InstanceAndClassProperty.property.getter",
"$S8mangling24InstanceAndClassPropertyV8propertySivsZ" => "static InstanceAndClassProperty.property.setter",
"$S8mangling6curry1yyF" => "curry1",
"$S8mangling3barSiyKF" => "bar",
"$S8mangling12curry1ThrowsyyKF" => "curry1Throws",
"$S8mangling12curry2ThrowsyycyKF" => "curry2Throws",
"$S8mangling6curry3yyKcyF" => "curry3",
"$S8mangling12curry3ThrowsyyKcyKF" => "curry3Throws",
"$S8mangling14varargsVsArray3arr1nySid_SStF" => "varargsVsArray",
"$S8mangling14varargsVsArray3arr1nySaySiG_SStF" => "varargsVsArray",
"$S8mangling14varargsVsArray3arr1nySaySiGd_SStF" => "varargsVsArray",
// Swift 5
"$s8mangling0022egbpdajGbuEbxfgehfvwxnyyF" => "ليهمابتكلموشعربي؟",
"$s8mangling0024ihqwcrbEcvIaIdqgAFGpqjyeyyF" => "他们为什么不说中文",
"$s8mangling0027ihqwctvzcJBfGFJdrssDxIboAybyyF" => "他們爲什麽不說中文",
"$s8mangling0030Proprostnemluvesky_uybCEdmaEBayyF" => "Pročprostěnemluvíčesky",
"$s8mangling9r137577441xySaySiG_tF" => "r13757744",
"$s8mangling9r137577441xySid_tF" => "r13757744",
"$s8mangling2psopyyxlF" => "+- prefix<A>",
"$s8mangling2psoPyyxlF" => "+- postfix<A>",
"$s8mangling2psoiyyx_xtlF" => "+- infix<A>",
"$s8mangling2psopyyx1a_x1bt_tlF" => "+- prefix<A>",
"$s8mangling2psoPyyx1a_x1bt_tlF" => "+- postfix<A>",
"$s8mangling007p_qcaDcoiyS2i_SitF" => "«+» infix",
"$s8mangling12any_protocolyyypF" => "any_protocol",
"$s8mangling12one_protocolyyAA3Foo_pF" => "one_protocol",
"$s8mangling18one_protocol_twiceyyAA3Foo_p_AaC_ptF" => "one_protocol_twice",
"$s8mangling12two_protocolyyAA3Bar_AA3FoopF" => "two_protocol",
"$s8mangling3ZimC4zangyyx_qd__tlF" => "Zim.zang<A>",
"$s8mangling3ZimC4zungyyqd___xtlF" => "Zim.zung<A>",
"$s8mangling28uses_objc_class_and_protocol1o1p2p2ySo8NSObjectC_So8NSAnsing_pSo14NSBetterAnsing_ptF" => "uses_objc_class_and_protocol",
"$s8mangling17uses_clang_struct1rySo6NSRectV_tF" => "uses_clang_struct",
"$s8mangling14uses_optionals1xs7UnicodeO6ScalarVSgSiSg_tF" => "uses_optionals",
"$s8mangling12GenericUnionO3FooyACyxGSicAEmlF" => "GenericUnion.Foo<A>",
"$s8mangling10HasVarInitV5stateSbvpZfiSbyKXKfu_" => "implicit closure #1 in variable initialization expression of static HasVarInit.state",
"$s8mangling19autoClosureOverload1fySiyXK_tF" => "autoClosureOverload",
"$s8mangling19autoClosureOverload1fySiyXE_tF" => "autoClosureOverload",
"$s8mangling24autoClosureOverloadCallsyyF" => "autoClosureOverloadCalls",
"$s8mangling4fooAyyxAA12HasAssocTypeRzlF" => "fooA<A>",
"$s8mangling4fooByyxAA12HasAssocTypeRzAA0D4Reqt0D0RpzlF" => "fooB<A>",
"$s8mangling2qqoiyySi_SitF" => "?? infix",
"$s8mangling24InstanceAndClassPropertyV8propertySivg" => "InstanceAndClassProperty.property.getter",
"$s8mangling24InstanceAndClassPropertyV8propertySivs" => "InstanceAndClassProperty.property.setter",
"$s8mangling24InstanceAndClassPropertyV8propertySivgZ" => "static InstanceAndClassProperty.property.getter",
"$s8mangling24InstanceAndClassPropertyV8propertySivsZ" => "static InstanceAndClassProperty.property.setter",
"$s8mangling6curry1yyF" => "curry1",
"$s8mangling3barSiyKF" => "bar",
"$s8mangling12curry1ThrowsyyKF" => "curry1Throws",
"$s8mangling12curry2ThrowsyycyKF" => "curry2Throws",
"$s8mangling6curry3yyKcyF" => "curry3",
"$s8mangling12curry3ThrowsyyKcyKF" => "curry3Throws",
"$s8mangling14varargsVsArray3arr1nySid_SStF" => "varargsVsArray",
"$s8mangling14varargsVsArray3arr1nySaySiG_SStF" => "varargsVsArray",
"$s8mangling14varargsVsArray3arr1nySaySiGd_SStF" => "varargsVsArray",
// Swift 5.2
"$s7ranking22propertyVersusFunctionyyAA1P_p_xtAaCRzlFyAaC_pcAaC_pcfu_" => "implicit closure #1 in propertyVersusFunction<A>",
});
}
| 71.955631 | 188 | 0.71579 |
22b22c7bd8affae8296c3bdfe3b218ba43586336
| 21,639 |
// Copyright 2015-2016 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//! ECDSA Signatures using the P-256 and P-384 curves.
use super::digest_scalar::digest_scalar;
use crate::{
arithmetic::montgomery::*,
cpu, digest,
ec::{
self,
suite_b::{ops::*, private_key},
},
error,
io::der,
limb, pkcs8, rand, sealed, signature,
};
/// An ECDSA signing algorithm.
pub struct EcdsaSigningAlgorithm {
curve: &'static ec::Curve,
private_scalar_ops: &'static PrivateScalarOps,
private_key_ops: &'static PrivateKeyOps,
digest_alg: &'static digest::Algorithm,
pkcs8_template: &'static pkcs8::Template,
format_rs: fn(ops: &'static ScalarOps, r: &Scalar, s: &Scalar, out: &mut [u8]) -> usize,
id: AlgorithmID,
}
#[derive(Debug, Eq, PartialEq)]
enum AlgorithmID {
ECDSA_P256_SHA256_FIXED_SIGNING,
ECDSA_P384_SHA384_FIXED_SIGNING,
ECDSA_P256_SHA256_ASN1_SIGNING,
ECDSA_P384_SHA384_ASN1_SIGNING,
}
derive_debug_via_id!(EcdsaSigningAlgorithm);
impl PartialEq for EcdsaSigningAlgorithm {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
impl Eq for EcdsaSigningAlgorithm {}
impl sealed::Sealed for EcdsaSigningAlgorithm {}
/// An ECDSA key pair, used for signing.
pub struct EcdsaKeyPair {
d: Scalar<R>,
nonce_key: NonceRandomKey,
alg: &'static EcdsaSigningAlgorithm,
public_key: PublicKey,
}
derive_debug_via_field!(EcdsaKeyPair, stringify!(EcdsaKeyPair), public_key);
impl EcdsaKeyPair {
/// Generates a new key pair and returns the key pair serialized as a
/// PKCS#8 document.
///
/// The PKCS#8 document will be a v1 `OneAsymmetricKey` with the public key
/// included in the `ECPrivateKey` structure, as described in
/// [RFC 5958 Section 2] and [RFC 5915]. The `ECPrivateKey` structure will
/// not have a `parameters` field so the generated key is compatible with
/// PKCS#11.
///
/// [RFC 5915]: https://tools.ietf.org/html/rfc5915
/// [RFC 5958 Section 2]: https://tools.ietf.org/html/rfc5958#section-2
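    ///
    /// # Example
    ///
    /// A hedged sketch (not from the original docs; error handling is elided
    /// and the choice of signing algorithm is illustrative):
    ///
    /// ```ignore
    /// use ring::{rand, signature::{self, EcdsaKeyPair}};
    ///
    /// let rng = rand::SystemRandom::new();
    /// let pkcs8 = EcdsaKeyPair::generate_pkcs8(
    ///     &signature::ECDSA_P256_SHA256_ASN1_SIGNING, &rng)?;
    /// let key_pair = EcdsaKeyPair::from_pkcs8(
    ///     &signature::ECDSA_P256_SHA256_ASN1_SIGNING, pkcs8.as_ref(), &rng)?;
    /// let sig = key_pair.sign(&rng, b"message")?;
    /// ```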
pub fn generate_pkcs8(
alg: &'static EcdsaSigningAlgorithm,
rng: &dyn rand::SecureRandom,
) -> Result<pkcs8::Document, error::Unspecified> {
let private_key = ec::Seed::generate(alg.curve, rng, cpu::features())?;
let public_key = private_key.compute_public_key()?;
Ok(pkcs8::wrap_key(
alg.pkcs8_template,
private_key.bytes_less_safe(),
public_key.as_ref(),
))
}
/// Constructs an ECDSA key pair by parsing an unencrypted PKCS#8 v1
/// id-ecPublicKey `ECPrivateKey` key.
///
/// The input must be in PKCS#8 v1 format. It must contain the public key in
/// the `ECPrivateKey` structure; `from_pkcs8()` will verify that the public
/// key and the private key are consistent with each other. The algorithm
/// identifier must identify the curve by name; it must not use an
/// "explicit" encoding of the curve. The `parameters` field of the
/// `ECPrivateKey`, if present, must be the same named curve that is in the
/// algorithm identifier in the PKCS#8 header.
pub fn from_pkcs8(
alg: &'static EcdsaSigningAlgorithm,
pkcs8: &[u8],
rng: &dyn rand::SecureRandom,
) -> Result<Self, error::KeyRejected> {
let key_pair = ec::suite_b::key_pair_from_pkcs8(
alg.curve,
alg.pkcs8_template,
untrusted::Input::from(pkcs8),
cpu::features(),
)?;
Self::new(alg, key_pair, rng)
}
/// Constructs an ECDSA key pair from the private key and public key bytes
///
    /// The private key must be encoded as a big-endian fixed-length integer. For
/// example, a P-256 private key must be 32 bytes prefixed with leading
/// zeros as needed.
///
    /// The public key is encoded in uncompressed form using the
/// Octet-String-to-Elliptic-Curve-Point algorithm in
/// [SEC 1: Elliptic Curve Cryptography, Version 2.0].
///
/// This is intended for use by code that deserializes key pairs. It is
/// recommended to use `EcdsaKeyPair::from_pkcs8()` (with a PKCS#8-encoded
/// key) instead.
///
/// [SEC 1: Elliptic Curve Cryptography, Version 2.0]:
/// http://www.secg.org/sec1-v2.pdf
pub fn from_private_key_and_public_key(
alg: &'static EcdsaSigningAlgorithm,
private_key: &[u8],
public_key: &[u8],
rng: &dyn rand::SecureRandom,
) -> Result<Self, error::KeyRejected> {
let key_pair = ec::suite_b::key_pair_from_bytes(
alg.curve,
untrusted::Input::from(private_key),
untrusted::Input::from(public_key),
cpu::features(),
)?;
Self::new(alg, key_pair, rng)
}
fn new(
alg: &'static EcdsaSigningAlgorithm,
key_pair: ec::KeyPair,
rng: &dyn rand::SecureRandom,
) -> Result<Self, error::KeyRejected> {
let (seed, public_key) = key_pair.split();
let d = private_key::private_key_as_scalar(alg.private_key_ops, &seed);
let d = alg
.private_scalar_ops
.scalar_ops
.scalar_product(&d, &alg.private_scalar_ops.oneRR_mod_n);
let nonce_key = NonceRandomKey::new(alg, &seed, rng)?;
Ok(Self {
d,
nonce_key,
alg,
public_key: PublicKey(public_key),
})
}
/// Deprecated. Returns the signature of the `message` using a random nonce
/// generated by `rng`.
pub fn sign(
&self,
rng: &dyn rand::SecureRandom,
message: &[u8],
) -> Result<signature::Signature, error::Unspecified> {
// Step 4 (out of order).
let h = digest::digest(self.alg.digest_alg, message);
// Incorporate `h` into the nonce to hedge against faulty RNGs. (This
// is not an approved random number generator that is mandated in
// the spec.)
let nonce_rng = NonceRandom {
key: &self.nonce_key,
message_digest: &h,
rng,
};
self.sign_digest(h, &nonce_rng)
}
#[cfg(test)]
fn sign_with_fixed_nonce_during_test(
&self,
rng: &dyn rand::SecureRandom,
message: &[u8],
) -> Result<signature::Signature, error::Unspecified> {
// Step 4 (out of order).
let h = digest::digest(self.alg.digest_alg, message);
self.sign_digest(h, rng)
}
/// Returns the signature of message digest `h` using a "random" nonce
/// generated by `rng`.
fn sign_digest(
&self,
h: digest::Digest,
rng: &dyn rand::SecureRandom,
) -> Result<signature::Signature, error::Unspecified> {
// NSA Suite B Implementer's Guide to ECDSA Section 3.4.1: ECDSA
// Signature Generation.
// NSA Guide Prerequisites:
//
// Prior to generating an ECDSA signature, the signatory shall
// obtain:
//
// 1. an authentic copy of the domain parameters,
// 2. a digital signature key pair (d,Q), either generated by a
// method from Appendix A.1, or obtained from a trusted third
// party,
// 3. assurance of the validity of the public key Q (see Appendix
// A.3), and
// 4. assurance that he/she/it actually possesses the associated
// private key d (see [SP800-89] Section 6).
//
// The domain parameters are hard-coded into the source code.
// `EcdsaKeyPair::generate_pkcs8()` can be used to meet the second
// requirement; otherwise, it is up to the user to ensure the key pair
// was obtained from a trusted private key. The constructors for
// `EcdsaKeyPair` ensure that #3 and #4 are met subject to the caveats
// in SP800-89 Section 6.
let ops = self.alg.private_scalar_ops;
let scalar_ops = ops.scalar_ops;
let cops = scalar_ops.common;
let private_key_ops = self.alg.private_key_ops;
for _ in 0..100 {
// XXX: iteration conut?
// Step 1.
let k = private_key::random_scalar(self.alg.private_key_ops, rng)?;
let k_inv = scalar_ops.scalar_inv_to_mont(&k);
// Step 2.
let r = private_key_ops.point_mul_base(&k);
// Step 3.
let r = {
let (x, _) = private_key::affine_from_jacobian(private_key_ops, &r)?;
let x = cops.elem_unencoded(&x);
elem_reduced_to_scalar(cops, &x)
};
if cops.is_zero(&r) {
continue;
}
// Step 4 is done by the caller.
// Step 5.
let e = digest_scalar(scalar_ops, h);
// Step 6.
let s = {
let dr = scalar_ops.scalar_product(&self.d, &r);
let e_plus_dr = scalar_sum(cops, &e, &dr);
scalar_ops.scalar_product(&k_inv, &e_plus_dr)
};
if cops.is_zero(&s) {
continue;
}
// Step 7 with encoding.
return Ok(signature::Signature::new(|sig_bytes| {
(self.alg.format_rs)(scalar_ops, &r, &s, sig_bytes)
}));
}
Err(error::Unspecified)
}
}
/// Generates an ECDSA nonce in a way that attempts to protect against a faulty
/// `SecureRandom`.
struct NonceRandom<'a> {
key: &'a NonceRandomKey,
message_digest: &'a digest::Digest,
rng: &'a dyn rand::SecureRandom,
}
impl core::fmt::Debug for NonceRandom<'_> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("NonceRandom").finish()
}
}
impl rand::sealed::SecureRandom for NonceRandom<'_> {
fn fill_impl(&self, dest: &mut [u8]) -> Result<(), error::Unspecified> {
// Use the same digest algorithm that will be used to digest the
// message. The digest algorithm's output is exactly the right size;
// this is checked below.
//
// XXX(perf): The single iteration will require two digest block
// operations because the amount of data digested is larger than one
// block.
let digest_alg = self.key.0.algorithm();
let mut ctx = digest::Context::new(digest_alg);
// Digest the randomized digest of the private key.
let key = self.key.0.as_ref();
ctx.update(key);
// The random value is digested between the key and the message so that
// the key and the message are not directly digested in the same digest
// block.
assert!(key.len() <= digest_alg.block_len / 2);
{
let mut rand = [0u8; digest::MAX_BLOCK_LEN];
let rand = &mut rand[..digest_alg.block_len - key.len()];
assert!(rand.len() >= dest.len());
self.rng.fill(rand)?;
ctx.update(rand);
}
ctx.update(self.message_digest.as_ref());
let nonce = ctx.finish();
// `copy_from_slice()` panics if the lengths differ, so we don't have
// to separately assert that the lengths are the same.
dest.copy_from_slice(nonce.as_ref());
Ok(())
}
}
impl<'a> sealed::Sealed for NonceRandom<'a> {}
struct NonceRandomKey(digest::Digest);
impl NonceRandomKey {
fn new(
alg: &EcdsaSigningAlgorithm,
seed: &ec::Seed,
rng: &dyn rand::SecureRandom,
) -> Result<Self, error::KeyRejected> {
let mut rand = [0; digest::MAX_OUTPUT_LEN];
let rand = &mut rand[0..alg.curve.elem_scalar_seed_len];
// XXX: `KeyRejected` isn't the right way to model failure of the RNG,
// but to fix that we'd need to break the API by changing the result type.
// TODO: Fix the API in the next breaking release.
rng.fill(rand)
.map_err(|error::Unspecified| error::KeyRejected::rng_failed())?;
let mut ctx = digest::Context::new(alg.digest_alg);
ctx.update(rand);
ctx.update(seed.bytes_less_safe());
Ok(Self(ctx.finish()))
}
}
impl signature::KeyPair for EcdsaKeyPair {
type PublicKey = PublicKey;
fn public_key(&self) -> &Self::PublicKey {
&self.public_key
}
}
#[derive(Clone, Copy)]
pub struct PublicKey(ec::PublicKey);
derive_debug_self_as_ref_hex_bytes!(PublicKey);
impl AsRef<[u8]> for PublicKey {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
fn format_rs_fixed(ops: &'static ScalarOps, r: &Scalar, s: &Scalar, out: &mut [u8]) -> usize {
let scalar_len = ops.scalar_bytes_len();
let (r_out, rest) = out.split_at_mut(scalar_len);
limb::big_endian_from_limbs(&r.limbs[..ops.common.num_limbs], r_out);
let (s_out, _) = rest.split_at_mut(scalar_len);
limb::big_endian_from_limbs(&s.limbs[..ops.common.num_limbs], s_out);
2 * scalar_len
}
fn format_rs_asn1(ops: &'static ScalarOps, r: &Scalar, s: &Scalar, out: &mut [u8]) -> usize {
    // This assumes `a` is not zero since neither `r` nor `s` is allowed to be
    // zero.
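    // DER INTEGERs are big-endian, minimal-length, two's complement. An
    // illustrative example with a 32-byte scalar: a value whose leading byte
    // is 0x8F keeps the 0x00 prefix (tag/len/value = 02 21 00 8F ...), while
    // one starting 0x1A is emitted without it (02 20 1A ...).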
fn format_integer_tlv(ops: &ScalarOps, a: &Scalar, out: &mut [u8]) -> usize {
let mut fixed = [0u8; ec::SCALAR_MAX_BYTES + 1];
let fixed = &mut fixed[..(ops.scalar_bytes_len() + 1)];
limb::big_endian_from_limbs(&a.limbs[..ops.common.num_limbs], &mut fixed[1..]);
        // Since `fixed` is an extra byte long, it is guaranteed to start
        // with a zero.
debug_assert_eq!(fixed[0], 0);
// There must be at least one non-zero byte since `a` isn't zero.
let first_index = fixed.iter().position(|b| *b != 0).unwrap();
// If the first byte has its high bit set, it needs to be prefixed with 0x00.
let first_index = if fixed[first_index] & 0x80 != 0 {
first_index - 1
} else {
first_index
};
let value = &fixed[first_index..];
out[0] = der::Tag::Integer as u8;
// Lengths less than 128 are encoded in one byte.
assert!(value.len() < 128);
out[1] = value.len() as u8;
out[2..][..value.len()].copy_from_slice(value);
2 + value.len()
}
out[0] = der::Tag::Sequence as u8;
let r_tlv_len = format_integer_tlv(ops, r, &mut out[2..]);
let s_tlv_len = format_integer_tlv(ops, s, &mut out[2..][r_tlv_len..]);
// Lengths less than 128 are encoded in one byte.
let value_len = r_tlv_len + s_tlv_len;
assert!(value_len < 128);
out[1] = value_len as u8;
2 + value_len
}
/// Signing of fixed-length (PKCS#11 style) ECDSA signatures using the
/// P-256 curve and SHA-256.
///
/// See "`ECDSA_*_FIXED` Details" in `ring::signature`'s module-level
/// documentation for more details.
pub static ECDSA_P256_SHA256_FIXED_SIGNING: EcdsaSigningAlgorithm = EcdsaSigningAlgorithm {
curve: &ec::suite_b::curve::P256,
private_scalar_ops: &p256::PRIVATE_SCALAR_OPS,
private_key_ops: &p256::PRIVATE_KEY_OPS,
digest_alg: &digest::SHA256,
pkcs8_template: &EC_PUBLIC_KEY_P256_PKCS8_V1_TEMPLATE,
format_rs: format_rs_fixed,
id: AlgorithmID::ECDSA_P256_SHA256_FIXED_SIGNING,
};
/// Signing of fixed-length (PKCS#11 style) ECDSA signatures using the
/// P-384 curve and SHA-384.
///
/// See "`ECDSA_*_FIXED` Details" in `ring::signature`'s module-level
/// documentation for more details.
pub static ECDSA_P384_SHA384_FIXED_SIGNING: EcdsaSigningAlgorithm = EcdsaSigningAlgorithm {
curve: &ec::suite_b::curve::P384,
private_scalar_ops: &p384::PRIVATE_SCALAR_OPS,
private_key_ops: &p384::PRIVATE_KEY_OPS,
digest_alg: &digest::SHA384,
pkcs8_template: &EC_PUBLIC_KEY_P384_PKCS8_V1_TEMPLATE,
format_rs: format_rs_fixed,
id: AlgorithmID::ECDSA_P384_SHA384_FIXED_SIGNING,
};
/// Signing of ASN.1 DER-encoded ECDSA signatures using the P-256 curve and
/// SHA-256.
///
/// See "`ECDSA_*_ASN1` Details" in `ring::signature`'s module-level
/// documentation for more details.
pub static ECDSA_P256_SHA256_ASN1_SIGNING: EcdsaSigningAlgorithm = EcdsaSigningAlgorithm {
curve: &ec::suite_b::curve::P256,
private_scalar_ops: &p256::PRIVATE_SCALAR_OPS,
private_key_ops: &p256::PRIVATE_KEY_OPS,
digest_alg: &digest::SHA256,
pkcs8_template: &EC_PUBLIC_KEY_P256_PKCS8_V1_TEMPLATE,
format_rs: format_rs_asn1,
id: AlgorithmID::ECDSA_P256_SHA256_ASN1_SIGNING,
};
/// Signing of ASN.1 DER-encoded ECDSA signatures using the P-384 curve and
/// SHA-384.
///
/// See "`ECDSA_*_ASN1` Details" in `ring::signature`'s module-level
/// documentation for more details.
pub static ECDSA_P384_SHA384_ASN1_SIGNING: EcdsaSigningAlgorithm = EcdsaSigningAlgorithm {
curve: &ec::suite_b::curve::P384,
private_scalar_ops: &p384::PRIVATE_SCALAR_OPS,
private_key_ops: &p384::PRIVATE_KEY_OPS,
digest_alg: &digest::SHA384,
pkcs8_template: &EC_PUBLIC_KEY_P384_PKCS8_V1_TEMPLATE,
format_rs: format_rs_asn1,
id: AlgorithmID::ECDSA_P384_SHA384_ASN1_SIGNING,
};
static EC_PUBLIC_KEY_P256_PKCS8_V1_TEMPLATE: pkcs8::Template = pkcs8::Template {
bytes: include_bytes!("ecPublicKey_p256_pkcs8_v1_template.der"),
alg_id_range: core::ops::Range { start: 8, end: 27 },
curve_id_index: 9,
private_key_index: 0x24,
};
static EC_PUBLIC_KEY_P384_PKCS8_V1_TEMPLATE: pkcs8::Template = pkcs8::Template {
bytes: include_bytes!("ecPublicKey_p384_pkcs8_v1_template.der"),
alg_id_range: core::ops::Range { start: 8, end: 24 },
curve_id_index: 9,
private_key_index: 0x23,
};
#[cfg(test)]
mod tests {
use crate::{rand, signature, test};
#[test]
fn signature_ecdsa_sign_fixed_test() {
let rng = rand::SystemRandom::new();
test::run(
test_file!("ecdsa_sign_fixed_tests.txt"),
|section, test_case| {
assert_eq!(section, "");
let curve_name = test_case.consume_string("Curve");
let digest_name = test_case.consume_string("Digest");
let msg = test_case.consume_bytes("Msg");
let d = test_case.consume_bytes("d");
let q = test_case.consume_bytes("Q");
let k = test_case.consume_bytes("k");
let expected_result = test_case.consume_bytes("Sig");
let alg = match (curve_name.as_str(), digest_name.as_str()) {
("P-256", "SHA256") => &signature::ECDSA_P256_SHA256_FIXED_SIGNING,
("P-384", "SHA384") => &signature::ECDSA_P384_SHA384_FIXED_SIGNING,
_ => {
panic!("Unsupported curve+digest: {}+{}", curve_name, digest_name);
}
};
let private_key =
signature::EcdsaKeyPair::from_private_key_and_public_key(alg, &d, &q, &rng)
.unwrap();
let rng = test::rand::FixedSliceRandom { bytes: &k };
let actual_result = private_key
.sign_with_fixed_nonce_during_test(&rng, &msg)
.unwrap();
assert_eq!(actual_result.as_ref(), &expected_result[..]);
Ok(())
},
);
}
#[test]
fn signature_ecdsa_sign_asn1_test() {
let rng = rand::SystemRandom::new();
test::run(
test_file!("ecdsa_sign_asn1_tests.txt"),
|section, test_case| {
assert_eq!(section, "");
let curve_name = test_case.consume_string("Curve");
let digest_name = test_case.consume_string("Digest");
let msg = test_case.consume_bytes("Msg");
let d = test_case.consume_bytes("d");
let q = test_case.consume_bytes("Q");
let k = test_case.consume_bytes("k");
let expected_result = test_case.consume_bytes("Sig");
let alg = match (curve_name.as_str(), digest_name.as_str()) {
("P-256", "SHA256") => &signature::ECDSA_P256_SHA256_ASN1_SIGNING,
("P-384", "SHA384") => &signature::ECDSA_P384_SHA384_ASN1_SIGNING,
_ => {
panic!("Unsupported curve+digest: {}+{}", curve_name, digest_name);
}
};
let private_key =
signature::EcdsaKeyPair::from_private_key_and_public_key(alg, &d, &q, &rng)
.unwrap();
let rng = test::rand::FixedSliceRandom { bytes: &k };
let actual_result = private_key
.sign_with_fixed_nonce_during_test(&rng, &msg)
.unwrap();
assert_eq!(actual_result.as_ref(), &expected_result[..]);
Ok(())
},
);
}
}
| 35.826159 | 95 | 0.611396 |
8a3b155ffb8d66986eef2d4ff53fbb8c369f46fd
| 27,735 |
#![allow(dead_code)]
use na::{
Isometry3, Matrix3, Matrix4, Point3, Quaternion, Rotation3, Translation3, Unit, UnitQuaternion,
Vector3,
};
use physx::cooking::{
ConvexMeshCookingResult, PxConvexMeshDesc, PxCooking, PxCookingParams, PxHeightFieldDesc,
PxTriangleMeshDesc, TriangleMeshCookingResult,
};
use physx::foundation::DefaultAllocator;
use physx::prelude::*;
use physx::scene::FrictionType;
use physx::traits::Class;
use physx_sys::{
FilterShaderCallbackInfo, PxBitAndByte, PxConvexFlags, PxConvexMeshGeometryFlags,
PxHeightFieldSample, PxMeshGeometryFlags, PxMeshScale_new, PxRigidActor,
};
use rapier::counters::Counters;
use rapier::dynamics::{
IntegrationParameters, JointParams, JointSet, RigidBodyHandle, RigidBodySet,
};
use rapier::geometry::{Collider, ColliderSet};
use rapier::utils::WBasis;
use std::collections::HashMap;
trait IntoNa {
type Output;
fn into_na(self) -> Self::Output;
}
impl IntoNa for glam::Mat4 {
type Output = Matrix4<f32>;
fn into_na(self) -> Self::Output {
self.to_cols_array_2d().into()
}
}
impl IntoNa for PxVec3 {
type Output = Vector3<f32>;
fn into_na(self) -> Self::Output {
Vector3::new(self.x(), self.y(), self.z())
}
}
impl IntoNa for PxQuat {
type Output = Quaternion<f32>;
fn into_na(self) -> Self::Output {
Quaternion::new(self.w(), self.x(), self.y(), self.z())
}
}
impl IntoNa for PxTransform {
type Output = Isometry3<f32>;
fn into_na(self) -> Self::Output {
let tra = self.translation().into_na();
let quat = self.rotation().into_na();
let unit_quat = Unit::new_unchecked(quat);
Isometry3::from_parts(tra.into(), unit_quat)
}
}
trait IntoPhysx {
type Output;
fn into_physx(self) -> Self::Output;
}
impl IntoPhysx for Vector3<f32> {
type Output = PxVec3;
fn into_physx(self) -> Self::Output {
PxVec3::new(self.x, self.y, self.z)
}
}
impl IntoPhysx for Point3<f32> {
type Output = PxVec3;
fn into_physx(self) -> Self::Output {
PxVec3::new(self.x, self.y, self.z)
}
}
impl IntoPhysx for Isometry3<f32> {
type Output = PxTransform;
fn into_physx(self) -> Self::Output {
self.into_glam().into()
}
}
trait IntoGlam {
type Output;
fn into_glam(self) -> Self::Output;
}
impl IntoGlam for Vector3<f32> {
type Output = glam::Vec3;
fn into_glam(self) -> Self::Output {
glam::vec3(self.x, self.y, self.z)
}
}
impl IntoGlam for Point3<f32> {
type Output = glam::Vec3;
fn into_glam(self) -> Self::Output {
glam::vec3(self.x, self.y, self.z)
}
}
impl IntoGlam for Matrix4<f32> {
type Output = glam::Mat4;
fn into_glam(self) -> Self::Output {
glam::Mat4::from_cols_array_2d(&self.into())
}
}
impl IntoGlam for Isometry3<f32> {
type Output = glam::Mat4;
fn into_glam(self) -> Self::Output {
glam::Mat4::from_cols_array_2d(&self.to_homogeneous().into())
}
}
thread_local! {
pub static FOUNDATION: std::cell::RefCell<PxPhysicsFoundation> = std::cell::RefCell::new(PhysicsFoundation::default());
}
pub struct PhysxWorld {
// physics: Physics,
// cooking: Cooking,
materials: Vec<Owner<PxMaterial>>,
shapes: Vec<Owner<PxShape>>,
scene: Option<Owner<PxScene>>,
}
impl Drop for PhysxWorld {
fn drop(&mut self) {
let scene = self.scene.take();
// FIXME: we get a segfault if we don't forget the scene.
std::mem::forget(scene);
}
}
impl PhysxWorld {
pub fn from_rapier(
gravity: Vector3<f32>,
integration_parameters: &IntegrationParameters,
bodies: &RigidBodySet,
colliders: &ColliderSet,
joints: &JointSet,
use_two_friction_directions: bool,
num_threads: usize,
) -> Self {
FOUNDATION.with(|physics| {
let mut physics = physics.borrow_mut();
let mut shapes = Vec::new();
let mut materials = Vec::new();
let friction_type = if use_two_friction_directions {
FrictionType::TwoDirectional
} else {
FrictionType::Patch
};
let mut scene_desc = SceneDescriptor {
gravity: gravity.into_physx(),
thread_count: num_threads as u32,
broad_phase_type: BroadPhaseType::AutomaticBoxPruning,
solver_type: SolverType::PGS,
friction_type,
ccd_max_passes: integration_parameters.max_ccd_substeps as u32,
..SceneDescriptor::new(())
};
let ccd_enabled = bodies.iter().any(|(_, rb)| rb.is_ccd_enabled());
if ccd_enabled {
scene_desc.simulation_filter_shader =
FilterShaderDescriptor::CallDefaultFirst(ccd_filter_shader);
scene_desc.flags.insert(SceneFlag::EnableCcd);
}
let mut scene: Owner<PxScene> = physics.create(scene_desc).unwrap();
let mut rapier2dynamic = HashMap::new();
let mut rapier2static = HashMap::new();
let cooking_params =
PxCookingParams::new(&*physics).expect("Failed to init PhysX cooking.");
let mut cooking = PxCooking::new(physics.foundation_mut(), &cooking_params)
.expect("Failed to init PhysX cooking");
/*
*
* Rigid bodies
*
*/
for (rapier_handle, rb) in bodies.iter() {
let pos = rb.position().into_physx();
if rb.is_dynamic() {
let mut actor = physics.create_dynamic(&pos, rapier_handle).unwrap();
let linvel = rb.linvel().into_physx();
let angvel = rb.angvel().into_physx();
actor.set_linear_velocity(&linvel, true);
actor.set_angular_velocity(&angvel, true);
actor.set_solver_iteration_counts(
integration_parameters.max_position_iterations as u32,
integration_parameters.max_velocity_iterations as u32,
);
rapier2dynamic.insert(rapier_handle, actor);
} else {
let actor = physics.create_static(pos, ()).unwrap();
rapier2static.insert(rapier_handle, actor);
}
}
/*
*
* Colliders
*
*/
for (_, collider) in colliders.iter() {
if let Some((mut px_shape, px_material, collider_pos)) =
physx_collider_from_rapier_collider(&mut *physics, &mut cooking, &collider)
{
let parent_body = &bodies[collider.parent()];
if !parent_body.is_dynamic() {
let actor = rapier2static.get_mut(&collider.parent()).unwrap();
actor.attach_shape(&mut px_shape);
} else {
let actor = rapier2dynamic.get_mut(&collider.parent()).unwrap();
actor.attach_shape(&mut px_shape);
}
unsafe {
let pose = collider_pos.into_physx();
physx_sys::PxShape_setLocalPose_mut(px_shape.as_mut_ptr(), &pose.into());
}
shapes.push(px_shape);
materials.push(px_material);
}
}
// Update mass properties and CCD flags.
for (rapier_handle, actor) in rapier2dynamic.iter_mut() {
let rb = &bodies[*rapier_handle];
let densities: Vec<_> = rb
.colliders()
.iter()
.map(|h| colliders[*h].density().unwrap_or(0.0))
.collect();
unsafe {
physx_sys::PxRigidBodyExt_updateMassAndInertia_mut(
std::mem::transmute(actor.as_mut()),
densities.as_ptr(),
densities.len() as u32,
std::ptr::null(),
false,
);
if rb.is_ccd_enabled() {
physx_sys::PxRigidBody_setRigidBodyFlag_mut(
std::mem::transmute(actor.as_mut()),
RigidBodyFlag::EnableCCD as u32,
true,
);
// physx_sys::PxRigidBody_setMinCCDAdvanceCoefficient_mut(
// std::mem::transmute(actor.as_mut()),
// 0.0,
// );
// physx_sys::PxRigidBody_setRigidBodyFlag_mut(
// std::mem::transmute(actor.as_mut()),
// RigidBodyFlag::EnableCCDFriction as u32,
// true,
// );
}
}
}
/*
*
* Joints
*
*/
Self::setup_joints(
&mut physics,
joints,
&mut rapier2static,
&mut rapier2dynamic,
);
for (_, actor) in rapier2static {
scene.add_static_actor(actor);
}
for (_, actor) in rapier2dynamic {
scene.add_dynamic_actor(actor);
}
Self {
scene: Some(scene),
shapes,
materials,
}
})
}
fn setup_joints(
physics: &mut PxPhysicsFoundation,
joints: &JointSet,
rapier2static: &mut HashMap<RigidBodyHandle, Owner<PxRigidStatic>>,
rapier2dynamic: &mut HashMap<RigidBodyHandle, Owner<PxRigidDynamic>>,
) {
unsafe {
for joint in joints.iter() {
let actor1 = rapier2static
.get_mut(&joint.1.body1)
.map(|act| &mut **act as *mut PxRigidStatic as *mut PxRigidActor)
.or(rapier2dynamic
.get_mut(&joint.1.body1)
.map(|act| &mut **act as *mut PxRigidDynamic as *mut PxRigidActor))
.unwrap();
let actor2 = rapier2static
.get_mut(&joint.1.body2)
.map(|act| &mut **act as *mut PxRigidStatic as *mut PxRigidActor)
.or(rapier2dynamic
.get_mut(&joint.1.body2)
.map(|act| &mut **act as *mut PxRigidDynamic as *mut PxRigidActor))
.unwrap();
match &joint.1.params {
JointParams::BallJoint(params) => {
let frame1 = Isometry3::new(params.local_anchor1.coords, na::zero())
.into_physx()
.into();
let frame2 = Isometry3::new(params.local_anchor2.coords, na::zero())
.into_physx()
.into();
physx_sys::phys_PxSphericalJointCreate(
physics.as_mut_ptr(),
actor1,
&frame1 as *const _,
actor2,
&frame2 as *const _,
);
}
JointParams::RevoluteJoint(params) => {
// NOTE: orthonormal_basis() returns the two basis vectors.
// However we only use one and recompute the other just to
// make sure our basis is right-handed.
let basis1a = params.local_axis1.orthonormal_basis()[0];
let basis2a = params.local_axis2.orthonormal_basis()[0];
let basis1b = params.local_axis1.cross(&basis1a);
let basis2b = params.local_axis2.cross(&basis2a);
let rotmat1 = Rotation3::from_matrix_unchecked(Matrix3::from_columns(&[
params.local_axis1.into_inner(),
basis1a,
basis1b,
]));
let rotmat2 = Rotation3::from_matrix_unchecked(Matrix3::from_columns(&[
params.local_axis2.into_inner(),
basis2a,
basis2b,
]));
let axisangle1 = rotmat1.scaled_axis();
let axisangle2 = rotmat2.scaled_axis();
let frame1 = Isometry3::new(params.local_anchor1.coords, axisangle1)
.into_physx()
.into();
let frame2 = Isometry3::new(params.local_anchor2.coords, axisangle2)
.into_physx()
.into();
let revolute_joint = physx_sys::phys_PxRevoluteJointCreate(
physics.as_mut_ptr(),
actor1,
&frame1 as *const _,
actor2,
&frame2 as *const _,
);
physx_sys::PxRevoluteJoint_setDriveVelocity_mut(
revolute_joint,
params.motor_target_vel,
true,
);
if params.motor_damping != 0.0 {
physx_sys::PxRevoluteJoint_setRevoluteJointFlag_mut(
revolute_joint,
physx_sys::PxRevoluteJointFlag::eDRIVE_ENABLED,
true,
);
}
}
JointParams::PrismaticJoint(params) => {
// NOTE: orthonormal_basis() returns the two basis vectors.
// However we only use one and recompute the other just to
// make sure our basis is right-handed.
let basis1a = params.local_axis1().orthonormal_basis()[0];
let basis2a = params.local_axis2().orthonormal_basis()[0];
let basis1b = params.local_axis1().cross(&basis1a);
let basis2b = params.local_axis2().cross(&basis2a);
let rotmat1 = Rotation3::from_matrix_unchecked(Matrix3::from_columns(&[
params.local_axis1().into_inner(),
basis1a,
basis1b,
]));
let rotmat2 = Rotation3::from_matrix_unchecked(Matrix3::from_columns(&[
params.local_axis2().into_inner(),
basis2a,
basis2b,
]));
let axisangle1 = rotmat1.scaled_axis();
let axisangle2 = rotmat2.scaled_axis();
let frame1 = Isometry3::new(params.local_anchor1.coords, axisangle1)
.into_physx()
.into();
let frame2 = Isometry3::new(params.local_anchor2.coords, axisangle2)
.into_physx()
.into();
let joint = physx_sys::phys_PxPrismaticJointCreate(
physics.as_mut_ptr(),
actor1,
&frame1 as *const _,
actor2,
&frame2 as *const _,
);
if params.limits_enabled {
let limits = physx_sys::PxJointLinearLimitPair {
restitution: 0.0,
bounceThreshold: 0.0,
stiffness: 0.0,
damping: 0.0,
contactDistance: 0.01,
lower: params.limits[0],
upper: params.limits[1],
};
physx_sys::PxPrismaticJoint_setLimit_mut(joint, &limits);
physx_sys::PxPrismaticJoint_setPrismaticJointFlag_mut(
joint,
physx_sys::PxPrismaticJointFlag::eLIMIT_ENABLED,
true,
);
}
}
JointParams::FixedJoint(params) => {
let frame1 = params.local_anchor1.into_physx().into();
let frame2 = params.local_anchor2.into_physx().into();
physx_sys::phys_PxFixedJointCreate(
physics.as_mut_ptr(),
actor1,
&frame1 as *const _,
actor2,
&frame2 as *const _,
);
} // JointParams::GenericJoint(_) => {
// eprintln!(
// "Joint type currently unsupported by the PhysX backend: GenericJoint."
// )
// }
}
}
}
}
pub fn step(&mut self, counters: &mut Counters, params: &IntegrationParameters) {
let mut scratch = unsafe { ScratchBuffer::new(4) };
counters.step_started();
self.scene
.as_mut()
.unwrap()
.step(
params.dt,
None::<&mut physx_sys::PxBaseTask>,
Some(&mut scratch),
true,
)
.expect("error occurred during PhysX simulation");
counters.step_completed();
}
pub fn sync(&mut self, bodies: &mut RigidBodySet, colliders: &mut ColliderSet) {
for actor in self.scene.as_mut().unwrap().get_dynamic_actors() {
let handle = actor.get_user_data();
let pos = actor.get_global_pose().into_na();
let rb = &mut bodies[*handle];
rb.set_position(pos, false);
for coll_handle in rb.colliders() {
let collider = &mut colliders[*coll_handle];
collider.set_position_debug(pos * collider.position_wrt_parent());
}
}
}
}
fn physx_collider_from_rapier_collider(
physics: &mut PxPhysicsFoundation,
cooking: &PxCooking,
collider: &Collider,
) -> Option<(Owner<PxShape>, Owner<PxMaterial>, Isometry3<f32>)> {
let mut local_pose = *collider.position_wrt_parent();
let shape = collider.shape();
let shape_flags = if collider.is_sensor() {
ShapeFlag::TriggerShape.into()
} else {
ShapeFlag::SimulationShape.into()
};
let mut material = physics
.create_material(
collider.friction,
collider.friction,
collider.restitution,
(),
)
.unwrap();
let materials = &mut [material.as_mut()][..];
let shape = if let Some(cuboid) = shape.as_cuboid() {
let geometry = PxBoxGeometry::new(
cuboid.half_extents.x,
cuboid.half_extents.y,
cuboid.half_extents.z,
);
physics.create_shape(&geometry, materials, true, shape_flags, ())
} else if let Some(ball) = shape.as_ball() {
let geometry = PxSphereGeometry::new(ball.radius);
physics.create_shape(&geometry, materials, true, shape_flags, ())
} else if let Some(capsule) = shape.as_capsule() {
let center = capsule.center();
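        // PhysX capsules extend along the local X axis, so rotate +X onto the
        // segment direction; flip the direction first so we never hit the
        // degenerate 180-degree case `rotation_between` cannot represent
        // (capsules are symmetric, so flipping is harmless).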
let mut dir = capsule.segment.b - capsule.segment.a;
if dir.x < 0.0 {
dir = -dir;
}
let rot = UnitQuaternion::rotation_between(&Vector3::x(), &dir);
local_pose = local_pose
* Translation3::from(center.coords)
* rot.unwrap_or(UnitQuaternion::identity());
let geometry = PxCapsuleGeometry::new(capsule.radius, capsule.half_height());
physics.create_shape(&geometry, materials, true, shape_flags, ())
} else if let Some(heightfield) = shape.as_heightfield() {
let heights = heightfield.heights();
let scale = heightfield.scale();
local_pose = local_pose * Translation3::new(-scale.x / 2.0, 0.0, -scale.z / 2.0);
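        // Heightfield samples are quantized to i16, so heights are scaled up
        // by Y_FACTOR here and the geometry's height scale (below) divides it
        // back out.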
const Y_FACTOR: f32 = 1_000f32;
        // `samples` must outlive `heightfield_desc`, which keeps a raw pointer
        // into it; building it outside the `unsafe` block (rather than inside,
        // where it would be dropped before `create_height_field` runs) avoids
        // a dangling pointer.
        let samples: Vec<_> = heights
            .iter()
            .map(|h| PxHeightFieldSample {
                height: (*h * Y_FACTOR) as i16,
                materialIndex0: PxBitAndByte { mData: 0 },
                materialIndex1: PxBitAndByte { mData: 0 },
            })
            .collect();
        let mut heightfield_desc;
        unsafe {
            heightfield_desc = physx_sys::PxHeightFieldDesc_new();
            heightfield_desc.nbRows = heights.nrows() as u32;
            heightfield_desc.nbColumns = heights.ncols() as u32;
            heightfield_desc.samples.stride = std::mem::size_of::<PxHeightFieldSample>() as u32;
            heightfield_desc.samples.data = samples.as_ptr() as *const std::ffi::c_void;
        }
let heightfield_desc = PxHeightFieldDesc {
obj: heightfield_desc,
};
let heightfield = cooking.create_height_field(physics, &heightfield_desc);
if let Some(mut heightfield) = heightfield {
let flags = PxMeshGeometryFlags {
mBits: physx_sys::PxMeshGeometryFlag::eDOUBLE_SIDED as u8,
};
let geometry = PxHeightFieldGeometry::new(
&mut *heightfield,
flags,
scale.y / Y_FACTOR,
scale.x / (heights.nrows() as f32 - 1.0),
scale.z / (heights.ncols() as f32 - 1.0),
);
physics.create_shape(&geometry, materials, true, shape_flags, ())
} else {
eprintln!("PhysX heightfield construction failed.");
return None;
}
} else if let Some(convex) = shape
.as_convex_polyhedron()
.or(shape.as_round_convex_polyhedron().map(|c| &c.base_shape))
{
let vertices = convex.points();
let mut convex_desc;
unsafe {
convex_desc = physx_sys::PxConvexMeshDesc_new();
convex_desc.points.count = vertices.len() as u32;
convex_desc.points.stride = (3 * std::mem::size_of::<f32>()) as u32;
convex_desc.points.data = vertices.as_ptr() as *const std::ffi::c_void;
convex_desc.flags = PxConvexFlags {
mBits: physx_sys::PxConvexFlag::eCOMPUTE_CONVEX as u16,
};
}
let convex_desc = PxConvexMeshDesc { obj: convex_desc };
let convex = cooking.create_convex_mesh(physics, &convex_desc);
if let ConvexMeshCookingResult::Success(mut convex) = convex {
let flags = PxConvexMeshGeometryFlags { mBits: 0 };
let scaling = unsafe { PxMeshScale_new() };
let geometry = PxConvexMeshGeometry::new(&mut convex, &scaling, flags);
physics.create_shape(&geometry, materials, true, shape_flags, ())
} else {
eprintln!("PhysX convex mesh construction failed.");
return None;
}
} else if let Some(trimesh) = shape.as_trimesh() {
let vertices = trimesh.vertices();
let indices = trimesh.flat_indices();
let mut mesh_desc;
unsafe {
mesh_desc = physx_sys::PxTriangleMeshDesc_new();
mesh_desc.points.count = trimesh.vertices().len() as u32;
mesh_desc.points.stride = (3 * std::mem::size_of::<f32>()) as u32;
mesh_desc.points.data = vertices.as_ptr() as *const std::ffi::c_void;
mesh_desc.triangles.count = (indices.len() as u32) / 3;
mesh_desc.triangles.stride = (3 * std::mem::size_of::<u32>()) as u32;
mesh_desc.triangles.data = indices.as_ptr() as *const std::ffi::c_void;
}
let mesh_desc = PxTriangleMeshDesc { obj: mesh_desc };
let trimesh = cooking.create_triangle_mesh(physics, &mesh_desc);
if let TriangleMeshCookingResult::Success(mut trimesh) = trimesh {
let flags = PxMeshGeometryFlags {
mBits: physx_sys::PxMeshGeometryFlag::eDOUBLE_SIDED as u8,
};
let scaling = unsafe { PxMeshScale_new() };
let geometry = PxTriangleMeshGeometry::new(&mut trimesh, &scaling, flags);
physics.create_shape(&geometry, materials, true, shape_flags, ())
} else {
eprintln!("PhysX triangle mesh construction failed.");
return None;
}
} else {
eprintln!("Creating a shape unknown to the PhysX backend.");
return None;
};
shape.map(|s| (s, material, local_pose))
}
type PxPhysicsFoundation = PhysicsFoundation<DefaultAllocator, PxShape>;
type PxMaterial = physx::material::PxMaterial<()>;
type PxShape = physx::shape::PxShape<(), PxMaterial>;
type PxArticulationLink = physx::articulation_link::PxArticulationLink<(), PxShape>;
type PxRigidStatic = physx::rigid_static::PxRigidStatic<(), PxShape>;
type PxRigidDynamic = physx::rigid_dynamic::PxRigidDynamic<RigidBodyHandle, PxShape>;
type PxArticulation = physx::articulation::PxArticulation<(), PxArticulationLink>;
type PxArticulationReducedCoordinate =
physx::articulation_reduced_coordinate::PxArticulationReducedCoordinate<(), PxArticulationLink>;
type PxScene = physx::scene::PxScene<
(),
PxArticulationLink,
PxRigidStatic,
PxRigidDynamic,
PxArticulation,
PxArticulationReducedCoordinate,
OnCollision,
OnTrigger,
OnConstraintBreak,
OnWakeSleep,
OnAdvance,
>;
/// Next up, the simulation event callbacks need to be defined, and possibly an
/// allocator callback as well.
struct OnCollision;
impl CollisionCallback for OnCollision {
fn on_collision(
&mut self,
_header: &physx_sys::PxContactPairHeader,
_pairs: &[physx_sys::PxContactPair],
) {
}
}
struct OnTrigger;
impl TriggerCallback for OnTrigger {
fn on_trigger(&mut self, _pairs: &[physx_sys::PxTriggerPair]) {}
}
struct OnConstraintBreak;
impl ConstraintBreakCallback for OnConstraintBreak {
fn on_constraint_break(&mut self, _constraints: &[physx_sys::PxConstraintInfo]) {}
}
struct OnWakeSleep;
impl WakeSleepCallback<PxArticulationLink, PxRigidStatic, PxRigidDynamic> for OnWakeSleep {
fn on_wake_sleep(
&mut self,
_actors: &[&physx::actor::ActorMap<PxArticulationLink, PxRigidStatic, PxRigidDynamic>],
_is_waking: bool,
) {
}
}
struct OnAdvance;
impl AdvanceCallback<PxArticulationLink, PxRigidDynamic> for OnAdvance {
fn on_advance(
&self,
_actors: &[&physx::rigid_body::RigidBodyMap<PxArticulationLink, PxRigidDynamic>],
_transforms: &[PxTransform],
) {
}
}
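/// Simulation filter shader run after the default one (see
/// `FilterShaderDescriptor::CallDefaultFirst` above); it enables CCD contact
/// detection on every colliding pair.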
unsafe extern "C" fn ccd_filter_shader(data: *mut FilterShaderCallbackInfo) -> u16 {
(*(*data).pairFlags).mBits |= physx_sys::PxPairFlag::eDETECT_CCD_CONTACT as u16;
0
}
| 37.837653 | 119 | 0.52349 |
d54e1a947e7fba10039db06ae3747884a072aa5d
| 1,194 |
/**
* 16756. Pismo
*
 * Author: xCrypt0r
 * Language: Rust 2018
 * Memory used: 14,588 KB
 * Running time: 12 ms
 * Solved on: October 8, 2020
*/
#![allow(non_snake_case)]
use std::io::{self, Write};
macro_rules! get_line {
( $( $t: ty ),+ ) => {
{
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
let mut iter = line.split_whitespace();
( $( iter.next().unwrap().parse::<$t>().unwrap() ),+ )
}
}
}
macro_rules! get_list {
( $t: ty ) => {
{
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
let list: Vec<$t> = line
.split_whitespace()
.map(|s| s.parse::<$t>().unwrap())
.collect();
list
}
}
}
fn main() {
let stdout = io::stdout();
let mut out = io::BufWriter::new(stdout.lock());
let N = get_line!(usize);
let v = get_list!(i32);
let mut min = std::i32::MAX;
for i in 0..N - 1 {
let interval = (v[i + 1] - v[i]).abs();
if min > interval {
min = interval;
}
}
writeln!(out, "{}", min).unwrap();
}
| 19.258065 | 66 | 0.446399 |
fe0d6a7bacafb85569c0cf88c31ac41798c78912
| 9,613 |
//! Implementation of different kinds of layers.
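//!
//! A network is just a stack of `Layer`s: a forward pass threads each layer's
//! `output` into the next. A minimal, illustrative sketch:
//!
//! ```ignore
//! let dense = DenseLayer::random(2, 3); // 2 inputs, 3 neurons
//! let act = HyperbolicLayer { size: 3 };
//! let y = act.output(&dense.output(&[1.0, -1.0]));
//! ```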
use super::traits::{Layer, WeightedLayer};
use super::utils::{dot, normal_vector};
pub struct LayerOut {
pub inputs: Vec<f32>,
pub output: Vec<f32>,
}
pub struct LayerUpdates {
pub ws: Vec<f32>,
pub bs: Vec<f32>,
}
pub struct DenseLayer {
pub weights: Vec<f32>,
pub bias: Vec<f32>,
/// (inputs per neuron, number of neurons)
pub shape: (usize, usize),
}
impl DenseLayer {
pub fn uniform(val: f32, inputs: usize, neurons: usize) -> DenseLayer {
DenseLayer {
weights: vec!(val; inputs * neurons),
bias: vec!(val; neurons),
shape: (inputs, neurons),
}
}
pub fn random(inputs: usize, neurons: usize) -> DenseLayer {
DenseLayer {
weights: normal_vector(inputs * neurons),
bias: normal_vector(neurons),
shape: (inputs, neurons),
}
}
}
impl Layer for DenseLayer {
fn input_count(self: &DenseLayer) -> usize {
self.shape.0
}
fn output_count(self: &DenseLayer) -> usize {
self.shape.1
}
    /// Output of this layer is a vector of weight/input dot products, each
    /// offset by the neuron's bias.
fn output(self: &DenseLayer, inputs: &[f32]) -> Vec<f32> {
assert_eq!(self.shape.0, inputs.len());
let neuron_weights = self.weights.chunks(self.shape.0);
let mut out: Vec<f32> = Vec::new();
for (i, w) in neuron_weights.enumerate() {
out.push(dot(w, inputs) + self.bias[i]);
}
out
}
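    /// Back-propagates `delta` through this layer's weights: the result is
    /// the matrix-vector product `transpose(W) * delta`, i.e. each input's
    /// accumulated contribution to the error.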
fn delta_from_inputs(self: &DenseLayer, delta: &[f32], inputs: &[f32]) -> Option<Vec<f32>> {
assert_eq!(self.shape.0, inputs.len());
assert_eq!(self.shape.1, delta.len());
let mut result: Vec<f32> = vec!(0.0; self.shape.0);
let neuron_weights = self.weights.chunks(self.shape.0);
for (d, nw) in delta.iter().zip(neuron_weights) {
for (i, w) in nw.iter().enumerate() {
result[i] += d * w;
}
}
Some(result)
}
/// Vector of derivatives with respect to the weights for each
/// neuron. Returns a vector with the same logical dimensions as
/// the layer's shape.
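    /// Since each neuron computes `dot(w, inputs) + b`, the derivative with
    /// respect to `w[j]` is just `inputs[j]`, so every neuron's block in the
    /// result is a copy of the input vector.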
fn derivw(self: &DenseLayer, inputs: &[f32]) -> Option<Vec<f32>> {
assert_eq!(self.shape.0, inputs.len());
let mut derivs: Vec<f32> = Vec::new();
derivs.reserve(self.shape.0 * self.shape.1);
for _ in 0..self.shape.1 {
for i in inputs {
derivs.push(*i);
}
}
Some(derivs)
}
}
impl WeightedLayer for DenseLayer {
fn weight_count(self: &DenseLayer) -> usize {
self.weights.len()
}
fn neuron_count(self: &DenseLayer) -> usize {
self.output_count()
}
fn weights_mut(self: &mut DenseLayer) -> Option<&mut Vec<f32>> {
Some(&mut self.weights)
}
fn bias_mut(self: &mut DenseLayer) -> Option<&mut Vec<f32>> {
Some(&mut self.bias)
}
}
pub struct HyperbolicLayer {
pub size: usize,
}
impl WeightedLayer for HyperbolicLayer {
fn weight_count(&self) -> usize {
0
}
fn neuron_count(&self) -> usize {
0
}
fn weights_mut(self: &mut HyperbolicLayer) -> Option<&mut Vec<f32>> {
None
}
fn bias_mut(self: &mut HyperbolicLayer) -> Option<&mut Vec<f32>> {
None
}
}
impl Layer for HyperbolicLayer {
fn input_count(self: &HyperbolicLayer) -> usize {
self.size
}
fn output_count(self: &HyperbolicLayer) -> usize {
self.size
}
fn output(self: &HyperbolicLayer, inputs: &[f32]) -> Vec<f32> {
let mut out: Vec<f32> = Vec::new();
for x in inputs {
out.push(x.tanh());
}
out
}
/// y = tanh(x) and dy / dx = 1 - y^2
fn delta_from_outputs(self: &HyperbolicLayer,
delta: &[f32],
outputs: &[f32])
-> Option<Vec<f32>> {
assert_eq!(self.size, outputs.len());
assert_eq!(self.size, delta.len());
let mut derivs: Vec<f32> = vec![0.0; self.size];
for ((d, y), yd) in delta.iter().zip(outputs).zip(derivs.iter_mut()) {
*yd = d * (1.0 - y * y);
}
Some(derivs)
}
}
pub struct SigmoidLayer {
pub size: usize,
}
impl Layer for SigmoidLayer {
fn input_count(self: &SigmoidLayer) -> usize {
self.size
}
fn output_count(self: &SigmoidLayer) -> usize {
self.size
}
fn output(self: &SigmoidLayer, inputs: &[f32]) -> Vec<f32> {
let mut out: Vec<f32> = Vec::new();
for x in inputs {
out.push(1.0 / (1.0 + (-x).exp()));
}
out
}
    /// dy / dx = y * (1 - y)
fn delta_from_outputs(self: &SigmoidLayer, delta: &[f32], outputs: &[f32]) -> Option<Vec<f32>> {
assert_eq!(self.size, outputs.len());
assert_eq!(self.size, delta.len());
let mut derivs: Vec<f32> = vec![0.0; self.size];
for ((d, y), yd) in delta.iter().zip(outputs).zip(derivs.iter_mut()) {
*yd = d * (y * (1.0 - y));
}
Some(derivs)
}
}
impl WeightedLayer for SigmoidLayer {
fn weight_count(&self) -> usize {
0
}
fn neuron_count(&self) -> usize {
0
}
fn weights_mut(self: &mut SigmoidLayer) -> Option<&mut Vec<f32>> {
None
}
fn bias_mut(self: &mut SigmoidLayer) -> Option<&mut Vec<f32>> {
None
}
}
pub struct RectifiedLayer {
pub size: usize,
}
impl WeightedLayer for RectifiedLayer {
fn weight_count(&self) -> usize {
0
}
fn neuron_count(&self) -> usize {
0
}
fn weights_mut(self: &mut RectifiedLayer) -> Option<&mut Vec<f32>> {
None
}
fn bias_mut(self: &mut RectifiedLayer) -> Option<&mut Vec<f32>> {
None
}
}
impl Layer for RectifiedLayer {
fn input_count(self: &RectifiedLayer) -> usize {
self.size
}
fn output_count(self: &RectifiedLayer) -> usize {
self.size
}
fn output(self: &RectifiedLayer, inputs: &[f32]) -> Vec<f32> {
let mut out: Vec<f32> = Vec::new();
for x in inputs {
out.push(if *x < 0.0 { 0.0 } else { *x });
}
out
}
    /// Back-propagates `delta` using the sigmoid as a smooth surrogate for
    /// the ReLU gradient (the sigmoid is the exact derivative of softplus);
    /// the true derivative of `max(0, x)` is the step function: 0 for x < 0,
    /// 1 for x > 0.
fn delta_from_inputs(self: &RectifiedLayer, delta: &[f32], inputs: &[f32]) -> Option<Vec<f32>> {
assert_eq!(self.size, inputs.len());
assert_eq!(self.size, delta.len());
let mut derivs: Vec<f32> = Vec::new();
for (d, x) in delta.iter().zip(inputs) {
derivs.push(d * (1.0 / (1.0 + (-x).exp())));
}
Some(derivs)
}
}
#[cfg(test)]
mod tests {
use super::*;
use traits::Layer;
#[test]
fn dense_output() {
// Input shape is two, layer contains three neurons, output
// will thus be three
let w = vec![0.5, 2.0, -1.0, 0.5, 2.0, 3.0];
let b = vec![0.1, 0.2, 0.3];
let l = DenseLayer {
weights: w,
bias: b,
shape: (2, 3),
};
assert_eq!(l.output(&vec![1.0, -1.0]), vec![-1.4, -1.3, -0.7]);
}
#[test]
fn dense_delta_from_inputs() {
let w = vec![0.5, 2.0, -1.0, 0.5, 2.0, 3.0];
let b = vec![0.1, 0.2, 0.3];
let l = DenseLayer {
weights: w,
bias: b,
shape: (2, 3),
};
let x = vec![1.0, 2.0];
assert_eq!(l.delta_from_inputs(&vec![1.0, 1.0, 1.0], &x),
Some(vec![1.5, 5.5]));
}
#[test]
fn dense_derivw() {
let w = vec![0.5, 2.0, -1.0, 0.5, 2.0, 3.0];
let b = vec![0.1, 0.2, 0.3];
let l = DenseLayer {
weights: w,
bias: b,
shape: (2, 3),
};
let x = vec![1.0, 2.0];
assert_eq!(l.derivw(&x), Some(vec![1.0, 2.0, 1.0, 2.0, 1.0, 2.0]));
}
#[test]
fn hyperbolic_output() {
let l = HyperbolicLayer { size: 5 };
let expected = vec![-1.0, -0.7615942, 0.0, 0.7615942, 1.0];
assert_eq!(l.output(&vec![-999999.0, -1.0, 0.0, 1.0, 999999.0]),
expected);
}
#[test]
fn hyperbolic_derivo() {
let l = HyperbolicLayer { size: 3 };
let expected = vec![1.0, 0.0, -3.0];
assert_eq!(l.delta_from_outputs(&vec![1.0, 1.0, 1.0], &vec![0.0, 1.0, 2.0]),
Some(expected));
}
#[test]
fn sigmoid_output() {
let l = SigmoidLayer { size: 5 };
let expected = vec![0.0, 0.26894143, 0.5, 0.7310586, 1.0];
assert_eq!(l.output(&vec![-999999.0, -1.0, 0.0, 1.0, 999999.0]),
expected);
}
#[test]
fn sigmoid_delta_from_outputs() {
let l = SigmoidLayer { size: 3 };
let expected = vec![0.0, 0.25, 0.0];
assert_eq!(l.delta_from_outputs(&vec![1.0, 1.0, 1.0], &vec![0.0, 0.5, 1.0]),
Some(expected));
}
#[test]
fn rectified_output() {
let l = RectifiedLayer { size: 5 };
let expected = vec![0.0, 0.0, 0.0, 1.0, 999.0];
assert_eq!(l.output(&vec![-999999.0, -1.0, 0.0, 1.0, 999.0]), expected);
}
#[test]
fn rectified_delta_from_inputs() {
let l = RectifiedLayer { size: 5 };
let expected = vec![0.0, 0.26894143, 0.5, 0.7310586, 1.0];
assert_eq!(l.delta_from_inputs(&vec![1.0, 1.0, 1.0, 1.0, 1.0],
&vec![-999999.0, -1.0, 0.0, 1.0, 999.0]),
Some(expected));
}
}
| 26.336986 | 100 | 0.513055 |
abc1cec24e493569b7f2564c7978e36ea1a7839b
| 2,615 |
//! Provides components and systems to create an in game user interface.
#![warn(
missing_debug_implementations,
missing_docs,
rust_2018_idioms,
rust_2018_compatibility
)]
#![warn(clippy::all)]
#![allow(clippy::new_without_default)]
pub use self::{
blink::BlinkSystem,
bundle::UiBundle,
button::{
UiButton, UiButtonAction, UiButtonActionRetrigger, UiButtonActionRetriggerSystem,
UiButtonActionRetriggerSystemDesc, UiButtonActionType, UiButtonBuilder,
UiButtonBuilderResources, UiButtonSystem, UiButtonSystemDesc,
},
drag::{DragWidgetSystemDesc, Draggable},
event::{
targeted, targeted_below, Interactable, TargetedEvent, UiEvent, UiEventType, UiMouseSystem,
},
event_retrigger::{
EventReceiver, EventRetrigger, EventRetriggerSystem, EventRetriggerSystemDesc,
},
font::{
default::get_default_font,
systemfont::{default_system_font, get_all_font_handles, list_system_font_families},
},
format::{FontAsset, FontHandle, TtfFormat},
glyphs::{UiGlyphsSystem, UiGlyphsSystemDesc},
image::UiImage,
label::{UiLabel, UiLabelBuilder, UiLabelBuilderResources},
layout::{Anchor, ScaleMode, Stretch, UiTransformSystem, UiTransformSystemDesc},
pass::{DrawUi, DrawUiDesc, RenderUi},
prefab::{
NoCustomUi, ToNativeWidget, UiButtonData, UiCreator, UiFormat, UiImagePrefab, UiLoader,
UiLoaderSystem, UiLoaderSystemDesc, UiPrefab, UiTextData, UiTransformData, UiWidget,
},
resize::{ResizeSystem, ResizeSystemDesc, UiResize},
selection::{
Selectable, Selected, SelectionKeyboardSystem, SelectionKeyboardSystemDesc,
SelectionMouseSystem, SelectionMouseSystemDesc,
},
selection_order_cache::{CacheSelectionOrderSystem, CachedSelectionOrder},
sound::{
UiPlaySoundAction, UiSoundRetrigger, UiSoundRetriggerSystem, UiSoundRetriggerSystemDesc,
UiSoundSystem, UiSoundSystemDesc,
},
text::{LineMode, TextEditing, TextEditingMouseSystem, TextEditingMouseSystemDesc, UiText},
text_editing::{TextEditingInputSystem, TextEditingInputSystemDesc},
transform::{get_parent_pixel_size, UiFinder, UiTransform},
widgets::{Widget, WidgetId, Widgets},
};
pub(crate) use amethyst_core::ecs::prelude::Entity;
pub(crate) use paste;
mod blink;
mod bundle;
mod button;
mod drag;
mod event;
mod event_retrigger;
mod font;
mod format;
mod glyphs;
mod image;
mod label;
mod layout;
mod pass;
mod prefab;
mod resize;
mod selection;
mod selection_order_cache;
mod sound;
mod text;
mod text_editing;
mod transform;
mod widgets;
| 31.890244 | 99 | 0.744168 |
9b1a7a50201b022aee19c636107c7dc3acf0b27a
| 72,593 |
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::{Block, Crate, DeclLocal, ExprMac, PatMac};
use ast::{Local, Ident, Mac_, Name};
use ast::{ItemMac, MacStmtWithSemicolon, Mrk, Stmt, StmtDecl, StmtMac};
use ast::{StmtExpr, StmtSemi};
use ast::TokenTree;
use ast;
use ext::mtwt;
use ext::build::AstBuilder;
use attr;
use attr::AttrMetaMethods;
use codemap;
use codemap::{Span, Spanned, ExpnInfo, NameAndSpan, MacroBang, MacroAttribute};
use ext::base::*;
use feature_gate::{self, Features, GatedCfg};
use fold;
use fold::*;
use parse;
use parse::token::{fresh_mark, fresh_name, intern};
use ptr::P;
use util::small_vector::SmallVector;
use visit;
use visit::Visitor;
use std_inject;
use std::collections::HashSet;
pub fn expand_expr(e: P<ast::Expr>, fld: &mut MacroExpander) -> P<ast::Expr> {
let expr_span = e.span;
return e.and_then(|ast::Expr {id, node, span}| match node {
// expr_mac should really be expr_ext or something; it's the
// entry-point for all syntax extensions.
ast::ExprMac(mac) => {
let expanded_expr = match expand_mac_invoc(mac, span,
|r| r.make_expr(),
mark_expr, fld) {
Some(expr) => expr,
None => {
return DummyResult::raw_expr(span);
}
};
// Keep going, outside-in.
let fully_expanded = fld.fold_expr(expanded_expr);
let span = fld.new_span(span);
fld.cx.bt_pop();
fully_expanded.map(|e| ast::Expr {
id: ast::DUMMY_NODE_ID,
node: e.node,
span: span,
})
}
ast::ExprInPlace(placer, value_expr) => {
// Ensure feature-gate is enabled
feature_gate::check_for_placement_in(
fld.cx.ecfg.features,
&fld.cx.parse_sess.span_diagnostic,
expr_span);
let placer = fld.fold_expr(placer);
let value_expr = fld.fold_expr(value_expr);
fld.cx.expr(span, ast::ExprInPlace(placer, value_expr))
}
ast::ExprWhile(cond, body, opt_ident) => {
let cond = fld.fold_expr(cond);
let (body, opt_ident) = expand_loop_block(body, opt_ident, fld);
fld.cx.expr(span, ast::ExprWhile(cond, body, opt_ident))
}
ast::ExprWhileLet(pat, expr, body, opt_ident) => {
let pat = fld.fold_pat(pat);
let expr = fld.fold_expr(expr);
// Hygienic renaming of the body.
let ((body, opt_ident), mut rewritten_pats) =
rename_in_scope(vec![pat],
fld,
(body, opt_ident),
|rename_fld, fld, (body, opt_ident)| {
expand_loop_block(rename_fld.fold_block(body), opt_ident, fld)
});
assert!(rewritten_pats.len() == 1);
fld.cx.expr(span, ast::ExprWhileLet(rewritten_pats.remove(0), expr, body, opt_ident))
}
ast::ExprLoop(loop_block, opt_ident) => {
let (loop_block, opt_ident) = expand_loop_block(loop_block, opt_ident, fld);
fld.cx.expr(span, ast::ExprLoop(loop_block, opt_ident))
}
ast::ExprForLoop(pat, head, body, opt_ident) => {
let pat = fld.fold_pat(pat);
// Hygienic renaming of the for loop body (for loop binds its pattern).
let ((body, opt_ident), mut rewritten_pats) =
rename_in_scope(vec![pat],
fld,
(body, opt_ident),
|rename_fld, fld, (body, opt_ident)| {
expand_loop_block(rename_fld.fold_block(body), opt_ident, fld)
});
assert!(rewritten_pats.len() == 1);
let head = fld.fold_expr(head);
fld.cx.expr(span, ast::ExprForLoop(rewritten_pats.remove(0), head, body, opt_ident))
}
ast::ExprIfLet(pat, sub_expr, body, else_opt) => {
let pat = fld.fold_pat(pat);
// Hygienic renaming of the body.
let (body, mut rewritten_pats) =
rename_in_scope(vec![pat],
fld,
body,
|rename_fld, fld, body| {
fld.fold_block(rename_fld.fold_block(body))
});
assert!(rewritten_pats.len() == 1);
let else_opt = else_opt.map(|else_opt| fld.fold_expr(else_opt));
let sub_expr = fld.fold_expr(sub_expr);
fld.cx.expr(span, ast::ExprIfLet(rewritten_pats.remove(0), sub_expr, body, else_opt))
}
ast::ExprClosure(capture_clause, fn_decl, block) => {
let (rewritten_fn_decl, rewritten_block)
= expand_and_rename_fn_decl_and_block(fn_decl, block, fld);
let new_node = ast::ExprClosure(capture_clause,
rewritten_fn_decl,
rewritten_block);
P(ast::Expr{id:id, node: new_node, span: fld.new_span(span)})
}
_ => {
P(noop_fold_expr(ast::Expr {
id: id,
node: node,
span: span
}, fld))
}
});
}
/// Expand a (not-ident-style) macro invocation. Returns the result
/// of expansion and the mark which must be applied to the result.
/// Our current interface doesn't allow us to apply the mark to the
/// result until after calling make_expr, make_items, etc.
fn expand_mac_invoc<T, F, G>(mac: ast::Mac,
span: codemap::Span,
parse_thunk: F,
mark_thunk: G,
fld: &mut MacroExpander)
-> Option<T> where
F: for<'a> FnOnce(Box<MacResult+'a>) -> Option<T>,
G: FnOnce(T, Mrk) -> T,
{
// it would almost certainly be cleaner to pass the whole
// macro invocation in, rather than pulling it apart and
// marking the tts and the ctxt separately. This also goes
// for the other three macro invocation chunks of code
// in this file.
let Mac_ { path: pth, tts, .. } = mac.node;
if pth.segments.len() > 1 {
fld.cx.span_err(pth.span,
"expected macro name without module \
separators");
// let compilation continue
return None;
}
let extname = pth.segments[0].identifier.name;
match fld.cx.syntax_env.find(extname) {
None => {
fld.cx.span_err(
pth.span,
&format!("macro undefined: '{}!'",
&extname));
// let compilation continue
None
}
Some(rc) => match *rc {
NormalTT(ref expandfun, exp_span, allow_internal_unstable) => {
fld.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
format: MacroBang(extname),
span: exp_span,
allow_internal_unstable: allow_internal_unstable,
},
});
let fm = fresh_mark();
let marked_before = mark_tts(&tts[..], fm);
// The span that we pass to the expanders we want to
// be the root of the call stack. That's the most
// relevant span and it's the actual invocation of
// the macro.
let mac_span = fld.cx.original_span();
let opt_parsed = {
let expanded = expandfun.expand(fld.cx,
mac_span,
&marked_before[..]);
parse_thunk(expanded)
};
let parsed = match opt_parsed {
Some(e) => e,
None => {
fld.cx.span_err(
pth.span,
&format!("non-expression macro in expression position: {}",
extname
));
return None;
}
};
Some(mark_thunk(parsed,fm))
}
_ => {
fld.cx.span_err(
pth.span,
&format!("'{}' is not a tt-style macro",
extname));
None
}
}
}
}
/// Rename loop label and expand its loop body
///
/// The renaming procedure for loop is different in the sense that the loop
/// body is in a block enclosed by loop head so the renaming of loop label
/// must be propagated to the enclosed context.
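///
/// For example, in `'a: loop { break 'a; }` the fresh name chosen for `'a`
/// must also be visible to the `break` inside the loop body, so the rename is
/// pushed onto the enclosed context's pending renames rather than the current
/// one.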
fn expand_loop_block(loop_block: P<Block>,
opt_ident: Option<Ident>,
fld: &mut MacroExpander) -> (P<Block>, Option<Ident>) {
match opt_ident {
Some(label) => {
let new_label = fresh_name(label);
let rename = (label, new_label);
// The rename *must not* be added to the pending list of current
// syntax context otherwise an unrelated `break` or `continue` in
// the same context will pick that up in the deferred renaming pass
// and be renamed incorrectly.
let mut rename_list = vec!(rename);
let mut rename_fld = IdentRenamer{renames: &mut rename_list};
let renamed_ident = rename_fld.fold_ident(label);
// The rename *must* be added to the enclosed syntax context for
// `break` or `continue` to pick up because by definition they are
// in a block enclosed by loop head.
fld.cx.syntax_env.push_frame();
fld.cx.syntax_env.info().pending_renames.push(rename);
let expanded_block = expand_block_elts(loop_block, fld);
fld.cx.syntax_env.pop_frame();
(expanded_block, Some(renamed_ident))
}
None => (fld.fold_block(loop_block), opt_ident)
}
}
// eval $e with a new exts frame.
// must be a macro so that $e isn't evaluated too early.
macro_rules! with_exts_frame {
($extsboxexpr:expr,$macros_escape:expr,$e:expr) =>
({$extsboxexpr.push_frame();
$extsboxexpr.info().macros_escape = $macros_escape;
let result = $e;
$extsboxexpr.pop_frame();
result
})
}
// When we enter a module, record it, for the sake of `module!`
pub fn expand_item(it: P<ast::Item>, fld: &mut MacroExpander)
-> SmallVector<P<ast::Item>> {
let it = expand_item_multi_modifier(Annotatable::Item(it), fld);
expand_annotatable(it, fld)
.into_iter().map(|i| i.expect_item()).collect()
}
/// Expand item_underscore
fn expand_item_underscore(item: ast::Item_, fld: &mut MacroExpander) -> ast::Item_ {
match item {
ast::ItemFn(decl, unsafety, constness, abi, generics, body) => {
let (rewritten_fn_decl, rewritten_body)
= expand_and_rename_fn_decl_and_block(decl, body, fld);
let expanded_generics = fold::noop_fold_generics(generics,fld);
ast::ItemFn(rewritten_fn_decl, unsafety, constness, abi,
expanded_generics, rewritten_body)
}
_ => noop_fold_item_underscore(item, fld)
}
}
// does this attribute list contain "macro_use" ?
fn contains_macro_use(fld: &mut MacroExpander, attrs: &[ast::Attribute]) -> bool {
for attr in attrs {
let mut is_use = attr.check_name("macro_use");
if attr.check_name("macro_escape") {
fld.cx.span_warn(attr.span, "macro_escape is a deprecated synonym for macro_use");
is_use = true;
if let ast::AttrStyle::Inner = attr.node.style {
fld.cx.fileline_help(attr.span, "consider an outer attribute, \
#[macro_use] mod ...");
}
};
if is_use {
match attr.node.value.node {
ast::MetaWord(..) => (),
_ => fld.cx.span_err(attr.span, "arguments to macro_use are not allowed here"),
}
return true;
}
}
false
}
// Support for item-position macro invocations, exactly the same
// logic as for expression-position macro invocations.
pub fn expand_item_mac(it: P<ast::Item>,
fld: &mut MacroExpander) -> SmallVector<P<ast::Item>> {
let (extname, path_span, tts, span, attrs, ident) = it.and_then(|it| match it.node {
ItemMac(codemap::Spanned { node: Mac_ { path, tts, .. }, .. }) =>
(path.segments[0].identifier.name, path.span, tts, it.span, it.attrs, it.ident),
_ => fld.cx.span_bug(it.span, "invalid item macro invocation")
});
let fm = fresh_mark();
let items = {
let expanded = match fld.cx.syntax_env.find(extname) {
None => {
fld.cx.span_err(path_span,
&format!("macro undefined: '{}!'",
extname));
// let compilation continue
return SmallVector::zero();
}
Some(rc) => match *rc {
NormalTT(ref expander, tt_span, allow_internal_unstable) => {
if ident.name != parse::token::special_idents::invalid.name {
fld.cx
.span_err(path_span,
&format!("macro {}! expects no ident argument, given '{}'",
extname,
ident));
return SmallVector::zero();
}
fld.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
format: MacroBang(extname),
span: tt_span,
allow_internal_unstable: allow_internal_unstable,
}
});
// mark before expansion:
let marked_before = mark_tts(&tts[..], fm);
expander.expand(fld.cx, span, &marked_before[..])
}
IdentTT(ref expander, tt_span, allow_internal_unstable) => {
if ident.name == parse::token::special_idents::invalid.name {
fld.cx.span_err(path_span,
&format!("macro {}! expects an ident argument",
extname));
return SmallVector::zero();
}
fld.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
format: MacroBang(extname),
span: tt_span,
allow_internal_unstable: allow_internal_unstable,
}
});
// mark before expansion:
let marked_tts = mark_tts(&tts[..], fm);
expander.expand(fld.cx, span, ident, marked_tts)
}
MacroRulesTT => {
if ident.name == parse::token::special_idents::invalid.name {
fld.cx.span_err(path_span, "macro_rules! expects an ident argument");
return SmallVector::zero();
}
fld.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
format: MacroBang(extname),
span: None,
// `macro_rules!` doesn't directly allow
// unstable (this is orthogonal to whether
// the macro it creates allows it)
allow_internal_unstable: false,
}
});
// DON'T mark before expansion.
let allow_internal_unstable = attr::contains_name(&attrs,
"allow_internal_unstable");
// ensure any #[allow_internal_unstable]s are
// detected (including nested macro definitions
// etc.)
if allow_internal_unstable && !fld.cx.ecfg.enable_allow_internal_unstable() {
feature_gate::emit_feature_err(
&fld.cx.parse_sess.span_diagnostic,
"allow_internal_unstable",
span,
feature_gate::GateIssue::Language,
feature_gate::EXPLAIN_ALLOW_INTERNAL_UNSTABLE)
}
let export = attr::contains_name(&attrs, "macro_export");
let def = ast::MacroDef {
ident: ident,
attrs: attrs,
id: ast::DUMMY_NODE_ID,
span: span,
imported_from: None,
export: export,
use_locally: true,
allow_internal_unstable: allow_internal_unstable,
body: tts,
};
fld.cx.insert_macro(def);
// macro_rules! has a side effect but expands to nothing.
fld.cx.bt_pop();
return SmallVector::zero();
}
_ => {
fld.cx.span_err(span,
&format!("{}! is not legal in item position",
extname));
return SmallVector::zero();
}
}
};
expanded.make_items()
};
let items = match items {
Some(items) => {
items.into_iter()
.map(|i| mark_item(i, fm))
.flat_map(|i| fld.fold_item(i).into_iter())
.collect()
}
None => {
fld.cx.span_err(path_span,
&format!("non-item macro in item position: {}",
extname));
return SmallVector::zero();
}
};
fld.cx.bt_pop();
items
}
/// Expand a stmt
fn expand_stmt(stmt: P<Stmt>, fld: &mut MacroExpander) -> SmallVector<P<Stmt>> {
let stmt = stmt.and_then(|stmt| stmt);
let (mac, style) = match stmt.node {
StmtMac(mac, style) => (mac, style),
_ => return expand_non_macro_stmt(stmt, fld)
};
let maybe_new_items =
expand_mac_invoc(mac.and_then(|m| m), stmt.span,
|r| r.make_stmts(),
|stmts, mark| stmts.move_map(|m| mark_stmt(m, mark)),
fld);
let mut fully_expanded = match maybe_new_items {
Some(stmts) => {
// Keep going, outside-in.
let new_items = stmts.into_iter().flat_map(|s| {
fld.fold_stmt(s).into_iter()
}).collect();
fld.cx.bt_pop();
new_items
}
None => SmallVector::zero()
};
// If this is a macro invocation with a semicolon, then apply that
// semicolon to the final statement produced by expansion.
if style == MacStmtWithSemicolon {
if let Some(stmt) = fully_expanded.pop() {
let new_stmt = stmt.map(|Spanned {node, span}| {
Spanned {
node: match node {
StmtExpr(e, stmt_id) => StmtSemi(e, stmt_id),
_ => node /* might already have a semi */
},
span: span
}
});
fully_expanded.push(new_stmt);
}
}
fully_expanded
}
// expand a non-macro stmt. this is essentially the fallthrough for
// expand_stmt, above.
fn expand_non_macro_stmt(Spanned {node, span: stmt_span}: Stmt, fld: &mut MacroExpander)
-> SmallVector<P<Stmt>> {
// is it a let?
match node {
StmtDecl(decl, node_id) => decl.and_then(|Spanned {node: decl, span}| match decl {
DeclLocal(local) => {
// take it apart:
let rewritten_local = local.map(|Local {id, pat, ty, init, span}| {
// expand the ty since TyFixedLengthVec contains an Expr
// and thus may have a macro use
let expanded_ty = ty.map(|t| fld.fold_ty(t));
// expand the pat (it might contain macro uses):
let expanded_pat = fld.fold_pat(pat);
// find the PatIdents in the pattern:
// oh dear heaven... this is going to include the enum
// names, as well... but that should be okay, as long as
// the new names are gensyms for the old ones.
// generate fresh names, push them to a new pending list
let idents = pattern_bindings(&expanded_pat);
let mut new_pending_renames =
idents.iter().map(|ident| (*ident, fresh_name(*ident))).collect();
// rewrite the pattern using the new names (the old
// ones have already been applied):
let rewritten_pat = {
// nested binding to allow borrow to expire:
let mut rename_fld = IdentRenamer{renames: &mut new_pending_renames};
rename_fld.fold_pat(expanded_pat)
};
// add them to the existing pending renames:
fld.cx.syntax_env.info().pending_renames
.extend(new_pending_renames);
Local {
id: id,
ty: expanded_ty,
pat: rewritten_pat,
// also, don't forget to expand the init:
init: init.map(|e| fld.fold_expr(e)),
span: span
}
});
SmallVector::one(P(Spanned {
node: StmtDecl(P(Spanned {
node: DeclLocal(rewritten_local),
span: span
}),
node_id),
span: stmt_span
}))
}
_ => {
noop_fold_stmt(Spanned {
node: StmtDecl(P(Spanned {
node: decl,
span: span
}),
node_id),
span: stmt_span
}, fld)
}
}),
_ => {
noop_fold_stmt(Spanned {
node: node,
span: stmt_span
}, fld)
}
}
}
// expand the arm of a 'match', renaming for macro hygiene
fn expand_arm(arm: ast::Arm, fld: &mut MacroExpander) -> ast::Arm {
// expand pats... they might contain macro uses:
let expanded_pats = arm.pats.move_map(|pat| fld.fold_pat(pat));
if expanded_pats.is_empty() {
panic!("encountered match arm with 0 patterns");
}
// apply renaming and then expansion to the guard and the body:
let ((rewritten_guard, rewritten_body), rewritten_pats) =
rename_in_scope(expanded_pats,
fld,
(arm.guard, arm.body),
|rename_fld, fld, (ag, ab)|{
let rewritten_guard = ag.map(|g| fld.fold_expr(rename_fld.fold_expr(g)));
let rewritten_body = fld.fold_expr(rename_fld.fold_expr(ab));
(rewritten_guard, rewritten_body)
});
ast::Arm {
attrs: fold::fold_attrs(arm.attrs, fld),
pats: rewritten_pats,
guard: rewritten_guard,
body: rewritten_body,
}
}
fn rename_in_scope<X, F>(pats: Vec<P<ast::Pat>>,
fld: &mut MacroExpander,
x: X,
f: F)
-> (X, Vec<P<ast::Pat>>)
where F: Fn(&mut IdentRenamer, &mut MacroExpander, X) -> X
{
// all of the pats must have the same set of bindings, so use the
// first one to extract them and generate new names:
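    // (The language already guarantees that every `|`-alternative of a match
    // arm binds the same identifiers, e.g. `(Some(x), _) | (_, Some(x))`.)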
let idents = pattern_bindings(&pats[0]);
let new_renames = idents.into_iter().map(|id| (id, fresh_name(id))).collect();
// apply the renaming, but only to the PatIdents:
let mut rename_pats_fld = PatIdentRenamer{renames:&new_renames};
let rewritten_pats = pats.move_map(|pat| rename_pats_fld.fold_pat(pat));
let mut rename_fld = IdentRenamer{ renames:&new_renames };
(f(&mut rename_fld, fld, x), rewritten_pats)
}
/// A visitor that extracts the PatIdent (binding) paths
/// from a given pattern and puts them in a mutable
/// array
#[derive(Clone)]
struct PatIdentFinder {
ident_accumulator: Vec<ast::Ident>
}
impl<'v> Visitor<'v> for PatIdentFinder {
fn visit_pat(&mut self, pattern: &ast::Pat) {
match *pattern {
ast::Pat { id: _, node: ast::PatIdent(_, ref path1, ref inner), span: _ } => {
self.ident_accumulator.push(path1.node);
// visit optional subpattern of PatIdent:
if let Some(ref subpat) = *inner {
self.visit_pat(subpat)
}
}
// use the default traversal for non-PatIdents
_ => visit::walk_pat(self, pattern)
}
}
}
/// find the PatIdent paths in a pattern
fn pattern_bindings(pat: &ast::Pat) -> Vec<ast::Ident> {
let mut name_finder = PatIdentFinder{ident_accumulator:Vec::new()};
name_finder.visit_pat(pat);
name_finder.ident_accumulator
}
/// find the PatIdent paths in a
fn fn_decl_arg_bindings(fn_decl: &ast::FnDecl) -> Vec<ast::Ident> {
let mut pat_idents = PatIdentFinder{ident_accumulator:Vec::new()};
for arg in &fn_decl.inputs {
pat_idents.visit_pat(&arg.pat);
}
pat_idents.ident_accumulator
}
// expand a block. pushes a new exts_frame, then calls expand_block_elts
pub fn expand_block(blk: P<Block>, fld: &mut MacroExpander) -> P<Block> {
// see note below about treatment of exts table
with_exts_frame!(fld.cx.syntax_env,false,
expand_block_elts(blk, fld))
}
// expand the elements of a block.
pub fn expand_block_elts(b: P<Block>, fld: &mut MacroExpander) -> P<Block> {
b.map(|Block {id, stmts, expr, rules, span}| {
let new_stmts = stmts.into_iter().flat_map(|x| {
// perform all pending renames
let renamed_stmt = {
let pending_renames = &mut fld.cx.syntax_env.info().pending_renames;
let mut rename_fld = IdentRenamer{renames:pending_renames};
rename_fld.fold_stmt(x).expect_one("rename_fold didn't return one value")
};
// expand macros in the statement
fld.fold_stmt(renamed_stmt).into_iter()
}).collect();
let new_expr = expr.map(|x| {
let expr = {
let pending_renames = &mut fld.cx.syntax_env.info().pending_renames;
let mut rename_fld = IdentRenamer{renames:pending_renames};
rename_fld.fold_expr(x)
};
fld.fold_expr(expr)
});
Block {
id: fld.new_id(id),
stmts: new_stmts,
expr: new_expr,
rules: rules,
span: span
}
})
}
fn expand_pat(p: P<ast::Pat>, fld: &mut MacroExpander) -> P<ast::Pat> {
match p.node {
PatMac(_) => {}
_ => return noop_fold_pat(p, fld)
}
p.map(|ast::Pat {node, span, ..}| {
let (pth, tts) = match node {
PatMac(mac) => (mac.node.path, mac.node.tts),
_ => unreachable!()
};
if pth.segments.len() > 1 {
fld.cx.span_err(pth.span, "expected macro name without module separators");
return DummyResult::raw_pat(span);
}
let extname = pth.segments[0].identifier.name;
let marked_after = match fld.cx.syntax_env.find(extname) {
None => {
fld.cx.span_err(pth.span,
&format!("macro undefined: '{}!'",
extname));
// let compilation continue
return DummyResult::raw_pat(span);
}
Some(rc) => match *rc {
NormalTT(ref expander, tt_span, allow_internal_unstable) => {
fld.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
format: MacroBang(extname),
span: tt_span,
allow_internal_unstable: allow_internal_unstable,
}
});
let fm = fresh_mark();
let marked_before = mark_tts(&tts[..], fm);
let mac_span = fld.cx.original_span();
let pat = expander.expand(fld.cx,
mac_span,
&marked_before[..]).make_pat();
let expanded = match pat {
Some(e) => e,
None => {
fld.cx.span_err(
pth.span,
&format!(
"non-pattern macro in pattern position: {}",
extname
)
);
return DummyResult::raw_pat(span);
}
};
// mark after:
mark_pat(expanded,fm)
}
_ => {
fld.cx.span_err(span,
&format!("{}! is not legal in pattern position",
extname));
return DummyResult::raw_pat(span);
}
}
};
let fully_expanded =
fld.fold_pat(marked_after).node.clone();
fld.cx.bt_pop();
ast::Pat {
id: ast::DUMMY_NODE_ID,
node: fully_expanded,
span: span
}
})
}
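// Illustration of the flow above: for `match e { my_pat!(x) => ... }`, the
// token trees are marked with a fresh mark, handed to the NormalTT expander,
// the result is parsed back via make_pat, marked again, and then re-expanded
// outside-in by the recursive fold_pat call.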
/// A tree-folder that applies every rename in its (mutable) list
/// to every identifier, including both bindings and varrefs
/// (and lots of things that will turn out to be neither)
pub struct IdentRenamer<'a> {
renames: &'a mtwt::RenameList,
}
impl<'a> Folder for IdentRenamer<'a> {
fn fold_ident(&mut self, id: Ident) -> Ident {
Ident::new(id.name, mtwt::apply_renames(self.renames, id.ctxt))
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
fold::noop_fold_mac(mac, self)
}
}
/// A tree-folder that applies every rename in its list to
/// the idents that are in PatIdent patterns. This is more narrowly
/// focused than IdentRenamer, and is needed for FnDecl,
/// where we want to rename the args but not the fn name or the generics etc.
pub struct PatIdentRenamer<'a> {
renames: &'a mtwt::RenameList,
}
impl<'a> Folder for PatIdentRenamer<'a> {
fn fold_pat(&mut self, pat: P<ast::Pat>) -> P<ast::Pat> {
match pat.node {
ast::PatIdent(..) => {},
_ => return noop_fold_pat(pat, self)
}
pat.map(|ast::Pat {id, node, span}| match node {
ast::PatIdent(binding_mode, Spanned{span: sp, node: ident}, sub) => {
let new_ident = Ident::new(ident.name,
mtwt::apply_renames(self.renames, ident.ctxt));
let new_node =
ast::PatIdent(binding_mode,
Spanned{span: self.new_span(sp), node: new_ident},
sub.map(|p| self.fold_pat(p)));
ast::Pat {
id: id,
node: new_node,
span: self.new_span(span)
}
},
_ => unreachable!()
})
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
fold::noop_fold_mac(mac, self)
}
}
fn expand_annotatable(a: Annotatable,
fld: &mut MacroExpander)
-> SmallVector<Annotatable> {
let a = expand_item_multi_modifier(a, fld);
let mut decorator_items = SmallVector::zero();
let mut new_attrs = Vec::new();
expand_decorators(a.clone(), fld, &mut decorator_items, &mut new_attrs);
let mut new_items: SmallVector<Annotatable> = match a {
Annotatable::Item(it) => match it.node {
ast::ItemMac(..) => {
expand_item_mac(it, fld).into_iter().map(|i| Annotatable::Item(i)).collect()
}
ast::ItemMod(_) | ast::ItemForeignMod(_) => {
let valid_ident =
it.ident.name != parse::token::special_idents::invalid.name;
if valid_ident {
fld.cx.mod_push(it.ident);
}
let macro_use = contains_macro_use(fld, &new_attrs[..]);
let result = with_exts_frame!(fld.cx.syntax_env,
macro_use,
noop_fold_item(it, fld));
if valid_ident {
fld.cx.mod_pop();
}
result.into_iter().map(|i| Annotatable::Item(i)).collect()
},
_ => {
let it = P(ast::Item {
attrs: new_attrs,
..(*it).clone()
});
noop_fold_item(it, fld).into_iter().map(|i| Annotatable::Item(i)).collect()
}
},
Annotatable::TraitItem(it) => match it.node {
ast::MethodTraitItem(_, Some(_)) => SmallVector::one(it.map(|ti| ast::TraitItem {
id: ti.id,
ident: ti.ident,
attrs: ti.attrs,
node: match ti.node {
ast::MethodTraitItem(sig, Some(body)) => {
let (sig, body) = expand_and_rename_method(sig, body, fld);
ast::MethodTraitItem(sig, Some(body))
}
_ => unreachable!()
},
span: fld.new_span(ti.span)
})),
_ => fold::noop_fold_trait_item(it, fld)
}.into_iter().map(Annotatable::TraitItem).collect(),
Annotatable::ImplItem(ii) => {
expand_impl_item(ii, fld).into_iter().map(Annotatable::ImplItem).collect()
}
};
new_items.push_all(decorator_items);
new_items
}
// Partition a set of attributes into one kind of attribute, and other kinds.
macro_rules! partition {
($fn_name: ident, $variant: ident) => {
#[allow(deprecated)] // The `allow` is needed because the `Modifier` variant might be used.
fn $fn_name(attrs: &[ast::Attribute],
fld: &MacroExpander)
-> (Vec<ast::Attribute>, Vec<ast::Attribute>) {
attrs.iter().cloned().partition(|attr| {
match fld.cx.syntax_env.find(intern(&attr.name())) {
Some(rc) => match *rc {
$variant(..) => true,
_ => false
},
_ => false
}
})
}
}
}
partition!(multi_modifiers, MultiModifier);
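// For example, `partition!(multi_modifiers, MultiModifier)` defines
// `fn multi_modifiers(attrs, fld)` returning `(matching, others)`: attributes
// whose name resolves to a MultiModifier extension, and everything else.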
fn expand_decorators(a: Annotatable,
fld: &mut MacroExpander,
decorator_items: &mut SmallVector<Annotatable>,
new_attrs: &mut Vec<ast::Attribute>)
{
for attr in a.attrs() {
let mname = intern(&attr.name());
match fld.cx.syntax_env.find(mname) {
Some(rc) => match *rc {
MultiDecorator(ref dec) => {
attr::mark_used(&attr);
fld.cx.bt_push(ExpnInfo {
call_site: attr.span,
callee: NameAndSpan {
format: MacroAttribute(mname),
span: Some(attr.span),
// attributes can do whatever they like,
// for now.
allow_internal_unstable: true,
}
});
// we'd ideally decorator_items.push_all(expand_annotatable(ann, fld)),
// but that double-mut-borrows fld
let mut items: SmallVector<Annotatable> = SmallVector::zero();
dec.expand(fld.cx,
attr.span,
&attr.node.value,
&a,
&mut |ann| items.push(ann));
decorator_items.extend(items.into_iter()
.flat_map(|ann| expand_annotatable(ann, fld).into_iter()));
fld.cx.bt_pop();
}
_ => new_attrs.push((*attr).clone()),
},
_ => new_attrs.push((*attr).clone()),
}
}
}
fn expand_item_multi_modifier(mut it: Annotatable,
fld: &mut MacroExpander)
-> Annotatable {
let (modifiers, other_attrs) = multi_modifiers(it.attrs(), fld);
// Update the attrs, leave everything else alone. Is this mutation really a good idea?
it = it.fold_attrs(other_attrs);
if modifiers.is_empty() {
return it
}
for attr in &modifiers {
let mname = intern(&attr.name());
match fld.cx.syntax_env.find(mname) {
Some(rc) => match *rc {
MultiModifier(ref mac) => {
attr::mark_used(attr);
fld.cx.bt_push(ExpnInfo {
call_site: attr.span,
callee: NameAndSpan {
format: MacroAttribute(mname),
span: Some(attr.span),
// attributes can do whatever they like,
// for now
allow_internal_unstable: true,
}
});
it = mac.expand(fld.cx, attr.span, &*attr.node.value, it);
fld.cx.bt_pop();
}
_ => unreachable!()
},
_ => unreachable!()
}
}
// Expansion may have added new ItemModifiers.
expand_item_multi_modifier(it, fld)
}
fn expand_impl_item(ii: P<ast::ImplItem>, fld: &mut MacroExpander)
-> SmallVector<P<ast::ImplItem>> {
match ii.node {
ast::ImplItemKind::Method(..) => SmallVector::one(ii.map(|ii| ast::ImplItem {
id: ii.id,
ident: ii.ident,
attrs: ii.attrs,
vis: ii.vis,
node: match ii.node {
ast::ImplItemKind::Method(sig, body) => {
let (sig, body) = expand_and_rename_method(sig, body, fld);
ast::ImplItemKind::Method(sig, body)
}
_ => unreachable!()
},
span: fld.new_span(ii.span)
})),
ast::ImplItemKind::Macro(_) => {
let (span, mac) = ii.and_then(|ii| match ii.node {
ast::ImplItemKind::Macro(mac) => (ii.span, mac),
_ => unreachable!()
});
let maybe_new_items =
expand_mac_invoc(mac, span,
|r| r.make_impl_items(),
|meths, mark| meths.move_map(|m| mark_impl_item(m, mark)),
fld);
match maybe_new_items {
Some(impl_items) => {
// expand again if necessary
let new_items = impl_items.into_iter().flat_map(|ii| {
expand_impl_item(ii, fld).into_iter()
}).collect();
fld.cx.bt_pop();
new_items
}
None => SmallVector::zero()
}
}
_ => fold::noop_fold_impl_item(ii, fld)
}
}
/// Given a fn_decl and a block and a MacroExpander, expand the fn_decl, then use the
/// PatIdents in its arguments to perform renaming in the FnDecl and
/// the block, returning both the new FnDecl and the new Block.
fn expand_and_rename_fn_decl_and_block(fn_decl: P<ast::FnDecl>, block: P<ast::Block>,
fld: &mut MacroExpander)
-> (P<ast::FnDecl>, P<ast::Block>) {
let expanded_decl = fld.fold_fn_decl(fn_decl);
let idents = fn_decl_arg_bindings(&expanded_decl);
let renames =
idents.iter().map(|id| (*id,fresh_name(*id))).collect();
// first, a renamer for the PatIdents, for the fn_decl:
let mut rename_pat_fld = PatIdentRenamer{renames: &renames};
let rewritten_fn_decl = rename_pat_fld.fold_fn_decl(expanded_decl);
// now, a renamer for *all* idents, for the body:
let mut rename_fld = IdentRenamer{renames: &renames};
let rewritten_body = fld.fold_block(rename_fld.fold_block(block));
(rewritten_fn_decl,rewritten_body)
}
fn expand_and_rename_method(sig: ast::MethodSig, body: P<ast::Block>,
fld: &mut MacroExpander)
-> (ast::MethodSig, P<ast::Block>) {
let (rewritten_fn_decl, rewritten_body)
= expand_and_rename_fn_decl_and_block(sig.decl, body, fld);
(ast::MethodSig {
generics: fld.fold_generics(sig.generics),
abi: sig.abi,
explicit_self: fld.fold_explicit_self(sig.explicit_self),
unsafety: sig.unsafety,
constness: sig.constness,
decl: rewritten_fn_decl
}, rewritten_body)
}
pub fn expand_type(t: P<ast::Ty>, fld: &mut MacroExpander) -> P<ast::Ty> {
let t = match t.node.clone() {
ast::Ty_::TyMac(mac) => {
if fld.cx.ecfg.features.unwrap().type_macros {
let expanded_ty = match expand_mac_invoc(mac, t.span,
|r| r.make_ty(),
mark_ty,
fld) {
Some(ty) => ty,
None => {
return DummyResult::raw_ty(t.span);
}
};
// Keep going, outside-in.
let fully_expanded = fld.fold_ty(expanded_ty);
fld.cx.bt_pop();
fully_expanded.map(|t| ast::Ty {
id: ast::DUMMY_NODE_ID,
node: t.node,
span: t.span,
})
} else {
feature_gate::emit_feature_err(
&fld.cx.parse_sess.span_diagnostic,
"type_macros",
t.span,
feature_gate::GateIssue::Language,
"type macros are experimental");
DummyResult::raw_ty(t.span)
}
}
_ => t
};
fold::noop_fold_ty(t, fld)
}
/// A tree-folder that performs macro expansion
pub struct MacroExpander<'a, 'b:'a> {
pub cx: &'a mut ExtCtxt<'b>,
}
impl<'a, 'b> MacroExpander<'a, 'b> {
pub fn new(cx: &'a mut ExtCtxt<'b>) -> MacroExpander<'a, 'b> {
MacroExpander { cx: cx }
}
}
impl<'a, 'b> Folder for MacroExpander<'a, 'b> {
fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
expand_expr(expr, self)
}
fn fold_pat(&mut self, pat: P<ast::Pat>) -> P<ast::Pat> {
expand_pat(pat, self)
}
fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
expand_item(item, self)
}
fn fold_item_underscore(&mut self, item: ast::Item_) -> ast::Item_ {
expand_item_underscore(item, self)
}
fn fold_stmt(&mut self, stmt: P<ast::Stmt>) -> SmallVector<P<ast::Stmt>> {
expand_stmt(stmt, self)
}
fn fold_block(&mut self, block: P<Block>) -> P<Block> {
expand_block(block, self)
}
fn fold_arm(&mut self, arm: ast::Arm) -> ast::Arm {
expand_arm(arm, self)
}
fn fold_trait_item(&mut self, i: P<ast::TraitItem>) -> SmallVector<P<ast::TraitItem>> {
expand_annotatable(Annotatable::TraitItem(i), self)
.into_iter().map(|i| i.expect_trait_item()).collect()
}
fn fold_impl_item(&mut self, i: P<ast::ImplItem>) -> SmallVector<P<ast::ImplItem>> {
expand_annotatable(Annotatable::ImplItem(i), self)
.into_iter().map(|i| i.expect_impl_item()).collect()
}
fn fold_ty(&mut self, ty: P<ast::Ty>) -> P<ast::Ty> {
expand_type(ty, self)
}
fn new_span(&mut self, span: Span) -> Span {
new_span(self.cx, span)
}
}
fn new_span(cx: &ExtCtxt, sp: Span) -> Span {
/* this discards information in the case of macro-defining macros */
Span {
lo: sp.lo,
hi: sp.hi,
expn_id: cx.backtrace(),
}
}
pub struct ExpansionConfig<'feat> {
pub crate_name: String,
pub features: Option<&'feat Features>,
pub recursion_limit: usize,
pub trace_mac: bool,
}
macro_rules! feature_tests {
($( fn $getter:ident = $field:ident, )*) => {
$(
pub fn $getter(&self) -> bool {
match self.features {
Some(&Features { $field: true, .. }) => true,
_ => false,
}
}
)*
}
}
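// As a sketch, `feature_tests! { fn enable_asm = allow_asm, }` expands to:
//
//     pub fn enable_asm(&self) -> bool {
//         match self.features {
//             Some(&Features { allow_asm: true, .. }) => true,
//             _ => false,
//         }
//     }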
impl<'feat> ExpansionConfig<'feat> {
pub fn default(crate_name: String) -> ExpansionConfig<'static> {
ExpansionConfig {
crate_name: crate_name,
features: None,
recursion_limit: 64,
trace_mac: false,
}
}
feature_tests! {
fn enable_quotes = allow_quote,
fn enable_asm = allow_asm,
fn enable_log_syntax = allow_log_syntax,
fn enable_concat_idents = allow_concat_idents,
fn enable_trace_macros = allow_trace_macros,
fn enable_allow_internal_unstable = allow_internal_unstable,
fn enable_custom_derive = allow_custom_derive,
fn enable_pushpop_unsafe = allow_pushpop_unsafe,
}
}
pub fn expand_crate<'feat>(parse_sess: &parse::ParseSess,
cfg: ExpansionConfig<'feat>,
// these are the macros being imported to this crate:
imported_macros: Vec<ast::MacroDef>,
user_exts: Vec<NamedSyntaxExtension>,
feature_gated_cfgs: &mut Vec<GatedCfg>,
c: Crate) -> (Crate, HashSet<Name>) {
let mut cx = ExtCtxt::new(parse_sess, c.config.clone(), cfg,
feature_gated_cfgs);
if std_inject::no_core(&c) {
cx.crate_root = None;
} else if std_inject::no_std(&c) {
cx.crate_root = Some("core");
} else {
cx.crate_root = Some("std");
}
let ret = {
let mut expander = MacroExpander::new(&mut cx);
for def in imported_macros {
expander.cx.insert_macro(def);
}
for (name, extension) in user_exts {
expander.cx.syntax_env.insert(name, extension);
}
let mut ret = expander.fold_crate(c);
ret.exported_macros = expander.cx.exported_macros.clone();
parse_sess.span_diagnostic.handler().abort_if_errors();
ret
};
return (ret, cx.syntax_env.names);
}
// HYGIENIC CONTEXT EXTENSION:
// all of these functions are for walking over
// ASTs and making some change to the context of every
// element that has one. a CtxtFn is a trait-ified
// version of a closure in (SyntaxContext -> SyntaxContext).
// the ones defined here include:
// Marker - add a mark to a context
// A Marker adds the given mark to the syntax context
struct Marker { mark: Mrk }
impl Folder for Marker {
fn fold_ident(&mut self, id: Ident) -> Ident {
ast::Ident::new(id.name, mtwt::apply_mark(self.mark, id.ctxt))
}
fn fold_mac(&mut self, Spanned {node, span}: ast::Mac) -> ast::Mac {
Spanned {
node: Mac_ {
path: self.fold_path(node.path),
tts: self.fold_tts(&node.tts),
ctxt: mtwt::apply_mark(self.mark, node.ctxt),
},
span: span,
}
}
}
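// e.g. folding the ident `x` with Marker { mark: m } keeps the name `x` and
// pushes `m` onto its syntax context via mtwt::apply_mark — same spelling,
// distinguishable hygiene context.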
// apply a given mark to the given token trees. Used prior to expansion of a macro.
fn mark_tts(tts: &[TokenTree], m: Mrk) -> Vec<TokenTree> {
noop_fold_tts(tts, &mut Marker{mark:m})
}
// apply a given mark to the given expr. Used following the expansion of a macro.
fn mark_expr(expr: P<ast::Expr>, m: Mrk) -> P<ast::Expr> {
Marker{mark:m}.fold_expr(expr)
}
// apply a given mark to the given pattern. Used following the expansion of a macro.
fn mark_pat(pat: P<ast::Pat>, m: Mrk) -> P<ast::Pat> {
Marker{mark:m}.fold_pat(pat)
}
// apply a given mark to the given stmt. Used following the expansion of a macro.
fn mark_stmt(stmt: P<ast::Stmt>, m: Mrk) -> P<ast::Stmt> {
Marker{mark:m}.fold_stmt(stmt)
.expect_one("marking a stmt didn't return exactly one stmt")
}
// apply a given mark to the given item. Used following the expansion of a macro.
fn mark_item(expr: P<ast::Item>, m: Mrk) -> P<ast::Item> {
Marker{mark:m}.fold_item(expr)
.expect_one("marking an item didn't return exactly one item")
}
// apply a given mark to the given item. Used following the expansion of a macro.
fn mark_impl_item(ii: P<ast::ImplItem>, m: Mrk) -> P<ast::ImplItem> {
Marker{mark:m}.fold_impl_item(ii)
.expect_one("marking an impl item didn't return exactly one impl item")
}
fn mark_ty(ty: P<ast::Ty>, m: Mrk) -> P<ast::Ty> {
Marker { mark: m }.fold_ty(ty)
}
/// Check that there are no macro invocations left in the AST:
pub fn check_for_macros(sess: &parse::ParseSess, krate: &ast::Crate) {
visit::walk_crate(&mut MacroExterminator{sess:sess}, krate);
}
/// A visitor that ensures that no macro invocations remain in an AST.
struct MacroExterminator<'a>{
sess: &'a parse::ParseSess
}
impl<'a, 'v> Visitor<'v> for MacroExterminator<'a> {
fn visit_mac(&mut self, mac: &ast::Mac) {
self.sess.span_diagnostic.span_bug(mac.span,
"macro exterminator: expected AST \
with no macro invocations");
}
}
#[cfg(test)]
mod tests {
use super::{pattern_bindings, expand_crate};
use super::{PatIdentFinder, IdentRenamer, PatIdentRenamer, ExpansionConfig};
use ast;
use ast::Name;
use codemap;
use ext::mtwt;
use fold::Folder;
use parse;
use parse::token;
use util::parser_testing::{string_to_parser};
use util::parser_testing::{string_to_pat, string_to_crate, strs_to_idents};
use visit;
use visit::Visitor;
// a visitor that extracts the paths
// from a given thingy and puts them in a mutable
// array (passed in to the traversal)
#[derive(Clone)]
struct PathExprFinderContext {
path_accumulator: Vec<ast::Path> ,
}
impl<'v> Visitor<'v> for PathExprFinderContext {
fn visit_expr(&mut self, expr: &ast::Expr) {
if let ast::ExprPath(None, ref p) = expr.node {
self.path_accumulator.push(p.clone());
}
visit::walk_expr(self, expr);
}
}
// find the variable references in a crate
fn crate_varrefs(the_crate : &ast::Crate) -> Vec<ast::Path> {
let mut path_finder = PathExprFinderContext{path_accumulator:Vec::new()};
visit::walk_crate(&mut path_finder, the_crate);
path_finder.path_accumulator
}
/// A Visitor that extracts the identifiers from a thingy.
// as a side note, I'm starting to want to abstract over these....
struct IdentFinder {
ident_accumulator: Vec<ast::Ident>
}
impl<'v> Visitor<'v> for IdentFinder {
fn visit_ident(&mut self, _: codemap::Span, id: ast::Ident){
self.ident_accumulator.push(id);
}
}
/// Find the idents in a crate
fn crate_idents(the_crate: &ast::Crate) -> Vec<ast::Ident> {
let mut ident_finder = IdentFinder{ident_accumulator: Vec::new()};
visit::walk_crate(&mut ident_finder, the_crate);
ident_finder.ident_accumulator
}
// these following tests are quite fragile, in that they don't test what
// *kind* of failure occurs.
fn test_ecfg() -> ExpansionConfig<'static> {
ExpansionConfig::default("test".to_string())
}
// make sure that macros can't escape fns
#[should_panic]
#[test] fn macros_cant_escape_fns_test () {
let src = "fn bogus() {macro_rules! z (() => (3+4));}\
fn inty() -> i32 { z!() }".to_string();
let sess = parse::ParseSess::new();
let crate_ast = parse::parse_crate_from_source_str(
"<test>".to_string(),
src,
Vec::new(), &sess);
// should fail:
expand_crate(&sess,test_ecfg(),vec!(),vec!(), &mut vec![], crate_ast);
}
// make sure that macros can't escape modules
#[should_panic]
#[test] fn macros_cant_escape_mods_test () {
let src = "mod foo {macro_rules! z (() => (3+4));}\
fn inty() -> i32 { z!() }".to_string();
let sess = parse::ParseSess::new();
let crate_ast = parse::parse_crate_from_source_str(
"<test>".to_string(),
src,
Vec::new(), &sess);
expand_crate(&sess,test_ecfg(),vec!(),vec!(), &mut vec![], crate_ast);
}
// macro_use modules should allow macros to escape
#[test] fn macros_can_escape_flattened_mods_test () {
let src = "#[macro_use] mod foo {macro_rules! z (() => (3+4));}\
fn inty() -> i32 { z!() }".to_string();
let sess = parse::ParseSess::new();
let crate_ast = parse::parse_crate_from_source_str(
"<test>".to_string(),
src,
Vec::new(), &sess);
expand_crate(&sess, test_ecfg(), vec!(), vec!(), &mut vec![], crate_ast);
}
fn expand_crate_str(crate_str: String) -> ast::Crate {
let ps = parse::ParseSess::new();
let crate_ast = panictry!(string_to_parser(&ps, crate_str).parse_crate_mod());
// the cfg argument actually does matter, here...
expand_crate(&ps,test_ecfg(),vec!(),vec!(), &mut vec![], crate_ast).0
}
// find the pat_ident paths in a crate
fn crate_bindings(the_crate : &ast::Crate) -> Vec<ast::Ident> {
let mut name_finder = PatIdentFinder{ident_accumulator:Vec::new()};
visit::walk_crate(&mut name_finder, the_crate);
name_finder.ident_accumulator
}
#[test] fn macro_tokens_should_match(){
expand_crate_str(
"macro_rules! m((a)=>(13)) ;fn main(){m!(a);}".to_string());
}
// should be able to use a bound identifier as a literal in a macro definition:
#[test] fn self_macro_parsing(){
expand_crate_str(
"macro_rules! foo ((zz) => (287;));
fn f(zz: i32) {foo!(zz);}".to_string()
);
}
// renaming tests expand a crate and then check that the bindings match
// the right varrefs. The specification of the test case includes the
// text of the crate, and also an array of arrays. Each element in the
// outer array corresponds to a binding in the traversal of the AST
// induced by visit. Each of these arrays contains a list of indexes,
// interpreted as the varrefs in the varref traversal that this binding
// should match. So, for instance, in a program with two bindings and
// three varrefs, the array [[1, 2], [0]] would indicate that the first
// binding should match the second two varrefs, and the second binding
// should match the first varref.
//
// Put differently; this is a sparse representation of a boolean matrix
// indicating which bindings capture which identifiers.
//
// Note also that this matrix is dependent on the implicit ordering of
// the bindings and the varrefs discovered by the name-finder and the path-finder.
//
// The comparisons are done post-mtwt-resolve, so we're comparing renamed
// names; differences in marks don't matter any more.
//
// oog... I also want tests that check "bound-identifier-=?". That is,
// not just "do these have the same name", but "do they have the same
// name *and* the same marks"? Understanding this is really pretty painful.
// in principle, you might want to control this boolean on a per-varref basis,
// but that would make things even harder to understand, and might not be
// necessary for thorough testing.
type RenamingTest = (&'static str, Vec<Vec<usize>>, bool);
#[test]
fn automatic_renaming () {
let tests: Vec<RenamingTest> =
vec!(// b & c should get new names throughout, in the expr too:
("fn a() -> i32 { let b = 13; let c = b; b+c }",
vec!(vec!(0,1),vec!(2)), false),
// both x's should be renamed (how is this causing a bug?)
("fn main () {let x: i32 = 13;x;}",
vec!(vec!(0)), false),
// the use of b after the + should be renamed, the other one not:
("macro_rules! f (($x:ident) => (b + $x)); fn a() -> i32 { let b = 13; f!(b)}",
vec!(vec!(1)), false),
// the b before the plus should not be renamed (requires marks)
("macro_rules! f (($x:ident) => ({let b=9; ($x + b)})); fn a() -> i32 { f!(b)}",
vec!(vec!(1)), false),
// the marks going in and out of letty should cancel, allowing that $x to
// capture the one following the semicolon.
// this was an awesome test case, and caught a *lot* of bugs.
("macro_rules! letty(($x:ident) => (let $x = 15;));
macro_rules! user(($x:ident) => ({letty!($x); $x}));
fn main() -> i32 {user!(z)}",
vec!(vec!(0)), false)
);
for (idx,s) in tests.iter().enumerate() {
run_renaming_test(s,idx);
}
}
// no longer a fixme #8062: this test exposes a *potential* bug; our system does
// not behave exactly like MTWT, but a conversation with Matthew Flatt
// suggests that this can only occur in the presence of local-expand, which
// we have no plans to support. ... unless it's needed for item hygiene....
#[ignore]
#[test]
fn issue_8062(){
run_renaming_test(
&("fn main() {let hrcoo = 19; macro_rules! getx(()=>(hrcoo)); getx!();}",
vec!(vec!(0)), true), 0)
}
// FIXME #6994:
// the z flows into and out of two macros (g & f) along one path, and one
// (just g) along the other, so the result of the whole thing should
// be "let z_123 = 3; z_123"
#[ignore]
#[test]
fn issue_6994(){
run_renaming_test(
&("macro_rules! g (($x:ident) =>
({macro_rules! f(($y:ident)=>({let $y=3;$x}));f!($x)}));
fn a(){g!(z)}",
vec!(vec!(0)),false),
0)
}
// match variable hygiene. Should expand into
// fn z() {match 8 {x_1 => {match 9 {x_2 | x_2 if x_2 == x_1 => x_2 + x_1}}}}
#[test]
fn issue_9384(){
run_renaming_test(
&("macro_rules! bad_macro (($ex:expr) => ({match 9 {x | x if x == $ex => x + $ex}}));
fn z() {match 8 {x => bad_macro!(x)}}",
// NB: the third "binding" is the repeat of the second one.
vec!(vec!(1,3),vec!(0,2),vec!(0,2)),
true),
0)
}
// interpolated nodes weren't getting labeled.
// should expand into
    // fn main() { let g1_1 = 13; g1_1 }
#[test]
fn pat_expand_issue_15221(){
run_renaming_test(
&("macro_rules! inner ( ($e:pat ) => ($e));
macro_rules! outer ( ($e:pat ) => (inner!($e)));
fn main() { let outer!(g) = 13; g;}",
vec!(vec!(0)),
true),
0)
}
// create a really evil test case where a $x appears inside a binding of $x
// but *shouldn't* bind because it was inserted by a different macro....
// can't write this test case until we have macro-generating macros.
// method arg hygiene
// method expands to fn get_x(&self_0, x_1: i32) {self_0 + self_2 + x_3 + x_1}
#[test]
fn method_arg_hygiene(){
run_renaming_test(
&("macro_rules! inject_x (()=>(x));
macro_rules! inject_self (()=>(self));
struct A;
impl A{fn get_x(&self, x: i32) {self + inject_self!() + inject_x!() + x;} }",
vec!(vec!(0),vec!(3)),
true),
0)
}
// ooh, got another bite?
// expands to struct A; impl A {fn thingy(&self_1) {self_1;}}
#[test]
fn method_arg_hygiene_2(){
run_renaming_test(
&("struct A;
macro_rules! add_method (($T:ty) =>
(impl $T { fn thingy(&self) {self;} }));
add_method!(A);",
vec!(vec!(0)),
true),
0)
}
// item fn hygiene
// expands to fn q(x_1: i32){fn g(x_2: i32){x_2 + x_1};}
#[test]
fn issue_9383(){
run_renaming_test(
&("macro_rules! bad_macro (($ex:expr) => (fn g(x: i32){ x + $ex }));
fn q(x: i32) { bad_macro!(x); }",
vec!(vec!(1),vec!(0)),true),
0)
}
// closure arg hygiene (ExprClosure)
// expands to fn f(){(|x_1 : i32| {(x_2 + x_1)})(3);}
#[test]
fn closure_arg_hygiene(){
run_renaming_test(
&("macro_rules! inject_x (()=>(x));
fn f(){(|x : i32| {(inject_x!() + x)})(3);}",
vec!(vec!(1)),
true),
0)
}
// macro_rules in method position. Sadly, unimplemented.
#[test]
fn macro_in_method_posn(){
expand_crate_str(
"macro_rules! my_method (() => (fn thirteen(&self) -> i32 {13}));
struct A;
impl A{ my_method!(); }
fn f(){A.thirteen;}".to_string());
}
// another nested macro
    // expands to impl Entries {fn size_hint(&self_1) {self_1;}}
#[test]
fn item_macro_workaround(){
run_renaming_test(
&("macro_rules! item { ($i:item) => {$i}}
struct Entries;
macro_rules! iterator_impl {
() => { item!( impl Entries { fn size_hint(&self) { self;}});}}
iterator_impl! { }",
vec!(vec!(0)), true),
0)
}
// run one of the renaming tests
fn run_renaming_test(t: &RenamingTest, test_idx: usize) {
let invalid_name = token::special_idents::invalid.name;
let (teststr, bound_connections, bound_ident_check) = match *t {
(ref str,ref conns, bic) => (str.to_string(), conns.clone(), bic)
};
let cr = expand_crate_str(teststr.to_string());
let bindings = crate_bindings(&cr);
let varrefs = crate_varrefs(&cr);
// must be one check clause for each binding:
assert_eq!(bindings.len(),bound_connections.len());
for (binding_idx,shouldmatch) in bound_connections.iter().enumerate() {
let binding_name = mtwt::resolve(bindings[binding_idx]);
let binding_marks = mtwt::marksof(bindings[binding_idx].ctxt, invalid_name);
// shouldmatch can't name varrefs that don't exist:
assert!((shouldmatch.is_empty()) ||
(varrefs.len() > *shouldmatch.iter().max().unwrap()));
for (idx,varref) in varrefs.iter().enumerate() {
let print_hygiene_debug_info = || {
// good lord, you can't make a path with 0 segments, can you?
let final_varref_ident = match varref.segments.last() {
Some(pathsegment) => pathsegment.identifier,
None => panic!("varref with 0 path segments?")
};
let varref_name = mtwt::resolve(final_varref_ident);
let varref_idents : Vec<ast::Ident>
= varref.segments.iter().map(|s| s.identifier)
.collect();
println!("varref #{}: {:?}, resolves to {}",idx, varref_idents, varref_name);
println!("varref's first segment's string: \"{}\"", final_varref_ident);
println!("binding #{}: {}, resolves to {}",
binding_idx, bindings[binding_idx], binding_name);
mtwt::with_sctable(|x| mtwt::display_sctable(x));
};
if shouldmatch.contains(&idx) {
// it should be a path of length 1, and it should
// be free-identifier=? or bound-identifier=? to the given binding
assert_eq!(varref.segments.len(),1);
let varref_name = mtwt::resolve(varref.segments[0].identifier);
let varref_marks = mtwt::marksof(varref.segments[0]
.identifier
.ctxt,
invalid_name);
if !(varref_name==binding_name) {
println!("uh oh, should match but doesn't:");
print_hygiene_debug_info();
}
assert_eq!(varref_name,binding_name);
if bound_ident_check {
// we're checking bound-identifier=?, and the marks
// should be the same, too:
assert_eq!(varref_marks,binding_marks.clone());
}
} else {
let varref_name = mtwt::resolve(varref.segments[0].identifier);
let fail = (varref.segments.len() == 1)
&& (varref_name == binding_name);
// temp debugging:
if fail {
println!("failure on test {}",test_idx);
println!("text of test case: \"{}\"", teststr);
println!("");
println!("uh oh, matches but shouldn't:");
print_hygiene_debug_info();
}
assert!(!fail);
}
}
}
}
#[test]
fn fmt_in_macro_used_inside_module_macro() {
let crate_str = "macro_rules! fmt_wrap(($b:expr)=>($b.to_string()));
macro_rules! foo_module (() => (mod generated { fn a() { let xx = 147; fmt_wrap!(xx);}}));
foo_module!();
".to_string();
let cr = expand_crate_str(crate_str);
// find the xx binding
let bindings = crate_bindings(&cr);
let cxbinds: Vec<&ast::Ident> =
bindings.iter().filter(|b| b.name.as_str() == "xx").collect();
let cxbinds: &[&ast::Ident] = &cxbinds[..];
let cxbind = match (cxbinds.len(), cxbinds.get(0)) {
(1, Some(b)) => *b,
_ => panic!("expected just one binding for ext_cx")
};
let resolved_binding = mtwt::resolve(*cxbind);
let varrefs = crate_varrefs(&cr);
// the xx binding should bind all of the xx varrefs:
for (idx,v) in varrefs.iter().filter(|p| {
p.segments.len() == 1
&& p.segments[0].identifier.name.as_str() == "xx"
}).enumerate() {
if mtwt::resolve(v.segments[0].identifier) != resolved_binding {
println!("uh oh, xx binding didn't match xx varref:");
println!("this is xx varref \\# {}", idx);
println!("binding: {}", cxbind);
println!("resolves to: {}", resolved_binding);
println!("varref: {}", v.segments[0].identifier);
println!("resolves to: {}",
mtwt::resolve(v.segments[0].identifier));
mtwt::with_sctable(|x| mtwt::display_sctable(x));
}
assert_eq!(mtwt::resolve(v.segments[0].identifier),
resolved_binding);
};
}
#[test]
fn pat_idents(){
let pat = string_to_pat(
"(a,Foo{x:c @ (b,9),y:Bar(4,d)})".to_string());
let idents = pattern_bindings(&pat);
assert_eq!(idents, strs_to_idents(vec!("a","c","b","d")));
}
// test the list of identifier patterns gathered by the visitor. Note that
// 'None' is listed as an identifier pattern because we don't yet know that
// it's the name of a 0-ary variant, and that 'i' appears twice in succession.
#[test]
fn crate_bindings_test(){
let the_crate = string_to_crate("fn main (a: i32) -> i32 {|b| {
match 34 {None => 3, Some(i) | i => j, Foo{k:z,l:y} => \"banana\"}} }".to_string());
let idents = crate_bindings(&the_crate);
assert_eq!(idents, strs_to_idents(vec!("a","b","None","i","i","z","y")));
}
// test the IdentRenamer directly
#[test]
fn ident_renamer_test () {
let the_crate = string_to_crate("fn f(x: i32){let x = x; x}".to_string());
let f_ident = token::str_to_ident("f");
let x_ident = token::str_to_ident("x");
let int_ident = token::str_to_ident("i32");
let renames = vec!((x_ident,Name(16)));
let mut renamer = IdentRenamer{renames: &renames};
let renamed_crate = renamer.fold_crate(the_crate);
let idents = crate_idents(&renamed_crate);
let resolved : Vec<ast::Name> = idents.iter().map(|id| mtwt::resolve(*id)).collect();
assert_eq!(resolved, [f_ident.name,Name(16),int_ident.name,Name(16),Name(16),Name(16)]);
}
// test the PatIdentRenamer; only PatIdents get renamed
#[test]
fn pat_ident_renamer_test () {
let the_crate = string_to_crate("fn f(x: i32){let x = x; x}".to_string());
let f_ident = token::str_to_ident("f");
let x_ident = token::str_to_ident("x");
let int_ident = token::str_to_ident("i32");
let renames = vec!((x_ident,Name(16)));
let mut renamer = PatIdentRenamer{renames: &renames};
let renamed_crate = renamer.fold_crate(the_crate);
let idents = crate_idents(&renamed_crate);
let resolved : Vec<ast::Name> = idents.iter().map(|id| mtwt::resolve(*id)).collect();
let x_name = x_ident.name;
assert_eq!(resolved, [f_ident.name,Name(16),int_ident.name,Name(16),x_name,x_name]);
}
}
| 39.028495 | 99 | 0.51432 |
de5da9f95378dc3cb6d21e56f3bbe890d824db5c
| 2,806 |
use maud::{html, DOCTYPE};
use rocket::State;
use crate::logging::clear_user;
use crate::web::partials::{account_search, headers};
use crate::web::response::CustomResponse;
use crate::web::TrackingCode;
#[rocket::get("/")]
pub async fn get(
tracking_code: &State<TrackingCode>,
) -> crate::web::result::Result<CustomResponse> {
clear_user();
let markup = html! {
(DOCTYPE)
html lang="en" {
head {
(headers())
title { "Я – статист в World of Tanks Blitz!" }
}
body {
(tracking_code.0)
section.hero.is-fullheight {
div.hero-body {
div.container {
div.columns {
div.column."is-8"."is-offset-2" {
form action="/search" method="GET" {
div.field.is-grouped.is-grouped-centered.is-grouped-multiline {
div.control {
div.buttons.has-addons.is-small.is-rounded {
a.button.is-rounded.is-small href="/ru/103809874" { "Invincible_Beast" }
a.button.is-rounded.is-small href="/ru/133054164" { "Lucky_Vikk" }
}
}
p.control {
a.button.is-rounded.is-small href="/ru/3851977" { "D_W_S" }
}
p.control {
a.button.is-rounded.is-small href="/ru/5303075" { "Perfect_M1nd" }
}
p.control {
a.button.is-rounded.is-small href="/ru/4435872" { "_n0_skill_just_luck_" }
}
p.control {
a.button.is-rounded.is-small href="/ru/2992069" { "Tortik" }
}
}
(account_search("is-medium", "", true, false))
}
}
}
}
}
}
}
}
};
Ok(CustomResponse::CachedMarkup(
"max-age=604800, stale-while-revalidate=86400",
markup,
))
}
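// Mounting sketch (the launch code is assumed to live elsewhere in the crate;
// nothing below comes from this file):
//
//     rocket::build()
//         .manage(TrackingCode(/* tracking markup */))
//         .mount("/", rocket::routes![get])
//
// would serve this landing page at `GET /`.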
| 42.515152 | 124 | 0.34248 |
6aa13f67d9d571d84d86d92b4f236b28f4490604
| 5,077 |
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
#[doc = "0x00 - Port 1 Input"]
pub p1in: crate::Reg<p1in::P1IN_SPEC>,
#[doc = "0x01 - Port 2 Input"]
pub p2in: crate::Reg<p2in::P2IN_SPEC>,
#[doc = "0x02 - Port 1 Output"]
pub p1out: crate::Reg<p1out::P1OUT_SPEC>,
#[doc = "0x03 - Port 2 Output"]
pub p2out: crate::Reg<p2out::P2OUT_SPEC>,
#[doc = "0x04 - Port 1 Direction"]
pub p1dir: crate::Reg<p1dir::P1DIR_SPEC>,
#[doc = "0x05 - Port 2 Direction"]
pub p2dir: crate::Reg<p2dir::P2DIR_SPEC>,
#[doc = "0x06 - Port 1 Resistor Enable"]
pub p1ren: crate::Reg<p1ren::P1REN_SPEC>,
#[doc = "0x07 - Port 2 Resistor Enable"]
pub p2ren: crate::Reg<p2ren::P2REN_SPEC>,
#[doc = "0x08 - Port 1 Drive Strenght"]
pub p1ds: crate::Reg<p1ds::P1DS_SPEC>,
#[doc = "0x09 - Port 2 Drive Strenght"]
pub p2ds: crate::Reg<p2ds::P2DS_SPEC>,
#[doc = "0x0a - Port 1 Selection"]
pub p1sel: crate::Reg<p1sel::P1SEL_SPEC>,
#[doc = "0x0b - Port 2 Selection"]
pub p2sel: crate::Reg<p2sel::P2SEL_SPEC>,
_reserved12: [u8; 2usize],
#[doc = "0x0e - Port 1 Interrupt Vector Word"]
pub p1iv: crate::Reg<p1iv::P1IV_SPEC>,
_reserved13: [u8; 8usize],
#[doc = "0x18 - Port 1 Interrupt Edge Select"]
pub p1ies: crate::Reg<p1ies::P1IES_SPEC>,
#[doc = "0x19 - Port 2 Interrupt Edge Select"]
pub p2ies: crate::Reg<p2ies::P2IES_SPEC>,
#[doc = "0x1a - Port 1 Interrupt Enable"]
pub p1ie: crate::Reg<p1ie::P1IE_SPEC>,
#[doc = "0x1b - Port 2 Interrupt Enable"]
pub p2ie: crate::Reg<p2ie::P2IE_SPEC>,
#[doc = "0x1c - Port 1 Interrupt Flag"]
pub p1ifg: crate::Reg<p1ifg::P1IFG_SPEC>,
#[doc = "0x1d - Port 2 Interrupt Flag"]
pub p2ifg: crate::Reg<p2ifg::P2IFG_SPEC>,
#[doc = "0x1e - Port 2 Interrupt Vector Word"]
pub p2iv: crate::Reg<p2iv::P2IV_SPEC>,
}
#[doc = "P1IN register accessor: an alias for `Reg<P1IN_SPEC>`"]
pub type P1IN = crate::Reg<p1in::P1IN_SPEC>;
#[doc = "Port 1 Input"]
pub mod p1in;
#[doc = "P2IN register accessor: an alias for `Reg<P2IN_SPEC>`"]
pub type P2IN = crate::Reg<p2in::P2IN_SPEC>;
#[doc = "Port 2 Input"]
pub mod p2in;
#[doc = "P1OUT register accessor: an alias for `Reg<P1OUT_SPEC>`"]
pub type P1OUT = crate::Reg<p1out::P1OUT_SPEC>;
#[doc = "Port 1 Output"]
pub mod p1out;
#[doc = "P2OUT register accessor: an alias for `Reg<P2OUT_SPEC>`"]
pub type P2OUT = crate::Reg<p2out::P2OUT_SPEC>;
#[doc = "Port 2 Output"]
pub mod p2out;
#[doc = "P1DIR register accessor: an alias for `Reg<P1DIR_SPEC>`"]
pub type P1DIR = crate::Reg<p1dir::P1DIR_SPEC>;
#[doc = "Port 1 Direction"]
pub mod p1dir;
#[doc = "P2DIR register accessor: an alias for `Reg<P2DIR_SPEC>`"]
pub type P2DIR = crate::Reg<p2dir::P2DIR_SPEC>;
#[doc = "Port 2 Direction"]
pub mod p2dir;
#[doc = "P1REN register accessor: an alias for `Reg<P1REN_SPEC>`"]
pub type P1REN = crate::Reg<p1ren::P1REN_SPEC>;
#[doc = "Port 1 Resistor Enable"]
pub mod p1ren;
#[doc = "P2REN register accessor: an alias for `Reg<P2REN_SPEC>`"]
pub type P2REN = crate::Reg<p2ren::P2REN_SPEC>;
#[doc = "Port 2 Resistor Enable"]
pub mod p2ren;
#[doc = "P1DS register accessor: an alias for `Reg<P1DS_SPEC>`"]
pub type P1DS = crate::Reg<p1ds::P1DS_SPEC>;
#[doc = "Port 1 Drive Strenght"]
pub mod p1ds;
#[doc = "P2DS register accessor: an alias for `Reg<P2DS_SPEC>`"]
pub type P2DS = crate::Reg<p2ds::P2DS_SPEC>;
#[doc = "Port 2 Drive Strenght"]
pub mod p2ds;
#[doc = "P1SEL register accessor: an alias for `Reg<P1SEL_SPEC>`"]
pub type P1SEL = crate::Reg<p1sel::P1SEL_SPEC>;
#[doc = "Port 1 Selection"]
pub mod p1sel;
#[doc = "P2SEL register accessor: an alias for `Reg<P2SEL_SPEC>`"]
pub type P2SEL = crate::Reg<p2sel::P2SEL_SPEC>;
#[doc = "Port 2 Selection"]
pub mod p2sel;
#[doc = "P1IES register accessor: an alias for `Reg<P1IES_SPEC>`"]
pub type P1IES = crate::Reg<p1ies::P1IES_SPEC>;
#[doc = "Port 1 Interrupt Edge Select"]
pub mod p1ies;
#[doc = "P2IES register accessor: an alias for `Reg<P2IES_SPEC>`"]
pub type P2IES = crate::Reg<p2ies::P2IES_SPEC>;
#[doc = "Port 2 Interrupt Edge Select"]
pub mod p2ies;
#[doc = "P1IE register accessor: an alias for `Reg<P1IE_SPEC>`"]
pub type P1IE = crate::Reg<p1ie::P1IE_SPEC>;
#[doc = "Port 1 Interrupt Enable"]
pub mod p1ie;
#[doc = "P2IE register accessor: an alias for `Reg<P2IE_SPEC>`"]
pub type P2IE = crate::Reg<p2ie::P2IE_SPEC>;
#[doc = "Port 2 Interrupt Enable"]
pub mod p2ie;
#[doc = "P1IFG register accessor: an alias for `Reg<P1IFG_SPEC>`"]
pub type P1IFG = crate::Reg<p1ifg::P1IFG_SPEC>;
#[doc = "Port 1 Interrupt Flag"]
pub mod p1ifg;
#[doc = "P2IFG register accessor: an alias for `Reg<P2IFG_SPEC>`"]
pub type P2IFG = crate::Reg<p2ifg::P2IFG_SPEC>;
#[doc = "Port 2 Interrupt Flag"]
pub mod p2ifg;
#[doc = "P1IV register accessor: an alias for `Reg<P1IV_SPEC>`"]
pub type P1IV = crate::Reg<p1iv::P1IV_SPEC>;
#[doc = "Port 1 Interrupt Vector Word"]
pub mod p1iv;
#[doc = "P2IV register accessor: an alias for `Reg<P2IV_SPEC>`"]
pub type P2IV = crate::Reg<p2iv::P2IV_SPEC>;
#[doc = "Port 2 Interrupt Vector Word"]
pub mod p2iv;
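// Usage sketch (assumptions: this register block is exposed through an
// svd2rust-style PAC, here called `pac`, with the peripheral named `PORT_1_2`;
// neither name comes from this file). Driving P1.0 as an output might look
// like:
//
//     let p = pac::Peripherals::take().unwrap();
//     p.PORT_1_2.p1dir.write(|w| unsafe { w.bits(0x01) }); // P1.0 -> output
//     p.PORT_1_2.p1out.modify(|r, w| unsafe { w.bits(r.bits() | 0x01) });
//
// The closure-based read/write/modify API is the usual svd2rust convention;
// the concrete field-level methods depend on the register modules above.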
| 39.976378 | 66 | 0.665354 |
67561ad6455b29f91e61bd62ba583b89e13ad519
| 240 |
// A recursive "cons list": without the Box indirection, `List` would have
// infinite size, since each Cons would embed a whole List by value.
enum List {
    Cons(i32, Box<List>),
    Nil
}

use List::{Cons, Nil};

fn main() {
    let b = Box::new(5);
    println!("b = {}", b);

    // 1 -> 2 -> 3 -> Nil; each tail lives on the heap behind a Box.
    let _list = Cons(1,
        Box::new(Cons(2,
            Box::new(Cons(3, Box::new(Nil))))));
}
| 15 | 49 | 0.45 |
61aa855a798f8eca8d0f07efc0a2ef55ae9a3f22
| 151 |
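// Compiletest-style UI test: the 1 GiB stack array below is expected to fail
// with a "tried to allocate" error; the `//~^ ERROR` annotation marks the
// expected diagnostic on the line directly above it.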
fn main() {
let data: [u8; 1024*1024*1024] = [42; 1024*1024*1024];
//~^ ERROR: tried to allocate
assert_eq!(data.len(), 1024*1024*1024);
}
| 25.166667 | 58 | 0.589404 |
db986f9b822ea190cfa07924838d6eee6fe4836c
| 18,308 |
/// Defines the HTTP configuration for an API service. It contains a list of
/// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
/// to one or more HTTP REST API methods.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Http {
/// A list of HTTP configuration rules that apply to individual API methods.
///
/// **NOTE:** All service configuration rules follow "last one wins" order.
#[prost(message, repeated, tag = "1")]
pub rules: ::prost::alloc::vec::Vec<HttpRule>,
/// When set to true, URL path parameters will be fully URI-decoded except in
/// cases of single segment matches in reserved expansion, where "%2F" will be
/// left encoded.
///
/// The default behavior is to not decode RFC 6570 reserved characters in multi
/// segment matches.
#[prost(bool, tag = "2")]
pub fully_decode_reserved_expansion: bool,
}
/// # gRPC Transcoding
///
/// gRPC Transcoding is a feature for mapping between a gRPC method and one or
/// more HTTP REST endpoints. It allows developers to build a single API service
/// that supports both gRPC APIs and REST APIs. Many systems, including [Google
/// APIs](https://github.com/googleapis/googleapis),
/// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC
/// Gateway](https://github.com/grpc-ecosystem/grpc-gateway),
/// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature
/// and use it for large scale production services.
///
/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies
/// how different portions of the gRPC request message are mapped to the URL
/// path, URL query parameters, and HTTP request body. It also controls how the
/// gRPC response message is mapped to the HTTP response body. `HttpRule` is
/// typically specified as an `google.api.http` annotation on the gRPC method.
///
/// Each mapping specifies a URL path template and an HTTP method. The path
/// template may refer to one or more fields in the gRPC request message, as long
/// as each field is a non-repeated field with a primitive (non-message) type.
/// The path template controls how fields of the request message are mapped to
/// the URL path.
///
/// Example:
///
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get: "/v1/{name=messages/*}"
/// };
/// }
/// }
/// message GetMessageRequest {
/// string name = 1; // Mapped to URL path.
/// }
/// message Message {
/// string text = 1; // The resource content.
/// }
///
/// This enables an HTTP REST to gRPC mapping as below:
///
/// HTTP | gRPC
/// -----|-----
/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")`
///
/// Any fields in the request message which are not bound by the path template
/// automatically become HTTP query parameters if there is no HTTP request body.
/// For example:
///
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get:"/v1/messages/{message_id}"
/// };
/// }
/// }
/// message GetMessageRequest {
/// message SubMessage {
/// string subfield = 1;
/// }
/// string message_id = 1; // Mapped to URL path.
/// int64 revision = 2; // Mapped to URL query parameter `revision`.
/// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.
/// }
///
/// This enables an HTTP JSON to RPC mapping as below:
///
/// HTTP | gRPC
/// -----|-----
/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
/// "foo"))`
///
/// Note that fields which are mapped to URL query parameters must have a
/// primitive type or a repeated primitive type or a non-repeated message type.
/// In the case of a repeated type, the parameter can be repeated in the URL
/// as `...?param=A¶m=B`. In the case of a message type, each field of the
/// message is mapped to a separate parameter, such as
/// `...?foo.a=A&foo.b=B&foo.c=C`.
///
/// For HTTP methods that allow a request body, the `body` field
/// specifies the mapping. Consider a REST update method on the
/// message resource collection:
///
/// service Messaging {
/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// patch: "/v1/messages/{message_id}"
/// body: "message"
/// };
/// }
/// }
/// message UpdateMessageRequest {
/// string message_id = 1; // mapped to the URL
/// Message message = 2; // mapped to the body
/// }
///
/// The following HTTP JSON to RPC mapping is enabled, where the
/// representation of the JSON in the request body is determined by
/// protos JSON encoding:
///
/// HTTP | gRPC
/// -----|-----
/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
/// "123456" message { text: "Hi!" })`
///
/// The special name `*` can be used in the body mapping to define that
/// every field not bound by the path template should be mapped to the
/// request body. This enables the following alternative definition of
/// the update method:
///
/// service Messaging {
/// rpc UpdateMessage(Message) returns (Message) {
/// option (google.api.http) = {
/// patch: "/v1/messages/{message_id}"
/// body: "*"
/// };
/// }
/// }
/// message Message {
/// string message_id = 1;
/// string text = 2;
/// }
///
///
/// The following HTTP JSON to RPC mapping is enabled:
///
/// HTTP | gRPC
/// -----|-----
/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
/// "123456" text: "Hi!")`
///
/// Note that when using `*` in the body mapping, it is not possible to
/// have HTTP parameters, as all fields not bound by the path end in
/// the body. This makes this option more rarely used in practice when
/// defining REST APIs. The common usage of `*` is in custom methods
/// which don't use the URL at all for transferring data.
///
/// It is possible to define multiple HTTP methods for one RPC by using
/// the `additional_bindings` option. Example:
///
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get: "/v1/messages/{message_id}"
/// additional_bindings {
/// get: "/v1/users/{user_id}/messages/{message_id}"
/// }
/// };
/// }
/// }
/// message GetMessageRequest {
/// string message_id = 1;
/// string user_id = 2;
/// }
///
/// This enables the following two alternative HTTP JSON to RPC mappings:
///
/// HTTP | gRPC
/// -----|-----
/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
/// "123456")`
///
/// ## Rules for HTTP mapping
///
/// 1. Leaf request fields (recursive expansion of nested messages in the
///    request message) are classified into three categories:
/// - Fields referred by the path template. They are passed via the URL path.
/// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP
/// request body.
/// - All other fields are passed via the URL query parameters, and the
/// parameter name is the field path in the request message. A repeated
/// field can be represented as multiple query parameters under the same
/// name.
/// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields
/// are passed via URL path and HTTP request body.
/// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all
/// fields are passed via URL path and URL query parameters.
///
/// ### Path template syntax
///
/// Template = "/" Segments [ Verb ] ;
/// Segments = Segment { "/" Segment } ;
/// Segment = "*" | "**" | LITERAL | Variable ;
/// Variable = "{" FieldPath [ "=" Segments ] "}" ;
/// FieldPath = IDENT { "." IDENT } ;
/// Verb = ":" LITERAL ;
///
/// The syntax `*` matches a single URL path segment. The syntax `**` matches
/// zero or more URL path segments, which must be the last part of the URL path
/// except the `Verb`.
///
/// The syntax `Variable` matches part of the URL path as specified by its
/// template. A variable template must not contain other variables. If a variable
/// matches a single path segment, its template may be omitted, e.g. `{var}`
/// is equivalent to `{var=*}`.
///
/// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`
/// contains any reserved character, such characters should be percent-encoded
/// before the matching.
///
/// If a variable contains exactly one path segment, such as `"{var}"` or
/// `"{var=*}"`, when such a variable is expanded into a URL path on the client
/// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The
/// server side does the reverse decoding. Such variables show up in the
/// [Discovery
/// Document](https://developers.google.com/discovery/v1/reference/apis) as
/// `{var}`.
///
/// If a variable contains multiple path segments, such as `"{var=foo/*}"`
/// or `"{var=**}"`, when such a variable is expanded into a URL path on the
/// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.
/// The server side does the reverse decoding, except "%2F" and "%2f" are left
/// unchanged. Such variables show up in the
/// [Discovery
/// Document](https://developers.google.com/discovery/v1/reference/apis) as
/// `{+var}`.
///
/// ## Using gRPC API Service Configuration
///
/// gRPC API Service Configuration (service config) is a configuration language
/// for configuring a gRPC service to become a user-facing product. The
/// service config is simply the YAML representation of the `google.api.Service`
/// proto message.
///
/// As an alternative to annotating your proto file, you can configure gRPC
/// transcoding in your service config YAML files. You do this by specifying a
/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
/// effect as the proto annotation. This can be particularly useful if you
/// have a proto that is reused in multiple services. Note that any transcoding
/// specified in the service config will override any matching transcoding
/// configuration in the proto.
///
/// Example:
///
/// http:
/// rules:
/// # Selects a gRPC method and applies HttpRule to it.
/// - selector: example.v1.Messaging.GetMessage
/// get: /v1/messages/{message_id}/{sub.subfield}
///
/// ## Special notes
///
/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the
/// proto to JSON conversion must follow the [proto3
/// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
///
/// While the single segment variable follows the semantics of
/// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
/// Expansion, the multi segment variable **does not** follow RFC 6570 Section
/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
/// does not expand special characters like `?` and `#`, which would lead
/// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding
/// for multi segment variables.
///
/// The path variables **must not** refer to any repeated or mapped field,
/// because client libraries are not capable of handling such variable expansion.
///
/// The path variables **must not** capture the leading "/" character. The reason
/// is that the most common use case "{var}" does not capture the leading "/"
/// character. For consistency, all path variables must share the same behavior.
///
/// Repeated message fields must not be mapped to URL query parameters, because
/// no client library can support such complicated mapping.
///
/// If an API needs to use a JSON array for request or response body, it can map
/// the request or response body to a repeated field. However, some gRPC
/// Transcoding implementations may not support this feature.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct HttpRule {
/// Selects a method to which this rule applies.
///
/// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
#[prost(string, tag = "1")]
pub selector: ::prost::alloc::string::String,
/// The name of the request field whose value is mapped to the HTTP request
/// body, or `*` for mapping all request fields not captured by the path
/// pattern to the HTTP body, or omitted for not having any HTTP request body.
///
/// NOTE: the referred field must be present at the top-level of the request
/// message type.
#[prost(string, tag = "7")]
pub body: ::prost::alloc::string::String,
/// Optional. The name of the response field whose value is mapped to the HTTP
/// response body. When omitted, the entire response message will be used
/// as the HTTP response body.
///
/// NOTE: The referred field must be present at the top-level of the response
/// message type.
#[prost(string, tag = "12")]
pub response_body: ::prost::alloc::string::String,
/// Additional HTTP bindings for the selector. Nested bindings must
/// not contain an `additional_bindings` field themselves (that is,
/// the nesting may only be one level deep).
#[prost(message, repeated, tag = "11")]
pub additional_bindings: ::prost::alloc::vec::Vec<HttpRule>,
    /// Determines the URL pattern that is matched by this rule. The pattern
    /// can be used with any of the {get|put|post|delete|patch} methods. A
    /// custom method can be defined using the 'custom' field.
#[prost(oneof = "http_rule::Pattern", tags = "2, 3, 4, 5, 6, 8")]
pub pattern: ::core::option::Option<http_rule::Pattern>,
}
/// Nested message and enum types in `HttpRule`.
pub mod http_rule {
    /// Determines the URL pattern that is matched by this rule. The pattern
    /// can be used with any of the {get|put|post|delete|patch} methods. A
    /// custom method can be defined using the 'custom' field.
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Pattern {
/// Maps to HTTP GET. Used for listing and getting information about
/// resources.
#[prost(string, tag = "2")]
Get(::prost::alloc::string::String),
/// Maps to HTTP PUT. Used for replacing a resource.
#[prost(string, tag = "3")]
Put(::prost::alloc::string::String),
/// Maps to HTTP POST. Used for creating a resource or performing an action.
#[prost(string, tag = "4")]
Post(::prost::alloc::string::String),
/// Maps to HTTP DELETE. Used for deleting a resource.
#[prost(string, tag = "5")]
Delete(::prost::alloc::string::String),
/// Maps to HTTP PATCH. Used for updating a resource.
#[prost(string, tag = "6")]
Patch(::prost::alloc::string::String),
/// The custom pattern is used for specifying an HTTP method that is not
/// included in the `pattern` field, such as HEAD, or "*" to leave the
/// HTTP method unspecified for this rule. The wild-card rule is useful
/// for services that provide content to Web (HTML) clients.
#[prost(message, tag = "8")]
Custom(super::CustomHttpPattern),
}
}
/// A custom pattern is used for defining custom HTTP verb.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CustomHttpPattern {
/// The name of this custom HTTP verb.
#[prost(string, tag = "1")]
pub kind: ::prost::alloc::string::String,
/// The path matched by this custom verb.
#[prost(string, tag = "2")]
pub path: ::prost::alloc::string::String,
}
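// A minimal sketch of building the messages above by hand. Only fields
// defined in this file are used; the module and function names are
// illustrative and not part of the generated API.
#[cfg(test)]
mod http_rule_example {
    use super::{http_rule::Pattern, HttpRule};

    // Maps `GET /v1/messages/{message_id}` with one additional binding,
    // mirroring the `additional_bindings` example in the docs above.
    fn example_get_rule() -> HttpRule {
        HttpRule {
            selector: "example.v1.Messaging.GetMessage".to_string(),
            body: String::new(),
            response_body: String::new(),
            additional_bindings: vec![HttpRule {
                selector: String::new(),
                body: String::new(),
                response_body: String::new(),
                additional_bindings: Vec::new(),
                pattern: Some(Pattern::Get(
                    "/v1/users/{user_id}/messages/{message_id}".to_string(),
                )),
            }],
            pattern: Some(Pattern::Get("/v1/messages/{message_id}".to_string())),
        }
    }

    #[test]
    fn builds_one_additional_binding() {
        let rule = example_get_rule();
        assert_eq!(rule.additional_bindings.len(), 1);
        assert!(rule.body.is_empty()); // no HTTP request body for GET
    }
}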
/// An indicator of the behavior of a given field (for example, that a field
/// is required in requests, or given as output but ignored as input).
/// This **does not** change the behavior in protocol buffers itself; it only
/// denotes the behavior and may affect how API tooling handles the field.
///
/// Note: This enum **may** receive new values in the future.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum FieldBehavior {
/// Conventional default for enums. Do not use this.
Unspecified = 0,
/// Specifically denotes a field as optional.
/// While all fields in protocol buffers are optional, this may be specified
/// for emphasis if appropriate.
Optional = 1,
/// Denotes a field as required.
/// This indicates that the field **must** be provided as part of the request,
/// and failure to do so will cause an error (usually `INVALID_ARGUMENT`).
Required = 2,
/// Denotes a field as output only.
/// This indicates that the field is provided in responses, but including the
/// field in a request does nothing (the server *must* ignore it and
/// *must not* throw an error as a result of the field's presence).
OutputOnly = 3,
/// Denotes a field as input only.
/// This indicates that the field is provided in requests, and the
/// corresponding field is not included in output.
InputOnly = 4,
/// Denotes a field as immutable.
/// This indicates that the field may be set once in a request to create a
/// resource, but may not be changed thereafter.
Immutable = 5,
/// Denotes that a (repeated) field is an unordered list.
/// This indicates that the service may provide the elements of the list
/// in any arbitrary order, rather than the order the user originally
/// provided. Additionally, the list's order may or may not be stable.
UnorderedList = 6,
/// Denotes that this field returns a non-empty default value if not set.
/// This indicates that if the user provides the empty value in a request,
/// a non-empty value will be returned. The user will not be aware of what
/// non-empty value to expect.
NonEmptyDefault = 7,
}
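// Illustrative-only sketch: round-tripping `FieldBehavior` through its i32
// wire value. `from_i32` is what the `::prost::Enumeration` derive generates
// for prost versions of this vintage; newer prost releases expose `try_from`
// instead, so treat this as an assumption about the pinned prost version.
#[allow(dead_code)]
fn example_field_behavior_roundtrip() {
    let wire = FieldBehavior::OutputOnly as i32;
    assert_eq!(
        FieldBehavior::from_i32(wire),
        Some(FieldBehavior::OutputOnly)
    );
}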
| 44.982801 | 104 | 0.655506 |
1e55e12a4a5e3bd25dda2b115c34ca8c83c902db
| 4,787 |
#![feature(proc_macro_hygiene, decl_macro)]
use std::sync::RwLock;
use rocket::{post, routes, State, Config, config::Environment};
use rocket_contrib::json::Json;
use uuid::Uuid;
mod common;
use common::{Entry, Index, Key, PartySignup, Message};
#[macro_use]
extern crate lazy_static;
mod gg18_sign_client;
pub use gg18_sign_client::{sign, sign_vec};
mod gg18_keygen_client;
pub use gg18_keygen_client::key_gen;
use lru::LruCache;
#[post("/get", format = "json", data = "<request>")]
fn get(
db_mtx: State<RwLock<LruCache<Key,String>>>,
request: Json<Index>,
) -> Json<Result<Entry, ()>> {
let index: Index = request.0;
let mut hm = db_mtx.write().unwrap();
match hm.get(&index.key) {
Some(v) => {
let entry = Entry {
key: index.key,
                value: v.clone(),
};
Json(Ok(entry))
}
None => Json(Err(())),
}
}
#[post("/set", format = "json", data = "<request>")]
fn set(db_mtx: State<RwLock<LruCache<Key,String>>>, request: Json<Entry>) -> Json<Result<(), ()>> {
let entry: Entry = request.0;
let mut hm = db_mtx.write().unwrap();
println!("entry.key: {:?}", entry.key.clone());
hm.put(entry.key.clone(), entry.value.clone());
Json(Ok(()))
}
#[post("/signupkeygen", format = "json")]
fn signup_keygen(db_mtx: State<RwLock<LruCache<Key,String>>>) -> Json<Result<PartySignup, ()>> {
    let parties = 3;
    let key = "signup-keygen".to_string();
    // Hold the write lock across the whole read-modify-write so two
    // concurrent signups cannot both observe the same party number.
    let mut hm = db_mtx.write().unwrap();
    let value = hm.get(&key).unwrap();
    let client_signup: PartySignup = serde_json::from_str(value).unwrap();
    let party_signup = if client_signup.number < parties {
        PartySignup {
            number: client_signup.number + 1,
            uuid: client_signup.uuid,
        }
    } else {
        // Round is full: start a fresh round with a new session uuid.
        PartySignup {
            number: 1,
            uuid: Uuid::new_v4().to_string(),
        }
    };
    println!("signup -> key {:?}", key);
    hm.put(key, serde_json::to_string(&party_signup).unwrap());
    Json(Ok(party_signup))
}
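// Client-side sketch (hypothetical; this crate does not itself depend on
// `reqwest`): joining the keygen round means POSTing to /signupkeygen and
// decoding the `Result<PartySignup, ()>` body.
//
//     let signup: Result<PartySignup, ()> = reqwest::blocking::Client::new()
//         .post("http://127.0.0.1:8001/signupkeygen")
//         .header("Content-Type", "application/json")
//         .send()?
//         .json()?;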
#[post("/message", format = "json", data = "<request>")]
fn message(db_mtx: State<RwLock<LruCache<String, u64>>>, request: Json<Message>) -> Json<Result<(), ()>> {
let entry: Message = request.0;
    let threshold = 1;
    let mut h = db_mtx.write().unwrap();
    // Count how many times this key has been posted; allow at most
    // `threshold + 1` posts before rejecting.
    let mut value = match h.get(&entry.key) {
        Some(v) => *v,
        None => 0u64,
    };
    if value < threshold + 1 {
        value += 1;
    } else {
        return Json(Err(()));
    }
    println!("entry.key messages is: {:?}", entry.key);
    h.put(entry.key, value);
Json(Ok(()))
}
#[post("/signupsign", format = "json", data = "<request>")]
fn signup_sign(db_mtx: State<RwLock<LruCache<Key,String>>>, request: Json<Message>) -> Json<Result<PartySignup, ()>> {
    let threshold = 1 + 1;
    let mut key = "signup-sign".to_string();
    let entry: String = request.0.key;
    key.push_str(&entry);
    // Single lock for the whole read-modify-write, as in `signup_keygen`.
    let mut hm = db_mtx.write().unwrap();
    let value = match hm.get(&key) {
        Some(x) => x.clone(),
        None => {
            // First signer for this key: seed a fresh signing session.
            let party_signup_sign = PartySignup {
                number: 0,
                uuid: Uuid::new_v4().to_string(),
            };
            serde_json::to_string(&party_signup_sign).unwrap()
        }
    };
    let client_signup: PartySignup = serde_json::from_str(&value).unwrap();
    if client_signup.number >= threshold {
        return Json(Err(()));
    }
    let party_signup = PartySignup {
        number: client_signup.number + 1,
        uuid: client_signup.uuid,
    };
    hm.put(key, serde_json::to_string(&party_signup).unwrap());
    Json(Ok(party_signup))
Json(Ok(party_signup))
}
pub fn start_sm_manager() {
    // Per-key message counters and the main key/value store for protocol
    // rounds, each behind its own RwLock and managed by Rocket.
    let db: LruCache<Key, u64> = LruCache::new(2500);
    let db_mtx = RwLock::new(db);
    let db2: LruCache<Key, String> = LruCache::new(4500);
    let db2_mtx = RwLock::new(db2);
{
let mut hm = db2_mtx.write().unwrap();
hm.put(
"signup-keygen".to_string(),
serde_json::to_string(& PartySignup {
number: 0,
uuid: Uuid::new_v4().to_string(),
}).unwrap(),
);
}
let mut config = Config::new(Environment::Production);
config.set_port(8001);
rocket::custom(config)
.mount("/", routes![get, set, signup_keygen, signup_sign, message])
.manage(db_mtx)
.manage(db2_mtx)
.launch();
}
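// Usage sketch (hypothetical binary crate embedding this library):
//
//     fn main() {
//         // Blocks the calling thread; Rocket serves the routes above on
//         // port 8001 in production mode.
//         start_sm_manager();
//     }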
| 28.837349 | 118 | 0.553583 |
e5828e087f18b02538bac5fe10c0b61ee9403a03
| 79,905 |
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
pub mod marketplace_agreements {
use crate::models::*;
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<DatadogAgreementResourceListResponse, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Datadog/agreements",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogAgreementResourceListResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
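    // Call-site sketch (illustrative only, not generated): how a consumer
    // would drive the `list` operation above. Building `OperationConfig` is
    // crate setup that is not shown here, and the subscription id is a
    // placeholder.
    //
    //     async fn show_agreements(config: &crate::OperationConfig) {
    //         match list(config, "00000000-0000-0000-0000-000000000000").await {
    //             Ok(page) => println!("agreements: {:?}", page),
    //             Err(err) => eprintln!("list failed: {}", err),
    //         }
    //     }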
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
body: Option<&DatadogAgreementResource>,
) -> std::result::Result<DatadogAgreementResource, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Datadog/agreements/default",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(body) = body {
azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogAgreementResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod monitors {
use crate::models::*;
pub async fn list_api_keys(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<DatadogApiKeyListResponse, list_api_keys::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/listApiKeys",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(list_api_keys::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_api_keys::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_api_keys::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_api_keys::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogApiKeyListResponse =
serde_json::from_slice(rsp_body).map_err(|source| list_api_keys::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list_api_keys::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_api_keys::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_api_keys {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_default_key(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<DatadogApiKey, get_default_key::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/getDefaultKey",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(get_default_key::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_default_key::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_default_key::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_default_key::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogApiKey = serde_json::from_slice(rsp_body)
.map_err(|source| get_default_key::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| get_default_key::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_default_key::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get_default_key {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn set_default_key(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
body: Option<&DatadogApiKey>,
) -> std::result::Result<(), set_default_key::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/setDefaultKey",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(set_default_key::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(set_default_key::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(body) = body {
azure_core::to_json(body).map_err(set_default_key::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(set_default_key::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(set_default_key::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| set_default_key::Error::DeserializeError(source, rsp_body.clone()))?;
Err(set_default_key::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod set_default_key {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
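    // Flow sketch (illustrative only): `get_default_key` and `set_default_key`
    // pair up naturally, and both error types implement `std::error::Error`,
    // so a boxed error covers the combined flow. The `DatadogApiKey` fields
    // live in `models` and are simply threaded through here.
    //
    //     async fn reassert_default_key(
    //         config: &crate::OperationConfig,
    //         sub: &str,
    //         rg: &str,
    //         monitor: &str,
    //     ) -> Result<(), Box<dyn std::error::Error>> {
    //         let key = get_default_key(config, sub, rg, monitor).await?;
    //         set_default_key(config, sub, rg, monitor, Some(&key)).await?;
    //         Ok(())
    //     }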
pub async fn list_hosts(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<DatadogHostListResponse, list_hosts::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/listHosts",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(list_hosts::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_hosts::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_hosts::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_hosts::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogHostListResponse =
serde_json::from_slice(rsp_body).map_err(|source| list_hosts::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list_hosts::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_hosts::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_hosts {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_linked_resources(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<LinkedResourceListResponse, list_linked_resources::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/listLinkedResources",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(list_linked_resources::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_linked_resources::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_linked_resources::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_linked_resources::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LinkedResourceListResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_linked_resources::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_linked_resources::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_linked_resources::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_linked_resources {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_monitored_resources(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<MonitoredResourceListResponse, list_monitored_resources::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/listMonitoredResources",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(list_monitored_resources::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_monitored_resources::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_monitored_resources::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_monitored_resources::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MonitoredResourceListResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_monitored_resources::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_monitored_resources::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_monitored_resources::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_monitored_resources {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<DatadogMonitorResourceListResponse, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Datadog/monitors",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogMonitorResourceListResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
) -> std::result::Result<DatadogMonitorResourceListResponse, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_resource_group::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_resource_group::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogMonitorResourceListResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_resource_group::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_resource_group {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<DatadogMonitorResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogMonitorResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
body: Option<&DatadogMonitorResource>,
) -> std::result::Result<create::Response, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(body) = body {
azure_core::to_json(body).map_err(create::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogMonitorResource =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: DatadogMonitorResource =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(DatadogMonitorResource),
Created201(DatadogMonitorResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
body: Option<&DatadogMonitorResourceUpdateParameters>,
) -> std::result::Result<update::Response, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(body) = body {
azure_core::to_json(body).map_err(update::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogMonitorResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: DatadogMonitorResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod update {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(DatadogMonitorResource),
Created201(DatadogMonitorResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
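    // Handling sketch (illustrative only): the success variants of
    // `delete::Response` distinguish a synchronous delete (200), an accepted
    // long-running delete (202), and an already-absent resource (204), so
    // callers typically match all three.
    //
    //     match delete(config, sub, rg, monitor).await? {
    //         delete::Response::Ok200 => println!("deleted"),
    //         delete::Response::Accepted202 => println!("delete accepted, still in progress"),
    //         delete::Response::NoContent204 => println!("nothing to delete"),
    //     }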
pub async fn refresh_set_password_link(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<DatadogSetPasswordLink, refresh_set_password_link::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/refreshSetPasswordLink",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(refresh_set_password_link::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(refresh_set_password_link::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(refresh_set_password_link::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(refresh_set_password_link::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogSetPasswordLink = serde_json::from_slice(rsp_body)
.map_err(|source| refresh_set_password_link::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| refresh_set_password_link::Error::DeserializeError(source, rsp_body.clone()))?;
Err(refresh_set_password_link::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod refresh_set_password_link {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod operations {
use crate::models::*;
pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/providers/Microsoft.Datadog/operations", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: OperationListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod tag_rules {
use crate::models::*;
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<MonitoringTagRulesListResponse, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/tagRules",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MonitoringTagRulesListResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
rule_set_name: &str,
) -> std::result::Result<MonitoringTagRules, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/tagRules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name,
rule_set_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MonitoringTagRules =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
rule_set_name: &str,
body: Option<&MonitoringTagRules>,
) -> std::result::Result<MonitoringTagRules, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/tagRules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name,
rule_set_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(body) = body {
azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MonitoringTagRules = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod single_sign_on_configurations {
use crate::models::*;
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<DatadogSingleSignOnResourceListResponse, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/singleSignOnConfigurations",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogSingleSignOnResourceListResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
configuration_name: &str,
) -> std::result::Result<DatadogSingleSignOnResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/singleSignOnConfigurations/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name,
configuration_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogSingleSignOnResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
configuration_name: &str,
body: Option<&DatadogSingleSignOnResource>,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Datadog/monitors/{}/singleSignOnConfigurations/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name,
configuration_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = if let Some(body) = body {
azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DatadogSingleSignOnResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: DatadogSingleSignOnResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ResourceProviderDefaultErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(DatadogSingleSignOnResource),
Created201(DatadogSingleSignOnResource),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
| 48.368644 | 137 | 0.588849 |
914968d90b5481596f82cec2c4a359e3ed661068
| 18,315 |
use const_fnv1a_hash::fnv1a_hash_str_64;
use proc_macro::TokenStream;
use quote::{quote, format_ident};
use syn::{parse_macro_input, DeriveInput};
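/// Derives `bitgen::BitType` (plus the matching `TupleAccess`/`MaybeAccess`
/// impls) for structs and enums so their values can be packed into and
/// unpacked from bit-level storage.
///
/// A minimal usage sketch; the call-site API lives in the `bitgen` crate and
/// the field types shown are assumed to implement `BitType`:
///
/// ```ignore
/// #[derive(BitType)]
/// struct Flags {
///     a: bool,
///     b: bool,
/// }
/// ```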
#[proc_macro_derive(BitType)]
pub fn bit_type(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let vis = input.vis;
let ident = input.ident;
let generics = input.generics;
match input.data {
syn::Data::Struct(data) => match data.fields {
syn::Fields::Named(fields) => {
let field_idents: Vec<_> = fields
.named
.iter()
.map(|field| field.ident.clone().unwrap())
.collect();
let field_ident_id: Vec<_> = field_idents.iter().map(|field| fnv1a_hash_str_64(field.to_string().as_str()) as usize).collect();
let field_types: Vec<_> =
fields.named.iter().map(|field| field.ty.clone()).collect();
let field_type_offsets: Vec<Vec<_>> = field_types.iter().enumerate().map(|(i, _)| field_types.iter().take(i).collect()).collect();
quote! {
#(
impl #generics bitgen::TupleAccess<#field_ident_id> for #ident #generics {
type Element = #field_types;
const BIT_OFFSET: usize = 0#(+<#field_type_offsets as bitgen::BitType>::BITS)*;
}
)*
impl #generics bitgen::BitType for #ident #generics {
const BITS: usize = 0#(+<#field_types as bitgen::BitType>::BITS)*;
fn from_aligned(aligned: &Self, slice: &mut [u8], mut offset: usize) {
#(
                                <#field_types as bitgen::BitType>::from_aligned(&aligned.#field_idents, &mut slice[bitgen::internal::get_byte_range(offset, <#field_types as bitgen::BitType>::BITS)], offset % 8);
offset += <#field_types as bitgen::BitType>::BITS;
)*
}
fn to_aligned(slice: &[u8], mut offset: usize) -> Self {
Self {
#(
#field_idents: {
let res = <#field_types as bitgen::BitType>::to_aligned(&slice[bitgen::internal::get_byte_range(offset, <#field_types as bitgen::BitType>::BITS)], offset % 8);
offset += <#field_types as bitgen::BitType>::BITS;
res
}
),*
}
}
}
}
}
syn::Fields::Unnamed(fields) => {
let field_idents: Vec<_> = fields
.unnamed
.iter()
.enumerate()
.map(|(i, _)| syn::Index::from(i))
.collect();
let field_types: Vec<_> =
fields.unnamed.iter().map(|field| field.ty.clone()).collect();
let field_type_offsets: Vec<Vec<_>> = field_types.iter().enumerate().map(|(i, _)| field_types.iter().take(i).collect()).collect();
quote! {
#(
impl #generics bitgen::TupleAccess<#field_idents> for #ident #generics {
type Element = #field_types;
const BIT_OFFSET: usize = 0#(+<#field_type_offsets as bitgen::BitType>::BITS)*;
}
)*
impl #generics bitgen::BitType for #ident #generics {
const BITS: usize = 0#(+<#field_types as bitgen::BitType>::BITS)*;
fn from_aligned(aligned: &Self, slice: &mut [u8], mut offset: usize) {
#(
<#field_types as bitgen::BitType>::from_aligned(&aligned.#field_idents, &mut slice[bitgen::internal::get_byte_range(offset, <#field_types as bitgen::BitType>::BITS)], offset % 8);
offset += <#field_types as bitgen::BitType>::BITS;
)*
}
fn to_aligned(slice: &[u8], mut offset: usize) -> Self {
Self(
#(
{
let res = <#field_types as bitgen::BitType>::to_aligned(&slice[bitgen::internal::get_byte_range(offset, <#field_types as bitgen::BitType>::BITS)], offset % 8);
offset += <#field_types as bitgen::BitType>::BITS;
res
}
),*
)
}
}
}
},
syn::Fields::Unit => {
quote! {
impl #generics bitgen::BitType for #ident #generics {
const BITS: usize = 0;
                        fn from_aligned(_aligned: &Self, _slice: &mut [u8], _offset: usize) {}
                        fn to_aligned(_slice: &[u8], _offset: usize) -> Self { Self }
}
}
}
},
syn::Data::Enum(data) => {
let num_variants = data.variants.len();
if num_variants == 0 {
panic!("Cannot implement on an enum with no variants");
}
            let bits_to_represent = std::mem::size_of::<usize>() * 8 - (num_variants - 1).leading_zeros() as usize;
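            // Tag width example: 1 variant needs 0 bits, 2 variants need 1,
            // 3..=4 need 2 and 5..=8 need 3: the number of bits required to
            // index `num_variants` values.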
let variant_fields: Vec<_> = data.variants.iter()
.filter_map(|variant| {
match &variant.fields {
syn::Fields::Named(fields) => Some(quote!{#fields}),
syn::Fields::Unnamed(fields) => Some(quote!{#fields;}),
syn::Fields::Unit => None,
}
}).collect();
let unit_idents: Vec<_> = data.variants.iter()
.filter(|variant| matches!(variant.fields, syn::Fields::Unit))
.map(|variant| variant.ident.clone()).collect();
let unit_ident_ids: Vec<_> = unit_idents.iter().map(|ident| fnv1a_hash_str_64(ident.to_string().as_str()) as usize).collect();
let unit_idents_index: Vec<quote::__private::TokenStream> = data.variants.iter()
.enumerate()
.filter(|(_, variant)| matches!(variant.fields, syn::Fields::Unit))
.map(|(i, _)| proc_macro::Literal::usize_unsuffixed(i).to_string().parse::<TokenStream>().unwrap().into()).collect();
let idents: Vec<_> = data.variants.iter()
.filter(|variant| matches!(variant.fields, syn::Fields::Unnamed(_) | syn::Fields::Named(_)))
.map(|variant| variant.ident.clone()).collect();
let ident_ids: Vec<_> = idents.iter().map(|ident| fnv1a_hash_str_64(ident.to_string().as_str()) as usize).collect();
let idents_index: Vec<quote::__private::TokenStream> = data.variants.iter()
.enumerate()
.filter(|(_, variant)| matches!(variant.fields, syn::Fields::Unnamed(_) | syn::Fields::Named(_)))
.map(|(i, _)| proc_macro::Literal::usize_unsuffixed(i).to_string().parse::<TokenStream>().unwrap().into()).collect();
let field_idents: Vec<Vec<_>> = data.variants.iter()
.filter_map(|variant|
match &variant.fields {
syn::Fields::Named(fields) => Some(fields.named.iter().map(|field| syn::Member::Named(field.ident.clone().unwrap())).collect()),
syn::Fields::Unnamed(fields) => Some(fields.unnamed.iter().enumerate().map(|(i, _)| syn::Member::Unnamed(syn::Index::from(i))).collect()),
syn::Fields::Unit => None,
}).collect();
let field_types: Vec<Vec<_>> = data.variants.iter()
.filter_map(|variant|
match &variant.fields {
syn::Fields::Named(fields) => Some(fields.named.iter().map(|field| field.ty.clone()).collect()),
syn::Fields::Unnamed(fields) => Some(fields.unnamed.iter().map(|field| field.ty.clone()).collect()),
syn::Fields::Unit => None,
}).collect();
let uuid = uuid::Uuid::new_v4();
let unique_wrapper_ident = format_ident!("Wrap{}{}", ident, uuid.to_string().replace('-', ""));
let unique_idents = idents.iter().map(|id| format_ident!("{}{}{}", ident, id, uuid.to_string().replace('-', "")));
let captured_field_idents: Vec<Vec<_>> = data.variants.iter()
.filter_map(|variant|
match &variant.fields {
syn::Fields::Named(fields) => Some(fields.named.iter().map(|field| format_ident!("t_{}_{}", syn::Member::Named(field.ident.clone().unwrap()), uuid.to_string().replace('-', "_"))).collect()),
syn::Fields::Unnamed(fields) => Some(fields.unnamed.iter().enumerate().map(|(i, _)| format_ident!("t_{}_{}", syn::Member::Unnamed(syn::Index::from(i)), uuid.to_string().replace('-', "_"))).collect()),
syn::Fields::Unit => None,
}).collect();
let field_ident_id: Vec<Vec<_>> = field_idents.iter().map(|fields| fields.iter().map(|field| match field {
syn::Member::Named(ident) => fnv1a_hash_str_64(ident.to_string().as_str()) as usize,
syn::Member::Unnamed(index) => index.index as usize,
}).collect()).collect();
let field_type_offsets: Vec<Vec<Vec<_>>> = field_types.iter().map(|types| types.iter().enumerate().map(|(i, _)| {
types.iter().take(i).collect()
}).collect()).collect();
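            // Generated layout (informal): for a multi-variant enum the first
            // `bits_to_represent` bits hold the variant tag, followed by the
            // chosen variant's fields, so BITS is the tag width plus the bit
            // width of the widest variant.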
let implementation = if num_variants == 1 {
quote! {
impl #generics bitgen::BitType for #ident #generics {
const BITS: usize = #unique_wrapper_ident(0)#(
.max(0#(
+ <#field_types as bitgen::BitType>::BITS
)*)
)*.0;
fn from_aligned(aligned: &Self, slice: &mut [u8], mut offset: usize) {
#(
if let Self::#idents { #(#field_idents: #captured_field_idents @ _,)* } = aligned {
#(
                                        <#field_types as bitgen::BitType>::from_aligned(#captured_field_idents, &mut slice[bitgen::internal::get_byte_range(offset, <#field_types as bitgen::BitType>::BITS)], offset % 8);
                                        offset += <#field_types as bitgen::BitType>::BITS;
)*
}
)*
}
fn to_aligned(slice: &[u8], mut offset: usize) -> Self {
#(
Self::#unit_idents
)*
#(
Self::#idents {
#(
#field_idents: {
let res = <#field_types as bitgen::BitType>::to_aligned(&slice[bitgen::internal::get_byte_range(offset, <#field_types as bitgen::BitType>::BITS)], offset % 8);
offset += <#field_types as bitgen::BitType>::BITS;
res
},
)*
}
)*
}
}
}
} else {
quote! {
impl #generics bitgen::BitType for #ident #generics {
const BITS: usize = #bits_to_represent + #unique_wrapper_ident(0)#(
.max(0#(
+ <#field_types as bitgen::BitType>::BITS
)*)
)*.0;
fn from_aligned(aligned: &Self, slice: &mut [u8], mut offset: usize) {
match &aligned {
#(
Self::#unit_idents => {
bitgen::U::<#bits_to_represent>::from_aligned(&bitgen::U::new(#unit_idents_index), &mut slice[bitgen::internal::get_byte_range(offset, #bits_to_represent)], offset);
},
)*
#(
Self::#idents { #(#field_idents: #captured_field_idents @ _,)* } => {
let range = bitgen::internal::get_byte_range(offset, #bits_to_represent);
bitgen::U::<#bits_to_represent>::from_aligned(&bitgen::U::new(#idents_index), &mut slice[range], offset);
offset += #bits_to_represent;
#(
                                        <#field_types as bitgen::BitType>::from_aligned(#captured_field_idents, &mut slice[bitgen::internal::get_byte_range(offset, <#field_types as bitgen::BitType>::BITS)], offset % 8);
                                        offset += <#field_types as bitgen::BitType>::BITS;
)*
},
)*
}
}
fn to_aligned(slice: &[u8], mut offset: usize) -> Self {
let underlying = bitgen::U::<#bits_to_represent>::to_aligned(&slice[bitgen::internal::get_byte_range(offset, #bits_to_represent)], offset);
offset += #bits_to_represent;
match underlying.extract_underlying() {
#(#unit_idents_index => Self::#unit_idents,)*
#(#idents_index => Self::#idents {
#(#field_idents: {
                                let res = <#field_types as bitgen::BitType>::to_aligned(&slice[bitgen::internal::get_byte_range(offset, <#field_types as bitgen::BitType>::BITS)], offset % 8);
offset += <#field_types as bitgen::BitType>::BITS;
res
                            }),*
},)*
_ => unreachable!(),
}
}
}
}
};
quote! {
#(
impl #generics bitgen::MaybeAccess<#unit_ident_ids> for #ident #generics {
type Element = ();
const BIT_OFFSET: usize = #bits_to_represent;
const EXPECTED: u32 = #unit_idents_index;
}
)*
#(
#vis struct #unique_idents #generics #variant_fields
#(
impl #generics bitgen::TupleAccess<#field_ident_id> for #unique_idents #generics {
type Element = #field_types;
const BIT_OFFSET: usize = 0 #( + <#field_type_offsets as bitgen::BitType>::BITS)*;
}
)*
impl #generics bitgen::MaybeAccess<#ident_ids> for #ident #generics {
type Element = #unique_idents #generics;
const BIT_OFFSET: usize = #bits_to_represent;
const EXPECTED: u32 = #idents_index;
}
impl #generics bitgen::BitType for #unique_idents #generics {
const BITS: usize = 0#(+<#field_types as bitgen::BitType>::BITS)*;
fn from_aligned(aligned: &Self, slice: &mut [u8], mut offset: usize) {
#(
<#field_types as bitgen::BitType>::from_aligned(&aligned.#field_idents, &mut slice[bitgen::internal::get_byte_range(offset, <#field_types as bitgen::BitType>::BITS)], offset % 8);
offset += <#field_types as bitgen::BitType>::BITS;
)*
}
fn to_aligned(slice: &[u8], mut offset: usize) -> Self {
Self {
#(
#field_idents: {
let res = <#field_types as bitgen::BitType>::to_aligned(&slice[bitgen::internal::get_byte_range(offset, <#field_types as bitgen::BitType>::BITS)], offset % 8);
offset += <#field_types as bitgen::BitType>::BITS;
res
},
)*
}
}
}
)*
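                // Branch-free const `max`: `Ord::max` cannot be called in
                // const context, so index a two-element array with the
                // comparison result instead.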
struct #unique_wrapper_ident(usize);
impl #unique_wrapper_ident {
const fn max(&self, other: usize) -> Self {
#unique_wrapper_ident([self.0, other][(self.0 < other) as usize])
}
}
#implementation
}
},
syn::Data::Union(_) => todo!(),
}
.into()
}
| 55.332326 | 224 | 0.42932 |
084e08c807eb8fce5141724d2856b37d20acbea4
| 8,703 |
use crate::{clock::Epoch, program_error::ProgramError, pubkey::Pubkey};
use std::{
cell::{Ref, RefCell, RefMut},
cmp, fmt,
rc::Rc,
};
/// Account information
#[derive(Clone)]
pub struct AccountInfo<'a> {
/// Public key of the account
pub key: &'a Pubkey,
/// Was the transaction signed by this account's public key?
pub is_signer: bool,
/// Is the account writable?
pub is_writable: bool,
/// The lamports in the account. Modifiable by programs.
pub lamports: Rc<RefCell<&'a mut u64>>,
/// The data held in this account. Modifiable by programs.
pub data: Rc<RefCell<&'a mut [u8]>>,
/// Program that owns this account
pub owner: &'a Pubkey,
/// This account's data contains a loaded program (and is now read-only)
pub executable: bool,
/// The epoch at which this account will next owe rent
pub rent_epoch: Epoch,
}
impl<'a> fmt::Debug for AccountInfo<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let data_len = cmp::min(64, self.data_len());
let data_str = if data_len > 0 {
format!(
" data: {} ...",
                hex::encode(&self.data.borrow()[..data_len])
)
} else {
"".to_string()
};
write!(
f,
"AccountInfo {{ key: {} owner: {} is_signer: {} is_writable: {} executable: {} rent_epoch: {} lamports: {} data.len: {} {} }}",
self.key,
self.owner,
self.is_signer,
self.is_writable,
self.executable,
self.rent_epoch,
self.lamports(),
self.data_len(),
data_str,
)
}
}
impl<'a> AccountInfo<'a> {
pub fn signer_key(&self) -> Option<&Pubkey> {
if self.is_signer {
Some(self.key)
} else {
None
}
}
pub fn unsigned_key(&self) -> &Pubkey {
self.key
}
pub fn lamports(&self) -> u64 {
**self.lamports.borrow()
}
pub fn try_lamports(&self) -> Result<u64, ProgramError> {
Ok(**self.try_borrow_lamports()?)
}
pub fn data_len(&self) -> usize {
self.data.borrow().len()
}
pub fn try_data_len(&self) -> Result<usize, ProgramError> {
Ok(self.try_borrow_data()?.len())
}
pub fn data_is_empty(&self) -> bool {
self.data.borrow().is_empty()
}
pub fn try_data_is_empty(&self) -> Result<bool, ProgramError> {
Ok(self.try_borrow_data()?.is_empty())
}
pub fn try_borrow_lamports(&self) -> Result<Ref<&mut u64>, ProgramError> {
self.lamports
.try_borrow()
.map_err(|_| ProgramError::AccountBorrowFailed)
}
pub fn try_borrow_mut_lamports(&self) -> Result<RefMut<&'a mut u64>, ProgramError> {
self.lamports
.try_borrow_mut()
.map_err(|_| ProgramError::AccountBorrowFailed)
}
pub fn try_borrow_data(&self) -> Result<Ref<&mut [u8]>, ProgramError> {
self.data
.try_borrow()
.map_err(|_| ProgramError::AccountBorrowFailed)
}
pub fn try_borrow_mut_data(&self) -> Result<RefMut<&'a mut [u8]>, ProgramError> {
self.data
.try_borrow_mut()
.map_err(|_| ProgramError::AccountBorrowFailed)
}
pub fn new(
key: &'a Pubkey,
is_signer: bool,
is_writable: bool,
lamports: &'a mut u64,
data: &'a mut [u8],
owner: &'a Pubkey,
executable: bool,
rent_epoch: Epoch,
) -> Self {
Self {
key,
is_signer,
is_writable,
lamports: Rc::new(RefCell::new(lamports)),
data: Rc::new(RefCell::new(data)),
owner,
executable,
rent_epoch,
}
}
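    /// Deserialize the account data as a bincode-encoded `T`.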
pub fn deserialize_data<T: serde::de::DeserializeOwned>(&self) -> Result<T, bincode::Error> {
bincode::deserialize(&self.data.borrow())
}
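    /// Serialize `state` into the account data with bincode, failing with a
    /// `SizeLimit` error if the encoding would not fit the existing buffer.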
pub fn serialize_data<T: serde::Serialize>(&mut self, state: &T) -> Result<(), bincode::Error> {
if bincode::serialized_size(state)? > self.data_len() as u64 {
return Err(Box::new(bincode::ErrorKind::SizeLimit));
}
bincode::serialize_into(&mut self.data.borrow_mut()[..], state)
}
}
/// Constructs an `AccountInfo` from self, used in conversion implementations.
pub trait IntoAccountInfo<'a> {
fn into_account_info(self) -> AccountInfo<'a>;
}
impl<'a, T: IntoAccountInfo<'a>> From<T> for AccountInfo<'a> {
fn from(src: T) -> Self {
src.into_account_info()
}
}
/// Provides information required to construct an `AccountInfo`, used in
/// conversion implementations.
pub trait Account {
fn get(&mut self) -> (&mut u64, &mut [u8], &Pubkey, bool, Epoch);
}
/// Convert (&'a Pubkey, &'a mut T) where T: Account into an `AccountInfo`
impl<'a, T: Account> IntoAccountInfo<'a> for (&'a Pubkey, &'a mut T) {
fn into_account_info(self) -> AccountInfo<'a> {
let (key, account) = self;
let (lamports, data, owner, executable, rent_epoch) = account.get();
AccountInfo::new(
key, false, false, lamports, data, owner, executable, rent_epoch,
)
}
}
/// Convert (&'a Pubkey, bool, &'a mut T) where T: Account into an
/// `AccountInfo`.
impl<'a, T: Account> IntoAccountInfo<'a> for (&'a Pubkey, bool, &'a mut T) {
fn into_account_info(self) -> AccountInfo<'a> {
let (key, is_signer, account) = self;
let (lamports, data, owner, executable, rent_epoch) = account.get();
AccountInfo::new(
key, is_signer, false, lamports, data, owner, executable, rent_epoch,
)
}
}
/// Convert &'a mut (Pubkey, T) where T: Account into an `AccountInfo`.
impl<'a, T: Account> IntoAccountInfo<'a> for &'a mut (Pubkey, T) {
fn into_account_info(self) -> AccountInfo<'a> {
let (ref key, account) = self;
let (lamports, data, owner, executable, rent_epoch) = account.get();
AccountInfo::new(
key, false, false, lamports, data, owner, executable, rent_epoch,
)
}
}
/// Return the next `AccountInfo` or a `NotEnoughAccountKeys` error.
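///
/// A typical iteration sketch inside a program entrypoint, where `accounts`
/// is assumed to be the slice handed in by the runtime:
///
/// ```ignore
/// let account_info_iter = &mut accounts.iter();
/// let payer = next_account_info(account_info_iter)?;
/// let recipient = next_account_info(account_info_iter)?;
/// ```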
pub fn next_account_info<'a, 'b, I: Iterator<Item = &'a AccountInfo<'b>>>(
iter: &mut I,
) -> Result<I::Item, ProgramError> {
iter.next().ok_or(ProgramError::NotEnoughAccountKeys)
}
/// Return a slice of the next `count` `AccountInfo`s or a
/// `NotEnoughAccountKeys` error.
pub fn next_account_infos<'a, 'b: 'a>(
iter: &mut std::slice::Iter<'a, AccountInfo<'b>>,
count: usize,
) -> Result<&'a [AccountInfo<'b>], ProgramError> {
let accounts = iter.as_slice();
if accounts.len() < count {
return Err(ProgramError::NotEnoughAccountKeys);
}
let (accounts, remaining) = accounts.split_at(count);
*iter = remaining.iter();
Ok(accounts)
}
impl<'a> AsRef<AccountInfo<'a>> for AccountInfo<'a> {
fn as_ref(&self) -> &AccountInfo<'a> {
self
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_next_account_infos() {
let k1 = Pubkey::new_unique();
let k2 = Pubkey::new_unique();
let k3 = Pubkey::new_unique();
let k4 = Pubkey::new_unique();
let k5 = Pubkey::new_unique();
let l1 = &mut 0;
let l2 = &mut 0;
let l3 = &mut 0;
let l4 = &mut 0;
let l5 = &mut 0;
let d1 = &mut [0u8];
let d2 = &mut [0u8];
let d3 = &mut [0u8];
let d4 = &mut [0u8];
let d5 = &mut [0u8];
let infos = &[
AccountInfo::new(&k1, false, false, l1, d1, &k1, false, 0),
AccountInfo::new(&k2, false, false, l2, d2, &k2, false, 0),
AccountInfo::new(&k3, false, false, l3, d3, &k3, false, 0),
AccountInfo::new(&k4, false, false, l4, d4, &k4, false, 0),
AccountInfo::new(&k5, false, false, l5, d5, &k5, false, 0),
];
let infos_iter = &mut infos.iter();
let info1 = next_account_info(infos_iter).unwrap();
let info2_3_4 = next_account_infos(infos_iter, 3).unwrap();
let info5 = next_account_info(infos_iter).unwrap();
assert_eq!(k1, *info1.key);
assert_eq!(k2, *info2_3_4[0].key);
assert_eq!(k3, *info2_3_4[1].key);
assert_eq!(k4, *info2_3_4[2].key);
assert_eq!(k5, *info5.key);
}
#[test]
fn test_account_info_as_ref() {
let k = Pubkey::new_unique();
let l = &mut 0;
let d = &mut [0u8];
let info = AccountInfo::new(&k, false, false, l, d, &k, false, 0);
assert_eq!(info.key, info.as_ref().key);
}
}
| 31.193548 | 139 | 0.566586 |
c1bef539d374086a34159bba0b3696b8e5fe7718
| 61,865 |
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Facade and handler for the rest of the blockchain implementation
//! and mostly the chain pipeline.
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::{
Block, BlockHeader, BlockSums, Committed, Inputs, KernelFeatures, Output, OutputIdentifier,
SegmentIdentifier, Transaction, TxKernel,
};
use crate::core::core::{
BlockTokenSums, TokenIssueProof, TokenOutput, TokenOutputIdentifier, TokenTxKernel,
};
use crate::core::global;
use crate::core::pow;
use crate::core::ser::ProtocolVersion;
use crate::error::{Error, ErrorKind};
use crate::pipe;
use crate::store;
use crate::txhashset;
use crate::txhashset::{PMMRHandle, Segmenter, TxHashSet};
use crate::types::{
BlockStatus, ChainAdapter, CommitPos, NoStatus, Options, Tip, TxHashsetWriteStatus,
};
use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::{util::RwLock, ChainStore};
use grin_core::ser;
use grin_store::Error::NotFoundErr;
use std::fs::{self, File};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::{collections::HashMap, io::Cursor};
/// Orphan pool size is limited by MAX_ORPHAN_SIZE
pub const MAX_ORPHAN_SIZE: usize = 200;
/// When evicting, very old orphans are evicted first
const MAX_ORPHAN_AGE_SECS: u64 = 300;
#[derive(Debug, Clone)]
struct Orphan {
block: Block,
opts: Options,
added: Instant,
}
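/// Pool of orphan blocks (blocks whose parent we do not yet have), bounded by
/// `MAX_ORPHAN_SIZE` and indexed both by block hash and by height.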
pub struct OrphanBlockPool {
// blocks indexed by their hash
orphans: RwLock<HashMap<Hash, Orphan>>,
// additional index of height -> hash
// so we can efficiently identify a child block (ex-orphan) after processing a block
height_idx: RwLock<HashMap<u64, Vec<Hash>>>,
// accumulated number of evicted block because of MAX_ORPHAN_SIZE limitation
evicted: AtomicUsize,
}
impl OrphanBlockPool {
fn new() -> OrphanBlockPool {
OrphanBlockPool {
orphans: RwLock::new(HashMap::new()),
height_idx: RwLock::new(HashMap::new()),
evicted: AtomicUsize::new(0),
}
}
fn len(&self) -> usize {
let orphans = self.orphans.read();
orphans.len()
}
fn len_evicted(&self) -> usize {
self.evicted.load(Ordering::Relaxed)
}
fn add(&self, orphan: Orphan) {
let mut orphans = self.orphans.write();
let mut height_idx = self.height_idx.write();
{
let height_hashes = height_idx
.entry(orphan.block.header.height)
.or_insert_with(|| vec![]);
height_hashes.push(orphan.block.hash());
orphans.insert(orphan.block.hash(), orphan);
}
if orphans.len() > MAX_ORPHAN_SIZE {
let old_len = orphans.len();
// evict too old
orphans.retain(|_, ref mut x| {
x.added.elapsed() < Duration::from_secs(MAX_ORPHAN_AGE_SECS)
});
// evict too far ahead
let mut heights = height_idx.keys().cloned().collect::<Vec<u64>>();
heights.sort_unstable();
for h in heights.iter().rev() {
if let Some(hs) = height_idx.remove(h) {
for h in hs {
let _ = orphans.remove(&h);
}
}
if orphans.len() < MAX_ORPHAN_SIZE {
break;
}
}
// cleanup index
height_idx.retain(|_, ref mut xs| xs.iter().any(|x| orphans.contains_key(&x)));
self.evicted
.fetch_add(old_len - orphans.len(), Ordering::Relaxed);
}
}
	/// Remove and return all orphans at the given height, dropping the height
	/// index entry at the same time and avoiding any cloning of the blocks.
fn remove_by_height(&self, height: u64) -> Option<Vec<Orphan>> {
let mut orphans = self.orphans.write();
let mut height_idx = self.height_idx.write();
height_idx
.remove(&height)
.map(|hs| hs.iter().filter_map(|h| orphans.remove(h)).collect())
}
pub fn contains(&self, hash: &Hash) -> bool {
let orphans = self.orphans.read();
orphans.contains_key(hash)
}
}
/// Facade to the blockchain block processing pipeline and storage. Provides
/// the current view of the TxHashSet according to the chain state. Also
/// maintains locking for the pipeline to avoid conflicting processing.
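///
/// A rough construction sketch; the argument values are illustrative only:
///
/// ```ignore
/// let chain = Chain::init(db_root, adapter, genesis, pow::verify_size, false)?;
/// chain.process_block(block, Options::NONE)?;
/// ```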
pub struct Chain {
db_root: String,
store: Arc<store::ChainStore>,
adapter: Arc<dyn ChainAdapter + Send + Sync>,
orphans: Arc<OrphanBlockPool>,
txhashset: Arc<RwLock<txhashset::TxHashSet>>,
header_pmmr: Arc<RwLock<txhashset::PMMRHandle<BlockHeader>>>,
pibd_segmenter: Arc<RwLock<Option<Segmenter>>>,
// POW verification function
pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>,
archive_mode: bool,
genesis: BlockHeader,
}
impl Chain {
/// Initializes the blockchain and returns a new Chain instance. Does a
/// check on the current chain head to make sure it exists and creates one
/// based on the genesis block if necessary.
pub fn init(
db_root: String,
adapter: Arc<dyn ChainAdapter + Send + Sync>,
genesis: Block,
pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>,
archive_mode: bool,
) -> Result<Chain, Error> {
let store = Arc::new(store::ChainStore::new(&db_root)?);
// DB migrations to be run prior to the chain being used.
// Migrate full blocks to protocol version v3.
Chain::migrate_db_v3_to_v4(&store)?;
// open the txhashset, creating a new one if necessary
let mut txhashset = txhashset::TxHashSet::open(db_root.clone(), store.clone(), None)?;
let mut header_pmmr = PMMRHandle::new(
Path::new(&db_root).join("header").join("header_head"),
false,
ProtocolVersion(1),
None,
)?;
setup_head(&genesis, &store, &mut header_pmmr, &mut txhashset)?;
// Initialize the output_pos index based on UTXO set
// and NRD kernel_pos index based recent kernel history.
{
let batch = store.batch()?;
txhashset.init_output_pos_index(&header_pmmr, &batch)?;
txhashset.init_token_output_pos_index(&header_pmmr, &batch)?;
txhashset.init_recent_kernel_pos_index(&header_pmmr, &batch)?;
batch.commit()?;
}
let chain = Chain {
db_root,
store,
adapter,
orphans: Arc::new(OrphanBlockPool::new()),
txhashset: Arc::new(RwLock::new(txhashset)),
header_pmmr: Arc::new(RwLock::new(header_pmmr)),
pibd_segmenter: Arc::new(RwLock::new(None)),
pow_verifier,
archive_mode,
genesis: genesis.header,
};
chain.log_heads()?;
// Temporarily exercising the initialization process.
// Note: This is *really* slow because we are starting from cold.
//
// This is not required as we will lazily initialize our segmenter as required
// once we start receiving PIBD segment requests.
// In reality we will do this based on PIBD segment requests.
// Initialization (once per 12 hour period) will not be this slow once lmdb and PMMRs
// are warmed up.
if let Ok(segmenter) = chain.segmenter() {
let _ = segmenter.kernel_segment(SegmentIdentifier { height: 9, idx: 0 });
let _ = segmenter.bitmap_segment(SegmentIdentifier { height: 9, idx: 0 });
let _ = segmenter.output_segment(SegmentIdentifier { height: 11, idx: 0 });
let _ = segmenter.rangeproof_segment(SegmentIdentifier { height: 7, idx: 0 });
}
Ok(chain)
}
/// Are we running with archive_mode enabled?
pub fn archive_mode(&self) -> bool {
self.archive_mode
}
/// Return our shared header MMR handle.
pub fn header_pmmr(&self) -> Arc<RwLock<PMMRHandle<BlockHeader>>> {
self.header_pmmr.clone()
}
/// Return our shared txhashset instance.
pub fn txhashset(&self) -> Arc<RwLock<TxHashSet>> {
self.txhashset.clone()
}
/// Shared store instance.
pub fn store(&self) -> Arc<store::ChainStore> {
self.store.clone()
}
fn log_heads(&self) -> Result<(), Error> {
let log_head = |name, head: Tip| {
debug!(
"{}: {} @ {} [{}]",
name,
head.total_difficulty.to_num(),
head.height,
head.hash(),
);
};
log_head("head", self.head()?);
log_head("header_head", self.header_head()?);
Ok(())
}
/// Processes a single block, then checks for orphans, processing
/// those as well if they're found
pub fn process_block(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
let height = b.header.height;
let res = self.process_block_single(b, opts);
if res.is_ok() {
self.check_orphans(height + 1);
}
res
}
/// We plan to support receiving blocks with CommitOnly inputs.
/// We also need to support relaying blocks with FeaturesAndCommit inputs to peers.
/// So we need a way to convert blocks from CommitOnly to FeaturesAndCommit.
/// Validating the inputs against the utxo_view allows us to look the outputs up.
pub fn convert_block_v3(&self, block: Block) -> Result<Block, Error> {
debug!(
"convert_block_v3: {} at {} ({} -> v3)",
block.header.hash(),
block.header.height,
block.inputs().version_str(),
);
if block.inputs().is_empty() {
return Ok(Block {
header: block.header,
aux_data: block.aux_data,
body: block.body.replace_inputs(Inputs::FeaturesAndCommit(vec![])),
});
}
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
let inputs: Vec<_> =
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
let previous_header = batch.get_previous_header(&block.header)?;
pipe::rewind_and_apply_fork(&previous_header, ext, batch)?;
ext.extension
.utxo_view(ext.header_extension)
.validate_inputs(&block.inputs(), batch)
.map(|outputs| outputs.into_iter().map(|(out, _)| out).collect())
})?;
let inputs = inputs.as_slice().into();
Ok(Block {
header: block.header,
aux_data: block.aux_data,
body: block.body.replace_inputs(inputs),
})
}
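	/// Classify how an accepted block relates to the chain: "next" if it
	/// directly extends the previous head, "reorg" if it replaced the head via
	/// a fork, and "fork" if the chain head was not updated at all.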
fn determine_status(
&self,
head: Option<Tip>,
prev: Tip,
prev_head: Tip,
fork_point: Tip,
) -> BlockStatus {
// If head is updated then we are either "next" block or we just experienced a "reorg" to new head.
// Otherwise this is a "fork" off the main chain.
if let Some(head) = head {
if head.prev_block_h == prev_head.last_block_h {
BlockStatus::Next { prev }
} else {
BlockStatus::Reorg {
prev,
prev_head,
fork_point,
}
}
} else {
BlockStatus::Fork {
prev,
head: prev_head,
fork_point,
}
}
}
/// Quick check for "known" duplicate block up to and including current chain head.
fn is_known(&self, header: &BlockHeader) -> Result<(), Error> {
let head = self.head()?;
if head.hash() == header.hash() {
return Err(ErrorKind::Unfit("duplicate block".into()).into());
}
if header.total_difficulty() <= head.total_difficulty {
if self.block_exists(header.hash())? {
return Err(ErrorKind::Unfit("duplicate block".into()).into());
}
}
Ok(())
}
// Check if the provided block is an orphan.
// If block is an orphan add it to our orphan block pool for deferred processing.
// If this is the "next" block immediately following current head then not an orphan.
// Or if we have the previous full block then not an orphan.
fn check_orphan(&self, block: &Block, opts: Options) -> Result<(), Error> {
let head = self.head()?;
let is_next = block.header.prev_hash == head.last_block_h;
if is_next || self.block_exists(block.header.prev_hash)? {
return Ok(());
}
let block_hash = block.hash();
let orphan = Orphan {
block: block.clone(),
opts,
added: Instant::now(),
};
self.orphans.add(orphan);
debug!(
"is_orphan: {:?}, # orphans {}{}",
block_hash,
self.orphans.len(),
if self.orphans.len_evicted() > 0 {
format!(", # evicted {}", self.orphans.len_evicted())
} else {
String::new()
},
);
Err(ErrorKind::Orphan.into())
}
/// Attempt to add a new block to the chain.
/// Returns true if it has been added to the longest chain
/// or false if it has added to a fork (or orphan?).
fn process_block_single(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
// Check if we already know about this block.
self.is_known(&b.header)?;
// Process the header first.
// If invalid then fail early.
// If valid then continue with block processing with header_head committed to db etc.
self.process_block_header(&b.header, opts)?;
// Check if this block is an orphan.
// Only do this once we know the header PoW is valid.
self.check_orphan(&b, opts)?;
// We can only reliably convert to "v3" if not an orphan (may spend output from previous block).
// We convert from "v4" to "v3" by looking up outputs to be spent.
// This conversion also ensures a block received in "v3" has valid input features (prevents malleability).
let b = self.convert_block_v3(b)?;
let (head, fork_point, prev_head) = {
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
let batch = self.store.batch()?;
let prev_head = batch.head()?;
let mut ctx = self.new_ctx(opts, batch, &mut header_pmmr, &mut txhashset)?;
let (head, fork_point) = pipe::process_block(&b, &mut ctx)?;
ctx.batch.commit()?;
// release the lock and let the batch go before post-processing
(head, fork_point, prev_head)
};
let prev = self.get_previous_header(&b.header)?;
let status = self.determine_status(
head,
Tip::from_header(&prev),
prev_head,
Tip::from_header(&fork_point),
);
// notifying other parts of the system of the update
self.adapter.block_accepted(&b, status, opts);
Ok(head)
}
/// Process a block header received during "header first" propagation.
/// Note: This will update header MMR and corresponding header_head
/// if total work increases (on the header chain).
pub fn process_block_header(&self, bh: &BlockHeader, opts: Options) -> Result<(), Error> {
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
let batch = self.store.batch()?;
let mut ctx = self.new_ctx(opts, batch, &mut header_pmmr, &mut txhashset)?;
pipe::process_block_header(bh, &mut ctx)?;
ctx.batch.commit()?;
Ok(())
}
/// Attempt to add new headers to the header chain (or fork).
/// This is only ever used during sync and is based on sync_head.
/// We update header_head here if our total work increases.
/// Returns the new sync_head (may temporarily diverge from header_head when syncing a long fork).
pub fn sync_block_headers(
&self,
headers: &[BlockHeader],
sync_head: Tip,
opts: Options,
) -> Result<Option<Tip>, Error> {
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
let batch = self.store.batch()?;
// Sync the chunk of block headers, updating header_head if total work increases.
let mut ctx = self.new_ctx(opts, batch, &mut header_pmmr, &mut txhashset)?;
let sync_head = pipe::process_block_headers(headers, sync_head, &mut ctx)?;
ctx.batch.commit()?;
Ok(sync_head)
}
/// Build a new block processing context.
pub fn new_ctx<'a>(
&self,
opts: Options,
batch: store::Batch<'a>,
header_pmmr: &'a mut txhashset::PMMRHandle<BlockHeader>,
txhashset: &'a mut txhashset::TxHashSet,
) -> Result<pipe::BlockContext<'a>, Error> {
Ok(pipe::BlockContext {
opts,
pow_verifier: self.pow_verifier,
header_pmmr,
txhashset,
batch,
})
}
/// Check if hash is for a known orphan.
pub fn is_orphan(&self, hash: &Hash) -> bool {
self.orphans.contains(hash)
}
/// Get the OrphanBlockPool accumulated evicted number of blocks
pub fn orphans_evicted_len(&self) -> usize {
self.orphans.len_evicted()
}
/// Check for orphans, once a block is successfully added
fn check_orphans(&self, mut height: u64) {
let initial_height = height;
// Is there an orphan in our orphans that we can now process?
loop {
trace!(
"check_orphans: at {}, # orphans {}",
height,
self.orphans.len(),
);
let mut orphan_accepted = false;
let mut height_accepted = height;
if let Some(orphans) = self.orphans.remove_by_height(height) {
let orphans_len = orphans.len();
for (i, orphan) in orphans.into_iter().enumerate() {
debug!(
"check_orphans: get block {} at {}{}",
orphan.block.hash(),
height,
if orphans_len > 1 {
format!(", no.{} of {} orphans", i, orphans_len)
} else {
String::new()
},
);
let height = orphan.block.header.height;
let res = self.process_block_single(orphan.block, orphan.opts);
if res.is_ok() {
orphan_accepted = true;
height_accepted = height;
}
}
if orphan_accepted {
// We accepted a block, so see if we can accept any orphans
height = height_accepted + 1;
continue;
}
}
break;
}
if initial_height != height {
debug!(
"check_orphans: {} blocks accepted since height {}, remaining # orphans {}",
height - initial_height,
initial_height,
self.orphans.len(),
);
}
}
/// Returns Ok(Some((out, pos))) if output is unspent.
/// Returns Ok(None) if output is spent.
/// Returns Err if something went wrong beyond not finding the output.
pub fn get_unspent(
&self,
commit: Commitment,
) -> Result<Option<(OutputIdentifier, CommitPos)>, Error> {
self.txhashset.read().get_unspent(commit)
}
/// TODO - where do we call this from? And do we need a rewind first?
	/// For the given token output reference, find the unspent output and
	/// return the associated position. Return an error if the output does not
	/// exist or has been spent. This querying is done in a way that is
	/// consistent with the current chain state, specifically the current
	/// winning (valid, most work) fork.
pub fn get_token_unspent(
&self,
output_ref: &TokenOutputIdentifier,
) -> Result<Option<CommitPos>, Error> {
self.txhashset.read().get_token_unspent(output_ref)
}
/// Retrieves an unspent output using its PMMR position
pub fn get_unspent_output_at(&self, pos: u64) -> Result<Output, Error> {
let header_pmmr = self.header_pmmr.read();
let txhashset = self.txhashset.read();
txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, _| {
utxo.get_unspent_output_at(pos)
})
}
/// Retrieves an unspent output using its PMMR position
pub fn get_unspent_token_output_at(&self, pos: u64) -> Result<TokenOutput, Error> {
let header_pmmr = self.header_pmmr.read();
let txhashset = self.txhashset.read();
txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, _| {
utxo.get_unspent_token_output_at(pos)
})
}
/// Validate the tx against the current UTXO set and recent kernels (NRD relative lock heights).
pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
self.validate_tx_against_utxo(tx)?;
self.validate_tx_kernels(tx)?;
Ok(())
}
/// Validates NRD relative height locks against "recent" kernel history.
/// Applies the kernels to the current kernel MMR in a readonly extension.
/// The extension and the db batch are discarded.
/// The batch ensures duplicate NRD kernels within the tx are handled correctly.
fn validate_tx_kernels(&self, tx: &Transaction) -> Result<(), Error> {
		let has_nrd_kernel = tx
			.kernels()
			.iter()
			.any(|k| matches!(k.features, KernelFeatures::NoRecentDuplicate { .. }));
if !has_nrd_kernel {
return Ok(());
}
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
let height = self.next_block_height()?;
ext.extension.apply_kernels(tx.kernels(), height, batch)
})
}
fn validate_tx_against_utxo(
&self,
tx: &Transaction,
) -> Result<Vec<(OutputIdentifier, CommitPos)>, Error> {
let header_pmmr = self.header_pmmr.read();
let txhashset = self.txhashset.read();
txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| {
utxo.validate_tx(tx, batch)
})
}
/// Validates inputs against the current utxo.
/// Each input must spend an unspent output.
/// Returns the vec of output identifiers and their pos of the outputs
/// that would be spent by the inputs.
pub fn validate_inputs(
&self,
inputs: &Inputs,
) -> Result<Vec<(OutputIdentifier, CommitPos)>, Error> {
let header_pmmr = self.header_pmmr.read();
let txhashset = self.txhashset.read();
txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| {
utxo.validate_inputs(inputs, batch)
})
}
fn next_block_height(&self) -> Result<u64, Error> {
let bh = self.head_header()?;
Ok(bh.height + 1)
}
/// Verify we are not attempting to spend a coinbase output
/// that has not yet sufficiently matured.
pub fn verify_coinbase_maturity(&self, inputs: &Inputs) -> Result<(), Error> {
let height = self.next_block_height()?;
let header_pmmr = self.header_pmmr.read();
let txhashset = self.txhashset.read();
txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| {
utxo.verify_coinbase_maturity(inputs, height, batch)?;
Ok(())
})
}
/// Verify that the tx has a lock_height that is less than or equal to
/// the height of the next block.
pub fn verify_tx_lock_height(&self, tx: &Transaction) -> Result<(), Error> {
let height = self.next_block_height()?;
if tx.lock_height() <= height {
Ok(())
} else {
Err(ErrorKind::TxLockHeight.into())
}
}
/// Validate the current chain state.
pub fn validate(&self, fast_validation: bool) -> Result<(), Error> {
let header = self.store.head_header()?;
// Lets just treat an "empty" node that just got started up as valid.
if header.height == 0 {
return Ok(());
}
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
// Now create an extension from the txhashset and validate against the
// latest block header. Rewind the extension to the specified header to
// ensure the view is consistent.
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
pipe::rewind_and_apply_fork(&header, ext, batch)?;
ext.extension
.validate(&self.genesis, fast_validation, &NoStatus, &header)?;
Ok(())
})
}
/// Sets prev_root on a brand new block header by applying the previous header to the header MMR.
pub fn set_prev_root_only(&self, header: &mut BlockHeader) -> Result<(), Error> {
let mut header_pmmr = self.header_pmmr.write();
let prev_root =
txhashset::header_extending_readonly(&mut header_pmmr, &self.store(), |ext, batch| {
let prev_header = batch.get_previous_header(header)?;
pipe::rewind_and_apply_header_fork(&prev_header, ext, batch)?;
ext.root()
})?;
// Set the prev_root on the header.
header.prev_root = prev_root;
Ok(())
}
/// Sets the txhashset roots on a brand new block by applying the block on
/// the current txhashset state.
pub fn set_txhashset_roots(&self, b: &mut Block) -> Result<(), Error> {
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
let (prev_root, roots, sizes) =
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
let previous_header = batch.get_previous_header(&b.header)?;
pipe::rewind_and_apply_fork(&previous_header, ext, batch)?;
let extension = &mut ext.extension;
let header_extension = &mut ext.header_extension;
// Retrieve the header root before we apply the new block
let prev_root = header_extension.root()?;
// Apply the latest block to the chain state via the extension.
extension.apply_block(b, header_extension, batch)?;
Ok((prev_root, extension.roots()?, extension.sizes()))
})?;
// Set the output and kernel MMR sizes.
// Note: We need to do this *before* calculating the roots as the output_root
// depends on the output_mmr_size
{
// Carefully destructure these correctly...
let (
output_mmr_size,
_,
kernel_mmr_size,
token_output_mmr_size,
_,
token_issue_proof_mmr_size,
token_kernel_mmr_size,
) = sizes;
b.header.output_mmr_size = output_mmr_size;
b.header.kernel_mmr_size = kernel_mmr_size;
b.header.token_output_mmr_size = token_output_mmr_size;
b.header.token_issue_proof_mmr_size = token_issue_proof_mmr_size;
b.header.token_kernel_mmr_size = token_kernel_mmr_size;
}
// Set the prev_root on the header.
b.header.prev_root = prev_root;
// Set the output, rangeproof and kernel MMR roots.
b.header.output_root = roots.output_root(&b.header);
b.header.range_proof_root = roots.rproof_root;
b.header.kernel_root = roots.kernel_root;
b.header.token_output_root = roots.token_output_root;
b.header.token_range_proof_root = roots.token_rproof_root;
b.header.token_issue_proof_root = roots.token_issue_proof_root;
b.header.token_kernel_root = roots.token_kernel_root;
Ok(())
}
/// Return a Merkle proof for the given commitment from the store.
pub fn get_merkle_proof<T: AsRef<OutputIdentifier>>(
&self,
out_id: T,
header: &BlockHeader,
) -> Result<MerkleProof, Error> {
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
let merkle_proof =
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
pipe::rewind_and_apply_fork(&header, ext, batch)?;
ext.extension.merkle_proof(out_id, batch)
})?;
Ok(merkle_proof)
}
/// Return a merkle proof valid for the current output pmmr state at the
/// given pos
pub fn get_merkle_proof_for_pos(&self, commit: Commitment) -> Result<MerkleProof, Error> {
let mut txhashset = self.txhashset.write();
txhashset.merkle_proof(commit)
}
/// Return a merkle proof valid for the current token output pmmr state at the
/// given pos
pub fn get_token_merkle_proof_for_pos(&self, commit: Commitment) -> Result<MerkleProof, Error> {
let mut txhashset = self.txhashset.write();
txhashset.token_merkle_proof(commit)
}
/// Provides a reading view into the current txhashset state as well as
/// the required indexes for a consumer to rewind to a consistent state
/// at the provided block hash.
pub fn txhashset_read(&self, h: Hash) -> Result<(u64, u64, u64, u64, File), Error> {
// now we want to rewind the txhashset extension and
// sync a "rewound" copy of the leaf_set files to disk
// so we can send these across as part of the zip file.
// The fast sync client does *not* have the necessary data
// to rewind after receiving the txhashset zip.
let header = self.get_block_header(&h)?;
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
pipe::rewind_and_apply_fork(&header, ext, batch)?;
ext.extension.snapshot(batch)?;
// prepare the zip
txhashset::zip_read(self.db_root.clone(), &header).map(|file| {
(
header.output_mmr_size,
header.kernel_mmr_size,
header.token_output_mmr_size,
header.token_issue_proof_mmr_size,
file,
)
})
})
}
	/// The segmenter is responsible for generating PIBD segments.
	/// We cache a segmenter instance based on the current archive period (new period every 12 hours).
	/// This allows us to efficiently generate bitmap segments for the current archive period.
	///
	/// It is a relatively expensive operation to initialize and cache a new segmenter instance
	/// as this involves rewinding the txhashset by approx 720 blocks (12 hours).
///
/// Caller is responsible for only doing this when required.
/// Caller should verify a peer segment request is valid before calling this for example.
///
pub fn segmenter(&self) -> Result<Segmenter, Error> {
// The archive header corresponds to the data we will segment.
let ref archive_header = self.txhashset_archive_header()?;
// Use our cached segmenter if we have one and the associated header matches.
if let Some(x) = self.pibd_segmenter.read().as_ref() {
if x.header() == archive_header {
return Ok(x.clone());
}
}
		// We have no cached segmenter or the cached segmenter is no longer useful.
		// Initialize a new segmenter, cache it and return it.
let segmenter = self.init_segmenter(archive_header)?;
let mut cache = self.pibd_segmenter.write();
*cache = Some(segmenter.clone());
		Ok(segmenter)
}
/// This is an expensive rewind to recreate bitmap state but we only need to do this once.
/// Caller is responsible for "caching" the segmenter (per archive period) for reuse.
fn init_segmenter(&self, header: &BlockHeader) -> Result<Segmenter, Error> {
let now = Instant::now();
debug!(
"init_segmenter: initializing new segmenter for {} at {}",
header.hash(),
header.height
);
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
let bitmap_snapshot =
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
ext.extension.rewind(header, batch)?;
Ok(ext.extension.bitmap_accumulator())
})?;
debug!("init_segmenter: done, took {}ms", now.elapsed().as_millis());
Ok(Segmenter::new(
self.txhashset(),
Arc::new(bitmap_snapshot),
header.clone(),
))
}
/// To support the ability to download the txhashset from multiple peers in parallel,
/// the peers must all agree on the exact binary representation of the txhashset.
/// This means compacting and rewinding to the exact same header.
/// Since compaction is a heavy operation, peers can agree to compact every 12 hours,
/// and no longer support requesting arbitrary txhashsets.
/// Here we return the header of the txhashset we are currently offering to peers.
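///
/// For example (illustrative numbers only, not the configured defaults):
/// with `body_head.height = 10_000`, a sync threshold of `1_000` and an
/// archive interval of `720`, the height is `10_000 - 1_000 = 9_000`,
/// rounded down to a multiple of the interval: `9_000 - (9_000 % 720) = 8_640`.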
pub fn txhashset_archive_header(&self) -> Result<BlockHeader, Error> {
let sync_threshold = global::state_sync_threshold() as u64;
let body_head = self.head()?;
let archive_interval = global::txhashset_archive_interval();
let mut txhashset_height = body_head.height.saturating_sub(sync_threshold);
txhashset_height = txhashset_height.saturating_sub(txhashset_height % archive_interval);
debug!(
"txhashset_archive_header: body_head - {}, {}, txhashset height - {}",
body_head.last_block_h, body_head.height, txhashset_height,
);
self.get_header_by_height(txhashset_height)
}
// Special handling to make sure the whole kernel set matches each of its
// roots in each block header, without truncation. We go back header by
// header, rewind and check each root. This fixes a potential weakness in
// fast sync where a reorg past the horizon could allow a whole rewrite of
// the kernel set.
fn validate_kernel_history(
&self,
header: &BlockHeader,
txhashset: &txhashset::TxHashSet,
) -> Result<(), Error> {
debug!("validate_kernel_history: rewinding and validating kernel history (readonly)");
let mut count = 0;
let mut current = header.clone();
txhashset::rewindable_kernel_view(&txhashset, |view, batch| {
while current.height > 0 {
view.rewind(¤t)?;
view.validate_root()?;
current = batch.get_previous_header(¤t)?;
count += 1;
}
Ok(())
})?;
debug!(
"validate_kernel_history: validated kernel root on {} headers",
count,
);
Ok(())
}
/// Finds the "fork point" where header chain diverges from full block chain.
/// If we are syncing this will correspond to the last full block where
/// the next header is known but we do not yet have the full block.
/// i.e. This is the last known full block and all subsequent blocks are missing.
pub fn fork_point(&self) -> Result<BlockHeader, Error> {
let body_head = self.head()?;
let mut current = self.get_block_header(&body_head.hash())?;
while self.is_on_current_chain(&current).is_err() {
current = self.get_previous_header(¤t)?;
}
Ok(current)
}
/// Compare fork point to our horizon.
/// If beyond the horizon then we cannot sync via recent full blocks
/// and we need a state (txhashset) sync.
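///
/// For example (illustrative numbers only): with `header_head.height = 100_000`
/// and a horizon of `5_000`, any fork point below height `95_000` means the
/// missing blocks are beyond the horizon and a txhashset download is required.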
pub fn check_txhashset_needed(&self, fork_point: &BlockHeader) -> Result<bool, Error> {
if self.archive_mode() {
debug!("check_txhashset_needed: we are running with archive_mode=true, not needed");
return Ok(false);
}
let header_head = self.header_head()?;
let horizon = global::cut_through_horizon() as u64;
Ok(fork_point.height < header_head.height.saturating_sub(horizon))
}
/// Clean the temporary sandbox folder
pub fn clean_txhashset_sandbox(&self) {
txhashset::clean_txhashset_folder(&self.get_tmp_dir());
}
/// Specific tmp dir.
/// Normally it's ~/.grin/main/tmp for mainnet
/// or ~/.grin/test/tmp for Testnet
pub fn get_tmp_dir(&self) -> PathBuf {
let mut tmp_dir = PathBuf::from(self.db_root.clone());
tmp_dir = tmp_dir
.parent()
.expect("fail to get parent of db_root dir")
.to_path_buf();
tmp_dir.push("tmp");
tmp_dir
}
/// Get a tmp file path in above specific tmp dir (create tmp dir if not exist)
/// Delete file if tmp file already exists
pub fn get_tmpfile_pathname(&self, tmpfile_name: String) -> PathBuf {
let mut tmp = self.get_tmp_dir();
if !tmp.exists() {
if let Err(e) = fs::create_dir(tmp.clone()) {
warn!("fail to create tmp folder on {:?}. err: {}", tmp, e);
}
}
tmp.push(tmpfile_name);
if tmp.exists() {
if let Err(e) = fs::remove_file(tmp.clone()) {
warn!("fail to clean existing tmp file: {:?}. err: {}", tmp, e);
}
}
tmp
}
/// Writes a reading view on a txhashset state that's been provided to us.
/// If we're willing to accept that new state, the data stream will be
/// read as a zip file, unzipped and the resulting state files should be
/// rewound to the provided indexes.
pub fn txhashset_write(
&self,
h: Hash,
txhashset_data: File,
status: &dyn TxHashsetWriteStatus,
) -> Result<bool, Error> {
status.on_setup();
// Initial check whether this txhashset is needed or not
let fork_point = self.fork_point()?;
if !self.check_txhashset_needed(&fork_point)? {
warn!("txhashset_write: txhashset received but it's not needed! ignored.");
return Err(ErrorKind::InvalidTxHashSet("not needed".to_owned()).into());
}
let header = match self.get_block_header(&h) {
Ok(header) => header,
Err(_) => {
warn!("txhashset_write: cannot find block header");
// This is a bannable reason
return Ok(true);
}
};
// Write txhashset to sandbox (in the Grin specific tmp dir)
let sandbox_dir = self.get_tmp_dir();
txhashset::clean_txhashset_folder(&sandbox_dir);
txhashset::zip_write(sandbox_dir.clone(), txhashset_data.try_clone()?, &header)?;
let mut txhashset = txhashset::TxHashSet::open(
sandbox_dir
.to_str()
.expect("invalid sandbox folder")
.to_owned(),
self.store.clone(),
Some(&header),
)?;
// Validate the full kernel history.
// Check kernel MMR root for every block header.
// Check NRD relative height rules for full kernel history.
{
self.validate_kernel_history(&header, &txhashset)?;
let header_pmmr = self.header_pmmr.read();
let batch = self.store.batch()?;
txhashset.verify_kernel_pos_index(&self.genesis, &header_pmmr, &batch)?;
}
// all good, prepare a new batch and update all the required records
debug!("txhashset_write: rewinding a 2nd time (writeable)");
let mut header_pmmr = self.header_pmmr.write();
let mut batch = self.store.batch()?;
txhashset::extending(
&mut header_pmmr,
&mut txhashset,
&mut batch,
|ext, batch| {
let extension = &mut ext.extension;
extension.rewind(&header, batch)?;
// Validate the extension, generating the utxo_sum and kernel_sum.
// Full validation, including rangeproofs and kernel signature verification.
let (utxo_sum, kernel_sum, block_token_sums) =
extension.validate(&self.genesis, false, status, &header)?;
// Save the block_sums (utxo_sum, kernel_sum) to the db for use later.
batch.save_block_sums(
&header.hash(),
BlockSums {
utxo_sum,
kernel_sum,
},
)?;
batch.save_block_token_sums(&header.hash(), &block_token_sums)?;
Ok(())
},
)?;
debug!("txhashset_write: finished validating and rebuilding");
status.on_save();
// Save the new head to the db and rebuild the header by height index.
{
let tip = Tip::from_header(&header);
batch.save_body_head(&tip)?;
// Reset the body tail to the body head after a txhashset write
batch.save_body_tail(&tip)?;
}
// Rebuild our output_pos index in the db based on fresh UTXO set.
txhashset.init_output_pos_index(&header_pmmr, &batch)?;
txhashset.init_token_output_pos_index(&header_pmmr, &batch)?;
// Rebuild our NRD kernel_pos index based on recent kernel history.
txhashset.init_recent_kernel_pos_index(&header_pmmr, &batch)?;
// Commit all the changes to the db.
batch.commit()?;
debug!("txhashset_write: finished committing the batch (head etc.)");
// Sandbox full validation ok, go to overwrite txhashset on db root
{
let mut txhashset_ref = self.txhashset.write();
// Before overwriting, drop file handlers in underlying txhashset
txhashset_ref.release_backend_files();
// Move sandbox to overwrite
txhashset.release_backend_files();
txhashset::txhashset_replace(sandbox_dir, PathBuf::from(self.db_root.clone()))?;
// Re-open on db root dir
txhashset = txhashset::TxHashSet::open(
self.db_root.clone(),
self.store.clone(),
Some(&header),
)?;
// Replace the chain txhashset with the newly built one.
*txhashset_ref = txhashset;
}
debug!("txhashset_write: replaced our txhashset with the new one");
status.on_done();
Ok(false)
}
/// Cleanup old blocks from the db.
/// Determine the cutoff height from the horizon and the current block height.
/// *Only* runs if we are not in archive mode.
fn remove_historical_blocks(
&self,
header_pmmr: &txhashset::PMMRHandle<BlockHeader>,
batch: &store::Batch<'_>,
) -> Result<(), Error> {
if self.archive_mode() {
return Ok(());
}
let horizon = global::cut_through_horizon() as u64;
let head = batch.head()?;
let tail = match batch.tail() {
Ok(tail) => tail,
Err(_) => Tip::from_header(&self.genesis),
};
let cutoff = head.height.saturating_sub(horizon);
debug!(
"remove_historical_blocks: head height: {}, tail height: {}, horizon: {}, cutoff: {}",
head.height, tail.height, horizon, cutoff,
);
if cutoff == 0 {
return Ok(());
}
let mut count = 0;
let tail_hash = header_pmmr.get_header_hash_by_height(head.height - horizon)?;
let tail = batch.get_block_header(&tail_hash)?;
// Remove old blocks (including short lived fork blocks) which height < tail.height
for block in batch.blocks_iter()? {
if block.header.height < tail.height {
let _ = batch.delete_block(&block.hash());
count += 1;
}
}
batch.save_body_tail(&Tip::from_header(&tail))?;
debug!(
"remove_historical_blocks: removed {} blocks. tail height: {}",
count, tail.height
);
Ok(())
}
/// Triggers chain compaction.
///
/// * compacts the txhashset based on current prune_list
/// * removes historical blocks and associated data from the db (unless archive mode)
///
pub fn compact(&self) -> Result<(), Error> {
// A node may be restarted multiple times in a short period of time.
// We compact at most once per 60 blocks in this situation by comparing
// current "head" and "tail" height to our cut-through horizon and
// allowing an additional 60 blocks in height before allowing a further compaction.
if let (Ok(tail), Ok(head)) = (self.tail(), self.head()) {
let horizon = global::cut_through_horizon() as u64;
let threshold = horizon.saturating_add(60);
let next_compact = tail.height.saturating_add(threshold);
if next_compact > head.height {
debug!(
"compact: skipping startup compaction (next at {})",
next_compact
);
return Ok(());
}
}
// Take a write lock on the txhashset and start a new writeable db batch.
let header_pmmr = self.header_pmmr.read();
let mut txhashset = self.txhashset.write();
let batch = self.store.batch()?;
// Compact the txhashset itself (rewriting the pruned backend files).
{
let head_header = batch.head_header()?;
let current_height = head_header.height;
let horizon_height =
current_height.saturating_sub(global::cut_through_horizon().into());
let horizon_hash = header_pmmr.get_header_hash_by_height(horizon_height)?;
let horizon_header = batch.get_block_header(&horizon_hash)?;
txhashset.compact(&horizon_header, &batch)?;
}
// If we are not in archival mode remove historical blocks from the db.
if !self.archive_mode() {
self.remove_historical_blocks(&header_pmmr, &batch)?;
}
// Make sure our output_pos index is consistent with the UTXO set.
txhashset.init_output_pos_index(&header_pmmr, &batch)?;
txhashset.init_token_output_pos_index(&header_pmmr, &batch)?;
// TODO - Why is this part of chain compaction?
// Rebuild our NRD kernel_pos index based on recent kernel history.
txhashset.init_recent_kernel_pos_index(&header_pmmr, &batch)?;
// Commit all the above db changes.
batch.commit()?;
Ok(())
}
/// returns the last n nodes inserted into the output sum tree
pub fn get_last_n_output(&self, distance: u64) -> Vec<(Hash, OutputIdentifier)> {
self.txhashset.read().last_n_output(distance)
}
/// returns the last n nodes inserted into the token output sum tree
pub fn get_last_n_token_output(&self, distance: u64) -> Vec<(Hash, TokenOutputIdentifier)> {
self.txhashset.read().last_n_token_output(distance)
}
/// as above, for rangeproofs
pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, RangeProof)> {
self.txhashset.read().last_n_rangeproof(distance)
}
/// as above, for token rangeproofs
pub fn get_last_n_token_rangeproof(&self, distance: u64) -> Vec<(Hash, RangeProof)> {
self.txhashset.read().last_n_token_rangeproof(distance)
}
/// as above, for kernels
pub fn get_last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernel)> {
self.txhashset.read().last_n_kernel(distance)
}
/// as above, for token issue proof
pub fn get_last_n_token_issue_proof(&self, distance: u64) -> Vec<(Hash, TokenIssueProof)> {
self.txhashset.read().last_n_token_issue_proof(distance)
}
/// Return Commit's MMR position
pub fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
Ok(self.txhashset.read().get_output_pos(commit)?)
}
/// as above, for token outputs
pub fn get_token_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
Ok(self.txhashset.read().get_token_output_pos(commit)?)
}
/// outputs by insertion index
pub fn unspent_outputs_by_pmmr_index(
&self,
start_index: u64,
max_count: u64,
max_pmmr_index: Option<u64>,
) -> Result<(u64, u64, Vec<Output>), Error> {
let txhashset = self.txhashset.read();
let last_index = match max_pmmr_index {
Some(i) => i,
None => txhashset.highest_output_insertion_index(),
};
let outputs = txhashset.outputs_by_pmmr_index(start_index, max_count, max_pmmr_index);
let rangeproofs =
txhashset.rangeproofs_by_pmmr_index(start_index, max_count, max_pmmr_index);
if outputs.0 != rangeproofs.0 || outputs.1.len() != rangeproofs.1.len() {
return Err(ErrorKind::TxHashSetErr(String::from(
"Output and rangeproof sets don't match",
))
.into());
}
let mut output_vec: Vec<Output> = vec![];
for (ref x, &y) in outputs.1.iter().zip(rangeproofs.1.iter()) {
output_vec.push(Output::new(x.features, x.commitment(), y));
}
Ok((outputs.0, last_index, output_vec))
}
/// outputs by insertion index
pub fn unspent_token_outputs_by_pmmr_index(
&self,
start_index: u64,
max_count: u64,
max_pmmr_index: Option<u64>,
) -> Result<(u64, u64, Vec<TokenOutput>), Error> {
let txhashset = self.txhashset.read();
let last_index = match max_pmmr_index {
Some(i) => i,
None => txhashset.highest_token_output_insertion_index(),
};
let outputs = txhashset.token_outputs_by_pmmr_index(start_index, max_count, max_pmmr_index);
let rangeproofs =
txhashset.token_rangeproofs_by_pmmr_index(start_index, max_count, max_pmmr_index);
if outputs.0 != rangeproofs.0 || outputs.1.len() != rangeproofs.1.len() {
return Err(ErrorKind::TxHashSetErr(String::from(
"Token Output and rangeproof sets don't match",
))
.into());
}
let mut output_vec: Vec<TokenOutput> = vec![];
for (ref x, &y) in outputs.1.iter().zip(rangeproofs.1.iter()) {
output_vec.push(TokenOutput::new(
x.features,
x.token_type,
x.commitment(),
y,
));
}
Ok((outputs.0, last_index, output_vec))
}
/// Return the output pmmr index range covering a particular range of blocks
pub fn block_height_range_to_pmmr_indices(
&self,
start_block_height: u64,
end_block_height: Option<u64>,
) -> Result<(u64, u64), Error> {
let end_block_height = match end_block_height {
Some(h) => h,
None => self.head_header()?.height,
};
// Return headers at the given heights
let prev_to_start_header =
self.get_header_by_height(start_block_height.saturating_sub(1))?;
let end_header = self.get_header_by_height(end_block_height)?;
Ok((
prev_to_start_header.output_mmr_size + 1,
end_header.output_mmr_size,
))
}
/// Return the token output pmmr index range covering a particular range of blocks
pub fn block_height_range_to_token_pmmr_indices(
&self,
start_block_height: u64,
end_block_height: Option<u64>,
) -> Result<(u64, u64), Error> {
let end_block_height = match end_block_height {
Some(h) => h,
None => self.head_header()?.height,
};
// Return headers at the given heights
let prev_to_start_header =
self.get_header_by_height(start_block_height.saturating_sub(1))?;
let end_header = self.get_header_by_height(end_block_height)?;
Ok((
prev_to_start_header.token_output_mmr_size + 1,
end_header.token_output_mmr_size,
))
}
/// Orphans pool size
pub fn orphans_len(&self) -> usize {
self.orphans.len()
}
/// Tip (head) of the block chain.
pub fn head(&self) -> Result<Tip, Error> {
self.store
.head()
.map_err(|e| ErrorKind::StoreErr(e, "chain head".to_owned()).into())
}
/// Tail of the block chain in this node after compact (cross-block cut-through)
pub fn tail(&self) -> Result<Tip, Error> {
self.store
.tail()
.map_err(|e| ErrorKind::StoreErr(e, "chain tail".to_owned()).into())
}
/// Tip (head) of the header chain.
pub fn header_head(&self) -> Result<Tip, Error> {
self.store
.header_head()
.map_err(|e| ErrorKind::StoreErr(e, "header head".to_owned()).into())
}
/// Block header for the chain head
pub fn head_header(&self) -> Result<BlockHeader, Error> {
self.store
.head_header()
.map_err(|e| ErrorKind::StoreErr(e, "chain head header".to_owned()).into())
}
/// Gets a block by hash
pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
self.store
.get_block(h)
.map_err(|e| ErrorKind::StoreErr(e, "chain get block".to_owned()).into())
}
/// Gets a block header by hash
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
self.store
.get_block_header(h)
.map_err(|e| ErrorKind::StoreErr(e, "chain get header".to_owned()).into())
}
/// Get previous block header.
pub fn get_previous_header(&self, header: &BlockHeader) -> Result<BlockHeader, Error> {
self.store
.get_previous_header(header)
.map_err(|e| ErrorKind::StoreErr(e, "chain get previous header".to_owned()).into())
}
/// Get block_sums by header hash.
pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
self.store
.get_block_sums(h)
.map_err(|e| ErrorKind::StoreErr(e, "chain get block_sums".to_owned()).into())
}
/// Get block_token_sums by header hash.
pub fn get_block_token_sums(&self, h: &Hash) -> Result<BlockTokenSums, Error> {
self.store
.get_block_token_sums(h)
.map_err(|e| ErrorKind::StoreErr(e, "chain get block_token_sums".to_owned()).into())
}
/// Gets the block header at the provided height.
/// Note: Takes a read lock on the header_pmmr.
pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
let hash = self.get_header_hash_by_height(height)?;
self.get_block_header(&hash)
}
/// Gets the header hash at the provided height.
/// Note: Takes a read lock on the header_pmmr.
fn get_header_hash_by_height(&self, height: u64) -> Result<Hash, Error> {
self.header_pmmr.read().get_header_hash_by_height(height)
}
/// Migrate our local db from v3 to v4.
/// "commit only" inputs.
fn migrate_db_v3_to_v4(store: &ChainStore) -> Result<(), Error> {
let mut keys_to_migrate = vec![];
for (k, v) in store.batch()?.blocks_raw_iter()? {
// We want to migrate all blocks that cannot be read via v4 protocol version.
let block_v3: Result<Block, _> =
ser::deserialize(&mut Cursor::new(&v), ProtocolVersion(3));
let block_v4: Result<Block, _> =
ser::deserialize(&mut Cursor::new(&v), ProtocolVersion(4));
if let (Ok(_), Err(_)) = (block_v3, block_v4) {
keys_to_migrate.push(k);
}
}
debug!(
"migrate_db_v3_to_v4: {} blocks to migrate",
keys_to_migrate.len()
);
let mut count = 0;
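// Migrate in chunks of 100 blocks so each db batch (one commit) stays
// small and progress is preserved if the node is interrupted.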
keys_to_migrate.chunks(100).try_for_each(|keys| {
let batch = store.batch()?;
for key in keys {
batch.migrate_block(&key, ProtocolVersion(3), ProtocolVersion(4))?;
count += 1;
}
batch.commit()?;
debug!(
"migrate_db_v3_to_v4: successfully migrated {} blocks",
count
);
Ok(())
})
}
/// Gets the block header in which a given output appears in the txhashset.
pub fn get_header_for_output(&self, commit: Commitment) -> Result<BlockHeader, Error> {
let header_pmmr = self.header_pmmr.read();
let txhashset = self.txhashset.read();
let (_, pos) = match txhashset.get_unspent(commit)? {
Some(o) => o,
None => return Err(ErrorKind::OutputNotFound.into()),
};
let hash = header_pmmr.get_header_hash_by_height(pos.height)?;
Ok(self.get_block_header(&hash)?)
}
/// Gets the block header in which a given token output appears in the txhashset.
pub fn get_header_for_token_output(
&self,
output_ref: &TokenOutputIdentifier,
) -> Result<BlockHeader, Error> {
let header_pmmr = self.header_pmmr.read();
let txhashset = self.txhashset.read();
let output_pos = match txhashset.get_token_unspent(output_ref)? {
Some(o) => o,
None => return Err(ErrorKind::OutputNotFound.into()),
};
let hash = header_pmmr.get_header_hash_by_height(output_pos.height)?;
Ok(self.get_block_header(&hash)?)
}
/// Gets the kernel with a given excess and the block height it is included in.
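///
/// A usage sketch (a `chain` handle and an `excess` commitment are assumed
/// to be in scope; not part of the original API docs):
/// ```ignore
/// if let Some((kernel, height, mmr_index)) =
///     chain.get_kernel_height(&excess, None, None)? {
///     println!("kernel included at height {} (mmr index {})", height, mmr_index);
/// }
/// ```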
pub fn get_kernel_height(
&self,
excess: &Commitment,
min_height: Option<u64>,
max_height: Option<u64>,
) -> Result<Option<(TxKernel, u64, u64)>, Error> {
let head = self.head()?;
if let (Some(min), Some(max)) = (min_height, max_height) {
if min > max {
return Ok(None);
}
}
let min_index = match min_height {
Some(0) => None,
Some(h) => {
if h > head.height {
return Ok(None);
}
let header = self.get_header_by_height(h)?;
let prev_header = self.get_previous_header(&header)?;
Some(prev_header.kernel_mmr_size + 1)
}
None => None,
};
let max_index = match max_height {
Some(h) => {
if h > head.height {
None
} else {
let header = self.get_header_by_height(h)?;
Some(header.kernel_mmr_size)
}
}
None => None,
};
let (kernel, mmr_index) = match self
.txhashset
.read()
.find_kernel(&excess, min_index, max_index)
{
Some(k) => k,
None => return Ok(None),
};
let header = self.get_header_for_kernel_index(mmr_index, min_height, max_height)?;
Ok(Some((kernel, header.height, mmr_index)))
}
/// Gets the token kernel with a given excess and the block height it is included in.
pub fn get_token_kernel_height(
&self,
excess: &Commitment,
min_height: Option<u64>,
max_height: Option<u64>,
) -> Result<Option<(TokenTxKernel, u64, u64)>, Error> {
let min_index = match min_height {
Some(0) => None,
Some(h) => Some(self.get_header_by_height(h - 1)?.token_kernel_mmr_size + 1),
None => None,
};
let max_index = match max_height {
Some(h) => Some(self.get_header_by_height(h)?.token_kernel_mmr_size),
None => None,
};
let (kernel, mmr_index) = match self
.txhashset
.read()
.find_token_kernel(&excess, min_index, max_index)
{
Some(k) => k,
None => return Ok(None),
};
let header = self.get_header_for_token_kernel_index(mmr_index, min_height, max_height)?;
Ok(Some((kernel, header.height, mmr_index)))
}
/// Gets the block header in which a given kernel mmr index appears in the txhashset.
pub fn get_header_for_kernel_index(
&self,
kernel_mmr_index: u64,
min_height: Option<u64>,
max_height: Option<u64>,
) -> Result<BlockHeader, Error> {
let header_pmmr = self.header_pmmr.read();
let mut min = min_height.unwrap_or(0).saturating_sub(1);
let mut max = match max_height {
Some(h) => h,
None => self.head()?.height,
};
loop {
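// Binary search over heights in (min, max]: the kernel at
// `kernel_mmr_index` was added by the first block whose cumulative
// `kernel_mmr_size` reaches that index.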
let search_height = max - (max - min) / 2;
let hash = header_pmmr.get_header_hash_by_height(search_height)?;
let h = self.get_block_header(&hash)?;
if search_height == 0 {
return Ok(h);
}
let hash_prev = header_pmmr.get_header_hash_by_height(search_height - 1)?;
let h_prev = self.get_block_header(&hash_prev)?;
if kernel_mmr_index > h.kernel_mmr_size {
min = search_height;
} else if kernel_mmr_index < h_prev.kernel_mmr_size {
max = search_height;
} else {
if kernel_mmr_index == h_prev.kernel_mmr_size {
return Ok(h_prev);
}
return Ok(h);
}
}
}
/// Gets the block header in which a given token kernel mmr index appears in the txhashset.
pub fn get_header_for_token_kernel_index(
&self,
token_kernel_mmr_index: u64,
min_height: Option<u64>,
max_height: Option<u64>,
) -> Result<BlockHeader, Error> {
let header_pmmr = self.header_pmmr.read();
let mut min = min_height.unwrap_or(0).saturating_sub(1);
let mut max = match max_height {
Some(h) => h,
None => self.head()?.height,
};
loop {
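// Same binary search as `get_header_for_kernel_index`, driven by the
// cumulative `token_kernel_mmr_size` in each header.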
let search_height = max - (max - min) / 2;
let hash = header_pmmr.get_header_hash_by_height(search_height)?;
let h = self.get_block_header(&hash)?;
if search_height == 0 {
return Ok(h);
}
let hash_prev = header_pmmr.get_header_hash_by_height(search_height - 1)?;
let h_prev = self.get_block_header(&hash_prev)?;
if token_kernel_mmr_index > h.token_kernel_mmr_size {
min = search_height;
} else if token_kernel_mmr_index < h_prev.token_kernel_mmr_size {
max = search_height;
} else {
if token_kernel_mmr_index == h_prev.token_kernel_mmr_size {
return Ok(h_prev);
}
return Ok(h);
}
}
}
/// Verifies the given block header is actually on the current chain.
/// Checks the header_by_height index to verify the header is where we say
/// it is
pub fn is_on_current_chain(&self, header: &BlockHeader) -> Result<(), Error> {
let chain_header = self.get_header_by_height(header.height)?;
if chain_header.hash() == header.hash() {
Ok(())
} else {
Err(ErrorKind::Other("not on current chain".to_string()).into())
}
}
/// Gets the header hashes at the provided heights.
/// Note: This is based on the provided sync_head to support syncing against a fork.
pub fn get_locator_hashes(&self, sync_head: Tip, heights: &[u64]) -> Result<Vec<Hash>, Error> {
let mut header_pmmr = self.header_pmmr.write();
txhashset::header_extending_readonly(&mut header_pmmr, &self.store(), |ext, batch| {
let header = batch.get_block_header(&sync_head.hash())?;
pipe::rewind_and_apply_header_fork(&header, ext, batch)?;
let hashes = heights
.iter()
.filter_map(|h| ext.get_header_hash_by_height(*h))
.collect();
Ok(hashes)
})
}
/// Builds an iterator on blocks starting from the current chain head and
/// running backward. Specialized to return information pertaining to block
/// difficulty calculation (timestamp and previous difficulties).
pub fn difficulty_iter(&self) -> Result<store::DifficultyIter<'_>, Error> {
let head = self.head()?;
let store = self.store.clone();
Ok(store::DifficultyIter::from(head.last_block_h, store))
}
/// Check whether we have a block without reading it
pub fn block_exists(&self, h: Hash) -> Result<bool, Error> {
self.store
.block_exists(&h)
.map_err(|e| ErrorKind::StoreErr(e, "chain block exists".to_owned()).into())
}
}
fn setup_head(
genesis: &Block,
store: &store::ChainStore,
header_pmmr: &mut txhashset::PMMRHandle<BlockHeader>,
txhashset: &mut txhashset::TxHashSet,
) -> Result<(), Error> {
let mut batch = store.batch()?;
// Apply the genesis header to header MMR.
{
if batch.get_block_header(&genesis.hash()).is_err() {
batch.save_block_header(&genesis.header)?;
}
if header_pmmr.last_pos == 0 {
txhashset::header_extending(header_pmmr, &mut batch, |ext, _| {
ext.apply_header(&genesis.header)
})?;
}
}
// Make sure our header PMMR is consistent with header_head from db if it exists.
// If header_head is missing in db then use head of header PMMR.
if let Ok(head) = batch.header_head() {
header_pmmr.init_head(&head)?;
txhashset::header_extending(header_pmmr, &mut batch, |ext, batch| {
let header = batch.get_block_header(&head.hash())?;
ext.rewind(&header)
})?;
} else {
let hash = header_pmmr.head_hash()?;
let header = batch.get_block_header(&hash)?;
batch.save_header_head(&Tip::from_header(&header))?;
}
// check if we have a head in store, otherwise the genesis block is it
let head_res = batch.head();
let mut head: Tip;
match head_res {
Ok(h) => {
head = h;
loop {
// Use current chain tip if we have one.
// Note: We are rewinding and validating against a writeable extension.
// If validation is successful we will truncate the backend files
// to match the provided block header.
let header = batch.get_block_header(&head.last_block_h)?;
let res = txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| {
pipe::rewind_and_apply_fork(&header, ext, batch)?;
let extension = &mut ext.extension;
extension.validate_roots(&header)?;
// now check we have the "block sums" for the block in question
// if we have no sums (migrating an existing node) we need to go
// back to the txhashset and sum the outputs and kernels
if header.height > 0 && batch.get_block_sums(&header.hash()).is_err() {
debug!(
"init: building (missing) block sums for {} @ {}",
header.height,
header.hash()
);
// Do a full (and slow) validation of the txhashset extension
// to calculate the utxo_sum and kernel_sum at this block height.
let (utxo_sum, kernel_sum) =
extension.validate_kernel_sums(&genesis.header, &header)?;
// Save the block_sums to the db for use later.
batch.save_block_sums(
&header.hash(),
BlockSums {
utxo_sum,
kernel_sum,
},
)?;
}
if batch.get_block_token_sums(&header.hash()).is_err() {
debug!(
"init: building (missing) block token sums for {} @ {}",
header.height,
header.hash()
);
// Do a full (and slow) validation of the txhashset extension
// to calculate the block_token_sums at this block height.
let block_token_sums = extension.validate_token_kernel_sums()?;
// Save the block_token_sums to the db for use later.
batch.save_block_token_sums(&header.hash(), &block_token_sums)?;
}
debug!(
"init: rewinding and validating before we start... {} at {}",
header.hash(),
header.height,
);
Ok(())
});
if res.is_ok() {
break;
} else {
// We may have corrupted the MMR backend files last time we stopped the
// node. If this happens we rewind to the previous header,
// delete the "bad" block and try again.
let prev_header = batch.get_block_header(&head.prev_block_h)?;
txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| {
pipe::rewind_and_apply_fork(&prev_header, ext, batch)
})?;
// Now "undo" the latest block and forget it ever existed.
// We will request it from a peer during sync as necessary.
{
let _ = batch.delete_block(&header.hash());
head = Tip::from_header(&prev_header);
batch.save_body_head(&head)?;
}
}
}
}
Err(NotFoundErr(_)) => {
let mut sums = BlockSums::default();
// Save the genesis header with a "zero" header_root.
// We will update this later once we have the correct header_root.
batch.save_block(&genesis)?;
batch.save_spent_index(&genesis.hash(), &vec![])?;
batch.save_body_head(&Tip::from_header(&genesis.header))?;
if !genesis.kernels().is_empty() {
let (utxo_sum, kernel_sum) = (sums, genesis as &dyn Committed).verify_kernel_sums(
genesis.header.overage(),
genesis.header.total_kernel_offset(),
)?;
sums = BlockSums {
utxo_sum,
kernel_sum,
};
}
txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| {
ext.extension
.apply_block(&genesis, ext.header_extension, batch)
})?;
// Save the block_sums to the db for use later.
batch.save_block_sums(&genesis.hash(), sums)?;
batch.save_block_token_sums(&genesis.hash(), &BlockTokenSums::default())?;
info!("init: saved genesis: {:?}", genesis.hash());
}
Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()).into()),
};
batch.commit()?;
Ok(())
}
| 32.305483 | 108 | 0.689227 |
9b9f6bc898489c21db036078fdcddb9bc1ad3396
| 5,879 |
use std::collections::HashMap;
#[cfg(feature = "simd-json")]
use simd_json::Mutable;
use super::{CreateAllowedMentions, CreateEmbed};
use crate::builder::CreateComponents;
use crate::json::{self, from_number, Value};
use crate::model::channel::AttachmentType;
use crate::model::interactions::InteractionApplicationCommandCallbackDataFlags;
#[derive(Clone, Debug, Default)]
pub struct CreateInteractionResponseFollowup<'a>(
pub HashMap<&'static str, Value>,
pub Vec<AttachmentType<'a>>,
);
impl<'a> CreateInteractionResponseFollowup<'a> {
/// Set the content of the message.
///
/// **Note**: Message contents must be under 2000 unicode code points.
#[inline]
pub fn content<D: ToString>(&mut self, content: D) -> &mut Self {
self._content(content.to_string())
}
fn _content(&mut self, content: String) -> &mut Self {
self.0.insert("content", Value::from(content));
self
}
/// Override the default username of the webhook
#[inline]
pub fn username<D: ToString>(&mut self, username: D) -> &mut Self {
self._username(username.to_string())
}
fn _username(&mut self, username: String) -> &mut Self {
self.0.insert("username", Value::from(username));
self
}
/// Override the default avatar of the webhook
#[inline]
pub fn avatar<D: ToString>(&mut self, avatar_url: D) -> &mut Self {
self._avatar(avatar_url.to_string())
}
fn _avatar(&mut self, avatar_url: String) -> &mut Self {
self.0.insert("avatar_url", Value::from(avatar_url));
self
}
/// Set whether the message is text-to-speech.
///
/// Think carefully before setting this to `true`.
///
/// Defaults to `false`.
pub fn tts(&mut self, tts: bool) -> &mut Self {
self.0.insert("tts", Value::from(tts));
self
}
/// Appends a file to the message.
pub fn add_file<T: Into<AttachmentType<'a>>>(&mut self, file: T) -> &mut Self {
self.1.push(file.into());
self
}
/// Appends a list of files to the message.
pub fn add_files<T: Into<AttachmentType<'a>>, It: IntoIterator<Item = T>>(
&mut self,
files: It,
) -> &mut Self {
self.1.extend(files.into_iter().map(|f| f.into()));
self
}
/// Sets a list of files to include in the message.
///
/// Calling this multiple times will overwrite the file list.
/// To append files, call [`Self::add_file`] or [`Self::add_files`] instead.
pub fn files<T: Into<AttachmentType<'a>>, It: IntoIterator<Item = T>>(
&mut self,
files: It,
) -> &mut Self {
self.1 = files.into_iter().map(|f| f.into()).collect();
self
}
/// Create an embed for the message.
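///
/// A minimal usage sketch (the `followup` builder is assumed to come from
/// the surrounding interaction response code):
/// ```ignore
/// followup.embed(|e| e.title("Status").description("All good"));
/// ```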
pub fn embed<F>(&mut self, f: F) -> &mut Self
where
F: FnOnce(&mut CreateEmbed) -> &mut CreateEmbed,
{
let mut embed = CreateEmbed::default();
f(&mut embed);
self.add_embed(embed)
}
/// Adds an embed to the message.
pub fn add_embed(&mut self, embed: CreateEmbed) -> &mut Self {
let map = json::hashmap_to_json_map(embed.0);
let embed = Value::from(map);
self.0
.entry("embeds")
.or_insert_with(|| Value::from(Vec::<Value>::new()))
.as_array_mut()
.expect("couldn't add embed")
.push(embed);
self
}
/// Adds multiple embeds to the message.
pub fn add_embeds(&mut self, embeds: Vec<CreateEmbed>) -> &mut Self {
for embed in embeds {
self.add_embed(embed);
}
self
}
/// Sets a single embed to include in the message
///
/// Calling this will overwrite the embed list.
/// To append embeds, call [`Self::add_embed`] instead.
pub fn set_embed(&mut self, embed: CreateEmbed) -> &mut Self {
let map = json::hashmap_to_json_map(embed.0);
let embed = Value::from(map);
self.0.insert("embeds", Value::from(vec![embed]));
self
}
/// Sets a list of embeds to include in the message.
///
/// Calling this multiple times will overwrite the embed list.
/// To append embeds, call [`Self::add_embed`] instead.
pub fn set_embeds(&mut self, embeds: impl IntoIterator<Item = CreateEmbed>) -> &mut Self {
let embeds = embeds
.into_iter()
.map(|embed| json::hashmap_to_json_map(embed.0).into())
.collect::<Vec<Value>>();
self.0.insert("embeds", Value::from(embeds));
self
}
/// Set the allowed mentions for the message.
pub fn allowed_mentions<F>(&mut self, f: F) -> &mut Self
where
F: FnOnce(&mut CreateAllowedMentions) -> &mut CreateAllowedMentions,
{
let mut allowed_mentions = CreateAllowedMentions::default();
f(&mut allowed_mentions);
let map = json::hashmap_to_json_map(allowed_mentions.0);
let allowed_mentions = Value::from(map);
self.0.insert("allowed_mentions", allowed_mentions);
self
}
/// Sets the flags for the response.
pub fn flags(&mut self, flags: InteractionApplicationCommandCallbackDataFlags) -> &mut Self {
self.0.insert("flags", from_number(flags.bits()));
self
}
/// Creates components for this message.
pub fn components<F>(&mut self, f: F) -> &mut Self
where
F: FnOnce(&mut CreateComponents) -> &mut CreateComponents,
{
let mut components = CreateComponents::default();
f(&mut components);
self.0.insert("components", Value::from(components.0));
self
}
/// Sets the components of this message.
pub fn set_components(&mut self, components: CreateComponents) -> &mut Self {
self.0.insert("components", Value::Array(components.0));
self
}
}
| 31.271277 | 97 | 0.599932 |
64fbecd0389df5a15f4c246b4edd6ef01618c900
| 2,464 |
use crate::assets::ASSETS_DIR;
use rodio::{Decoder, Sink, Source};
use std::io::Cursor;
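/// Plays looping background music and one-shot sound effects on two
/// independent rodio sinks. A rodio `Sink` cannot be reused once stopped,
/// so switching tracks rebuilds the corresponding sink.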
pub struct AudioManager {
music_track: Sink,
se_track: Sink,
music_volume: f32,
se_volume: f32,
}
impl AudioManager {
pub fn new() -> AudioManager {
let music_device = rodio::default_output_device().unwrap();
let se_device = rodio::default_output_device().unwrap();
AudioManager {
music_track: rodio::Sink::new(&music_device),
se_track: rodio::Sink::new(&se_device),
music_volume: 1.0,
se_volume: 1.0,
}
}
pub fn get_track_volumes(&mut self) -> (f32, f32) {
(self.music_track.volume(), self.se_track.volume())
}
pub fn set_music_track_volume(&mut self, value: f32) {
self.music_volume = value;
self.music_track.set_volume(value);
}
pub fn set_se_track_volume(&mut self, value: f32) {
self.se_volume = value;
self.se_track.set_volume(value);
}
pub fn play_music_track(&mut self, file_name: &str) {
if !self.music_track.empty() {
self.music_track.stop();
let music_device = rodio::default_output_device().unwrap();
self.music_track = rodio::Sink::new(&music_device);
self.music_track.set_volume(self.music_volume);
}
let file = Cursor::new(ASSETS_DIR.get_file(file_name).unwrap().contents());
self.music_track
.append(Decoder::new(file).unwrap().repeat_infinite());
self.music_track.play();
}
pub fn _stop_music_track(&mut self) {
self.music_track.stop();
// Sinks cannot be reused after stopping, so the code below replaces the sink.
// TODO: Set sink volume as stored in settings?
let music_device = rodio::default_output_device().unwrap();
self.music_track = rodio::Sink::new(&music_device);
self.music_track.set_volume(self.music_volume);
}
pub fn play_sound_effect(&mut self, file_name: &str) {
if !self.se_track.empty() {
self.se_track.stop();
let se_device = rodio::default_output_device().unwrap();
self.se_track = rodio::Sink::new(&se_device);
self.se_track.set_volume(self.se_volume);
}
let file = Cursor::new(ASSETS_DIR.get_file(file_name).unwrap().contents());
self.se_track.append(Decoder::new(file).unwrap());
self.se_track.play();
}
}
| 32.853333 | 83 | 0.616477 |
e6bca6496a7ae2cdd482b0f47d02b9fa5359d255
| 4,175 |
// Generated from definition io.k8s.api.extensions.v1beta1.HTTPIngressPath
/// HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct HTTPIngressPath {
/// Backend defines the referenced service endpoint to which the traffic will be forwarded to.
pub backend: crate::v1_9::api::extensions::v1beta1::IngressBackend,
/// Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional "path" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.
pub path: Option<String>,
}
impl<'de> serde::Deserialize<'de> for HTTPIngressPath {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_backend,
Key_path,
Other,
}
impl<'de> serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
Ok(match v {
"backend" => Field::Key_backend,
"path" => Field::Key_path,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = HTTPIngressPath;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "struct HTTPIngressPath")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
let mut value_backend: Option<crate::v1_9::api::extensions::v1beta1::IngressBackend> = None;
let mut value_path: Option<String> = None;
while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_backend => value_backend = Some(serde::de::MapAccess::next_value(&mut map)?),
Field::Key_path => value_path = serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(HTTPIngressPath {
backend: value_backend.ok_or_else(|| serde::de::Error::missing_field("backend"))?,
path: value_path,
})
}
}
deserializer.deserialize_struct(
"HTTPIngressPath",
&[
"backend",
"path",
],
Visitor,
)
}
}
impl serde::Serialize for HTTPIngressPath {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
let mut state = serializer.serialize_struct(
"HTTPIngressPath",
1 +
self.path.as_ref().map_or(0, |_| 1),
)?;
serde::ser::SerializeStruct::serialize_field(&mut state, "backend", &self.backend)?;
if let Some(value) = &self.path {
serde::ser::SerializeStruct::serialize_field(&mut state, "path", value)?;
}
serde::ser::SerializeStruct::end(state)
}
}
| 42.171717 | 405 | 0.547305 |
dd907476427c2958cd8e93a82c59b59c9f7abfb0
| 6,678 |
use chrono::Duration;
use rgb::RGBA8;
use serde::ser::{self, Serializer};
#[derive(Debug, thiserror::Error)]
enum Error {
#[error("duration of {} days is too big to be serialized as nanoseconds", .0.num_days())]
DurationTooBig(Duration),
}
pub fn duration_millis_opt<S>(value: &Option<Duration>, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match value {
Some(duration) => serializer.serialize_some(&duration.num_milliseconds()),
None => serializer.serialize_none(),
}
}
pub fn duration_millis<S>(value: &Duration, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_i64(value.num_milliseconds())
}
pub fn duration_nanos<S>(value: &Duration, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match value.num_nanoseconds() {
Some(nanos) => serializer.serialize_i64(nanos),
None => Err(ser::Error::custom(Error::DurationTooBig(*value))),
}
}
pub fn bitflags_u8_opt<S, T>(value: &Option<T>, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: Into<u8> + Copy,
{
match value {
Some(flags) => serializer.serialize_some(&(*flags).into()),
None => serializer.serialize_none(),
}
}
pub fn rgba8_inverse_opt<S>(value: &Option<RGBA8>, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match value {
Some(rgba) => {
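// Pack the channels as 0xAABBGGRR, i.e. the byte order is the
// inverse of the conventional 0xRRGGBBAA layout (hence "inverse").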
let abgr = (rgba.a as u32) << 24
| (rgba.b as u32) << 16
| (rgba.g as u32) << 8
| (rgba.r as u32);
serializer.serialize_some(&abgr)
}
None => serializer.serialize_none(),
}
}
#[cfg(test)]
mod tests {
use bitflags::bitflags;
use serde::Serialize;
use serde_test::{assert_ser_tokens, assert_ser_tokens_error, Token};
use super::*;
#[test]
fn ser_duration_millis_opt() {
#[derive(Serialize)]
struct SimpleDuration {
#[serde(serialize_with = "duration_millis_opt")]
value: Option<Duration>,
}
assert_ser_tokens(
&SimpleDuration {
value: Some(Duration::milliseconds(150)),
},
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::Some,
Token::I64(150),
Token::StructEnd,
],
);
assert_ser_tokens(
&SimpleDuration { value: None },
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::None,
Token::StructEnd,
],
);
}
#[test]
fn ser_duration_millis() {
#[derive(Serialize)]
struct SimpleDuration {
#[serde(serialize_with = "duration_millis")]
value: Duration,
}
assert_ser_tokens(
&SimpleDuration {
value: Duration::milliseconds(150),
},
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::I64(150),
Token::StructEnd,
],
);
}
#[test]
fn ser_duration_nanos() {
#[derive(Serialize)]
struct SimpleDuration {
#[serde(serialize_with = "duration_nanos")]
value: Duration,
}
assert_ser_tokens(
&SimpleDuration {
value: Duration::nanoseconds(150),
},
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::I64(150),
Token::StructEnd,
],
);
assert_ser_tokens_error(
&SimpleDuration {
value: Duration::days(365_000_000),
},
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
],
"duration of 365000000 days is too big to be serialized as nanoseconds",
);
}
#[test]
fn ser_bitflags_u8_opt() {
bitflags! {
struct Flags: u8 {
const ONE = 1;
const TWO = 2;
}
}
impl From<Flags> for u8 {
fn from(value: Flags) -> Self {
value.bits
}
}
#[derive(Serialize)]
struct SimpleFlags {
#[serde(serialize_with = "bitflags_u8_opt")]
value: Option<Flags>,
}
assert_ser_tokens(
&SimpleFlags {
value: Some(Flags::ONE | Flags::TWO),
},
&[
Token::Struct {
name: "SimpleFlags",
len: 1,
},
Token::Str("value"),
Token::Some,
Token::U8(3),
Token::StructEnd,
],
);
assert_ser_tokens(
&SimpleFlags { value: None },
&[
Token::Struct {
name: "SimpleFlags",
len: 1,
},
Token::Str("value"),
Token::None,
Token::StructEnd,
],
);
}
#[test]
fn ser_rgba8_inverse_opt() {
#[derive(Serialize)]
struct SimpleDuration {
#[serde(serialize_with = "rgba8_inverse_opt")]
value: Option<RGBA8>,
}
assert_ser_tokens(
&SimpleDuration {
value: Some(RGBA8::new(1, 2, 3, 4)),
},
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::Some,
Token::U32(0x04030201),
Token::StructEnd,
],
);
assert_ser_tokens(
&SimpleDuration { value: None },
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::None,
Token::StructEnd,
],
);
}
}
| 25.48855 | 97 | 0.4413 |
9befdf30184063f2f6791ffa29b6a47da45d6021
| 7,210 |
/*
* Copyright 2019 The Starlark in Rust Authors.
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Features we use
#![feature(box_syntax)]
//
// Plugins
#![cfg_attr(feature = "custom_linter", feature(plugin))]
#![cfg_attr(feature = "custom_linter", allow(deprecated))] // :(
#![cfg_attr(feature = "custom_linter", plugin(gazebo_lint))]
// Disagree these are good hints
#![allow(clippy::type_complexity)]
use std::{ffi::OsStr, fmt, fmt::Display, fs, path::PathBuf, sync::Arc};
use anyhow::anyhow;
use eval::Context;
use gazebo::prelude::*;
use itertools::Either;
use starlark::read_line::ReadLine;
use structopt::{clap::AppSettings, StructOpt};
use walkdir::WalkDir;
use crate::types::{LintMessage, Message, Severity};
mod dap;
mod eval;
mod lsp;
mod types;
#[derive(Debug, StructOpt)]
#[structopt(
name = "starlark",
about = "Evaluate Starlark code",
global_settings(&[AppSettings::ColoredHelp]),
)]
pub struct Args {
#[structopt(
long = "interactive",
long = "repl",
short = "i",
help = "Start an interactive REPL."
)]
interactive: bool,
#[structopt(long = "lsp", help = "Start an LSP server.")]
lsp: bool,
#[structopt(long = "dap", help = "Start a DAP server.")]
dap: bool,
#[structopt(long = "check", help = "Run checks and lints.")]
check: bool,
#[structopt(long = "info", help = "Show information about the code.")]
info: bool,
#[structopt(long = "json", help = "Show output as JSON lines.")]
json: bool,
#[structopt(
long = "repeat",
help = "Number of times to repeat the execution",
default_value = "1"
)]
repeat: usize,
#[structopt(
long = "extension",
help = "File extension when searching directories."
)]
extension: Option<String>,
#[structopt(long = "prelude", help = "Files to load in advance.")]
prelude: Vec<PathBuf>,
#[structopt(
long = "expression",
short = "e",
name = "EXPRESSION",
help = "Expressions to evaluate."
)]
evaluate: Vec<String>,
#[structopt(name = "FILE", help = "Files to evaluate.")]
// String instead of PathBuf so we can expand @file things
files: Vec<String>,
}
// We'd really like clap to deal with args-files, but it doesn't yet
// Waiting on: https://github.com/clap-rs/clap/issues/1693.
// This is a minimal version to make basic @file options work.
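// For example, `starlark @args.txt foo.bzl` reads one path per line from
// `args.txt` and then also evaluates `foo.bzl` (illustrative invocation).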
fn expand_args(args: Vec<String>) -> anyhow::Result<Vec<PathBuf>> {
let mut res = Vec::with_capacity(args.len());
for x in args {
match x.strip_prefix('@') {
None => res.push(PathBuf::from(x)),
Some(x) => {
let src = fs::read_to_string(x)?;
for x in src.lines() {
res.push(PathBuf::from(x));
}
}
}
}
Ok(res)
}
// Treat directories as things to recursively walk for .<extension> files,
// and everything else as normal files.
fn expand_dirs(extension: &str, xs: Vec<PathBuf>) -> impl Iterator<Item = PathBuf> {
let extension = Arc::new(extension.to_owned());
xs.into_iter().flat_map(move |x| {
// Have to keep cloning extension so we keep ownership
let extension = extension.dupe();
if x.is_dir() {
Either::Left(
WalkDir::new(x)
.into_iter()
.filter_map(|e| e.ok())
.filter(move |e| e.path().extension() == Some(OsStr::new(extension.as_str())))
.map(|e| e.into_path()),
)
} else {
Either::Right(box vec![x].into_iter())
}
})
}
#[derive(Default)]
struct Stats {
file: usize,
error: usize,
warning: usize,
advice: usize,
disabled: usize,
}
impl Display for Stats {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&format!(
"{} files, {} errors, {} warnings, {} advices, {} disabled",
self.file, self.error, self.warning, self.advice, self.disabled
))
}
}
impl Stats {
fn increment_file(&mut self) {
self.file += 1;
}
fn increment(&mut self, x: Severity) {
match x {
Severity::Error => self.error += 1,
Severity::Warning => self.warning += 1,
Severity::Advice => self.advice += 1,
Severity::Disabled => self.disabled += 1,
}
}
}
fn drain(xs: impl Iterator<Item = Message>, json: bool, stats: &mut Stats) {
for x in xs {
stats.increment(x.severity);
if json {
println!("{}", serde_json::to_string(&LintMessage::new(x)).unwrap());
} else if let Some(error) = x.full_error_with_span {
let mut error = error.to_owned();
if !error.is_empty() && !error.ends_with('\n') {
error.push('\n');
}
print!("{}", error);
} else {
println!("{}", x);
}
}
}
fn interactive(ctx: &Context) -> anyhow::Result<()> {
let mut rl = ReadLine::new();
loop {
match rl.read_line("$> ")? {
Some(line) => {
let mut stats = Stats::default();
drain(ctx.expression(line), false, &mut stats);
}
// User pressed EOF - disconnected terminal, or similar
None => return Ok(()),
}
}
}
fn main() -> anyhow::Result<()> {
let args = Args::from_args();
let ext = args
.extension
.as_ref()
.map_or("bzl", |x| x.as_str())
.trim_start_matches('.');
let mut ctx = Context::new(
args.check,
args.info,
!args.check && !args.info,
&expand_dirs(ext, args.prelude).collect::<Vec<_>>(),
args.interactive,
)?;
let mut stats = Stats::default();
for _ in 0..args.repeat {
for e in args.evaluate.clone() {
stats.increment_file();
drain(ctx.expression(e), args.json, &mut stats);
}
for file in expand_dirs(ext, expand_args(args.files.clone())?) {
stats.increment_file();
drain(ctx.file(&file), args.json, &mut stats);
}
}
if args.interactive {
interactive(&ctx)?;
}
if args.lsp {
ctx.check = true;
ctx.info = false;
ctx.run = false;
lsp::server(ctx)?;
} else if args.dap {
dap::server()
}
if !args.json {
println!("{}", stats);
if stats.error > 0 {
return Err(anyhow!("Failed with {} errors", stats.error));
}
}
Ok(())
}
| 28.054475 | 98 | 0.560055 |
e5b84af18106e1e7618509cb82d18bb4fe40f848
| 327 |
use color_maps::html::HTML_MAP;
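/// Maps an HTML color name to its `#rrggbb` hex form; inputs already in
/// hex form, or names missing from the table, are returned unchanged.
///
/// A usage sketch (assuming the standard `"red"` entry exists in `HTML_MAP`):
/// ```ignore
/// assert_eq!(color_map_adaptor("red"), "#ff0000");
/// assert_eq!(color_map_adaptor("#123456"), "#123456");
/// ```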
pub fn color_map_adaptor(html_name: &str) -> String {
if html_name.starts_with('#') {
return html_name.to_string();
}
if let Some(col) = HTML_MAP.get(html_name) {
format!("#{:02x}{:02x}{:02x}", col.0, col.1, col.2)
} else {
html_name.to_string()
}
}
| 25.153846 | 59 | 0.587156 |
e849c5a5708906d13cb7e7c666d55a49483fe92c
| 7,748 |
// Cooperative optimization solver
use std::collections::BinaryHeap;
use std::io::Write;
extern crate rand;
#[macro_use(max)]
extern crate gelpia_utils;
extern crate gr;
use gelpia_utils::{Quple, INF, NINF, Flt, Parameters, eps_tol, check_diff};
use gr::{GI, width_box, split_box, midpoint_box};
use std::sync::{Barrier, RwLock, Arc, RwLockWriteGuard};
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::Duration;
extern crate function;
use function::FuncObj;
extern crate args;
use args::{process_args};
extern crate time;
/// Returns the guaranteed upper bound for the algorithm
/// from the queue.
fn get_upper_bound(q: &RwLockWriteGuard<BinaryHeap<Quple>>,
f_best_high: f64) -> f64{
let mut max = f_best_high;
for qi in q.iter() {
max = max!{max, qi.fdata.upper()};
}
max
}
fn log_max(q: &RwLockWriteGuard<BinaryHeap<Quple>>,
f_best_low: f64,
f_best_high: f64) {
let max = get_upper_bound(q, f_best_high);
let _ = writeln!(&mut std::io::stderr(),
"lb: {}, possible ub: {}, guaranteed ub: {}",
f_best_low,
f_best_high,
max);
}
#[allow(dead_code)]
fn print_q(q: &RwLockWriteGuard<BinaryHeap<Quple>>) {
let mut lq: BinaryHeap<Quple> = (*q).clone();
while let Some(qi) = lq.pop() {
let (gen, v) = (qi.pf, qi.p);
print!("[{}, {}, {}], ", v, gen, qi.fdata.to_string());
}
println!("\n");
}
/// Returns a tuple (function_estimate, eval_interval, eval_derivative)
/// # Arguments
/// * `f` - The function to evaluate with
/// * `input` - The input domain
fn est_func(f: &FuncObj, input: &Vec<GI>) -> (Flt, GI, Option<Vec<GI>>) {
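// Evaluate f at the midpoint and at the two corner points of the box;
// the largest certified lower bound among them seeds the estimate,
// alongside the interval evaluation and its (optional) derivative.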
let mid = midpoint_box(input);
let (est_m, _) = f.call(&mid);
let (fsx, dfsx) = f.call(&input);
let (fsx_u, _) = f.call(&input.iter()
.map(|&si| GI::new_p(si.upper()))
.collect::<Vec<_>>());
let (fsx_l, _) = f.call(&input.iter()
.map(|&si| GI::new_p(si.lower()))
.collect::<Vec<_>>());
let est_max = est_m.lower().max(fsx_u.lower()).max(fsx_l.lower());
(est_max, fsx, dfsx)
}
// Returns the upper bound, the domain where this bound occurs and a status
// flag indicating whether the answer is complete for the problem.
fn ibba(x_0: Vec<GI>, e_x: Flt, e_f: Flt, e_f_r: Flt,
f_bestag: Arc<RwLock<Flt>>,
f_best_shared: Arc<RwLock<Flt>>,
x_bestbb: Arc<RwLock<Vec<GI>>>,
b1: Arc<Barrier>, b2: Arc<Barrier>,
q: Arc<RwLock<BinaryHeap<Quple>>>,
sync: Arc<AtomicBool>, stop: Arc<AtomicBool>,
f: FuncObj,
logging: bool, max_iters: u32)
-> (Flt, Flt, Vec<GI>) {
let mut best_x = x_0.clone();
let mut iters: u32 = 0;
let (est_max, first_val, _) = est_func(&f, &x_0);
q.write().unwrap().push(Quple{p: est_max, pf: 0, data: x_0.clone(),
fdata: first_val, dfdata: None});
let mut f_best_low = est_max;
let mut f_best_high = est_max;
while !q.read().unwrap().is_empty() && !stop.load(Ordering::Acquire) {
if max_iters != 0 && iters >= max_iters {
break;
}
// Take q as writable during an iteration
let mut q = q.write().unwrap();
let fbl_orig = f_best_low;
f_best_low = max!(f_best_low, *f_bestag.read().unwrap());
if iters % 2048 == 0 {
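// Every 2048 iterations, pay the O(|queue|) cost of computing a
// guaranteed upper bound; terminate early if it is already within e_f
// of the best known upper bound.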
let guaranteed_bound = get_upper_bound(&q, f_best_high);
if (guaranteed_bound - f_best_high).abs() < e_f {
f_best_high = guaranteed_bound;
break;
}
}
if logging && fbl_orig != f_best_low {
log_max(&q, f_best_low, f_best_high);
}
let (ref x, iter_est, fx, ref dfx, gen) =
match q.pop() {
Some(y) => (y.data, y.p, y.fdata, y.dfdata, y.pf),
None => unreachable!()
};
if check_diff(dfx.clone(), x, &x_0) {
continue;
}
if fx.upper() < f_best_low ||
width_box(x, e_x) ||
eps_tol(fx, iter_est, e_f, e_f_r) {
if f_best_high < fx.upper() {
f_best_high = fx.upper();
best_x = x.clone();
if logging {
log_max(&q, f_best_low, f_best_high);
}
}
continue;
} else {
let (x_s, is_split) = split_box(&x);
for sx in x_s {
let (est_max, fsx, dfsx) = est_func(&f, &sx);
if f_best_low < est_max {
f_best_low = est_max;
*x_bestbb.write().unwrap() = sx.clone();
}
iters += 1;
if is_split {
q.push(Quple{p: est_max,
pf: gen+1,
data: sx,
fdata: fsx,
dfdata:dfsx});
}
}
}
}
stop.store(true, Ordering::Release);
(f_best_low, f_best_high, best_x)
}
fn main() {
let args = process_args();
let ref x_0 = args.domain;
let ref fo = args.function;
let x_err = args.x_error;
let y_err = args.y_error;
let y_rel = args.y_error_rel;
let seed = args.seed;
// Early out if there are no input variables...
if x_0.is_empty() {
let result = fo.call(&x_0).0;
println!("[[{},{}], {{}}]", result.lower(), result.upper());
return
}
let q_inner: BinaryHeap<Quple> = BinaryHeap::new();
let q = Arc::new(RwLock::new(q_inner));
let b1 = Arc::new(Barrier::new(3));
let b2 = Arc::new(Barrier::new(3));
let sync = Arc::new(AtomicBool::new(false));
let stop = Arc::new(AtomicBool::new(false));
let f_bestag: Arc<RwLock<Flt>> = Arc::new(RwLock::new(NINF));
let f_best_shared: Arc<RwLock<Flt>> = Arc::new(RwLock::new(NINF));
let x_e = x_0.clone();
let x_i = x_0.clone();
let x_bestbb = Arc::new(RwLock::new(x_0.clone()));
let ibba_thread =
{
let q = q.clone();
let b1 = b1.clone();
let b2 = b2.clone();
let f_bestag = f_bestag.clone();
let f_best_shared = f_best_shared.clone();
let x_bestbb = x_bestbb.clone();
let sync = sync.clone();
let stop = stop.clone();
let fo_c = fo.clone();
let logging = args.logging;
let iters = args.iters;
thread::Builder::new().name("IBBA".to_string()).spawn(move || {
ibba(x_i, x_err, y_err, y_rel,
f_bestag, f_best_shared,
x_bestbb,
b1, b2, q, sync, stop, fo_c, logging, iters)
})};
let result = ibba_thread.unwrap().join();
if let Ok((min, mut max, mut interval)) = result {
// Go through all remaining intervals from IBBA to find the true
// max
let ref lq = q.read().unwrap();
for i in lq.iter() {
let ref top = *i;
let (ub, dom) = (top.fdata.upper(), &top.data);
if ub > max {
max = ub;
interval = dom.clone();
}
}
println!("[[{},{}], {{", min, max);
for i in 0..args.names.len() {
println!("'{}' : {},", args.names[i], interval[i].to_string());
}
println!("}}]");
}
else {println!("error")}
}
| 30.868526 | 75 | 0.505292 |
1d1067b359d2c2c5097501df3f57e6805c333f0b
| 1,031 |
use crate::timer;
use crate::{camera, render_output::screen};
use crate::{renderer, wgpu_utils::*};
use uniformbuffer::UniformBuffer;
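// Per-frame global uniform data. `#[repr(C)]` plus the Pod/Zeroable impls
// below keep the layout fixed so the struct can be copied to the GPU
// verbatim; the shader-side struct is assumed to match this field order
// and padding.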
#[repr(C)]
#[derive(Clone, Copy)]
pub struct GlobalUBOContent {
camera: camera::CameraUniformBufferContent,
time: timer::FrameTimeUniformBufferContent,
rendering: renderer::GlobalRenderSettingsUniformBufferContent,
screen: screen::ScreenUniformBufferContent,
}
unsafe impl bytemuck::Pod for GlobalUBOContent {}
unsafe impl bytemuck::Zeroable for GlobalUBOContent {}
pub type GlobalUBO = UniformBuffer<GlobalUBOContent>;
pub fn update_global_ubo(
ubo: &mut GlobalUBO,
queue: &wgpu::Queue,
camera: camera::CameraUniformBufferContent,
time: timer::FrameTimeUniformBufferContent,
rendering: renderer::GlobalRenderSettingsUniformBufferContent,
screen: screen::ScreenUniformBufferContent,
) {
ubo.update_content(
queue,
GlobalUBOContent {
camera,
time,
rendering,
screen,
},
);
}
| 27.864865 | 66 | 0.70999 |
9c0d33f92801537baac83fb8693844fb8c79d772
| 4,064 |
use crate::utils::{
get_trait_def_id, if_sequence, implements_trait, parent_node_is_if_expr, paths, span_lint_and_help, SpanlessEq,
};
use rustc_hir::{BinOpKind, Expr, ExprKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
/// **What it does:** Checks comparison chains written with `if` that can be
/// rewritten with `match` and `cmp`.
///
/// **Why is this bad?** `if` is not guaranteed to be exhaustive and conditionals can get
/// repetitive
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust,ignore
/// # fn a() {}
/// # fn b() {}
/// # fn c() {}
/// fn f(x: u8, y: u8) {
/// if x > y {
/// a()
/// } else if x < y {
/// b()
/// } else {
/// c()
/// }
/// }
/// ```
///
/// Could be written:
///
/// ```rust,ignore
/// use std::cmp::Ordering;
/// # fn a() {}
/// # fn b() {}
/// # fn c() {}
/// fn f(x: u8, y: u8) {
/// match x.cmp(&y) {
/// Ordering::Greater => a(),
/// Ordering::Less => b(),
/// Ordering::Equal => c()
/// }
/// }
/// ```
pub COMPARISON_CHAIN,
style,
"`if`s that can be rewritten with `match` and `cmp`"
}
declare_lint_pass!(ComparisonChain => [COMPARISON_CHAIN]);
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for ComparisonChain {
fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr<'_>) {
if expr.span.from_expansion() {
return;
}
// We only care about the top-most `if` in the chain
if parent_node_is_if_expr(expr, cx) {
return;
}
// Check that there exists at least one explicit else condition
let (conds, _) = if_sequence(expr);
if conds.len() < 2 {
return;
}
for cond in conds.windows(2) {
if let (
&ExprKind::Binary(ref kind1, ref lhs1, ref rhs1),
&ExprKind::Binary(ref kind2, ref lhs2, ref rhs2),
) = (&cond[0].kind, &cond[1].kind)
{
if !kind_is_cmp(kind1.node) || !kind_is_cmp(kind2.node) {
return;
}
// Check that both sets of operands are equal
let mut spanless_eq = SpanlessEq::new(cx);
let same_fixed_operands = spanless_eq.eq_expr(lhs1, lhs2) && spanless_eq.eq_expr(rhs1, rhs2);
let same_transposed_operands = spanless_eq.eq_expr(lhs1, rhs2) && spanless_eq.eq_expr(rhs1, lhs2);
if !same_fixed_operands && !same_transposed_operands {
return;
}
// Check that if the operation is the same, either it's not `==` or the operands are transposed
if kind1.node == kind2.node {
if kind1.node == BinOpKind::Eq {
return;
}
if !same_transposed_operands {
return;
}
}
// Check that the type being compared implements `core::cmp::Ord`
let ty = cx.tables().expr_ty(lhs1);
let is_ord = get_trait_def_id(cx, &paths::ORD).map_or(false, |id| implements_trait(cx, ty, id, &[]));
if !is_ord {
return;
}
} else {
// We only care about comparison chains
return;
}
}
span_lint_and_help(
cx,
COMPARISON_CHAIN,
expr.span,
"`if` chain can be rewritten with `match`",
None,
"Consider rewriting the `if` chain to use `cmp` and `match`.",
)
}
}
fn kind_is_cmp(kind: BinOpKind) -> bool {
match kind {
BinOpKind::Lt | BinOpKind::Gt | BinOpKind::Eq => true,
_ => false,
}
}
| 31.261538 | 117 | 0.483268 |
c135c3e97593f65c12bfa22fce5bdc99cd83381a
| 46 |
pub mod solid_color;
pub use solid_color::*;
| 11.5 | 23 | 0.73913 |
e8c3b2480d9aec8fcee961b0ed0c8e0357488b08
| 431 |
// exec-env:RUST_POISON_ON_FREE=1
// Test that we root `x` even though it is found in immutable memory,
// because it is moved.
#[feature(managed_boxes)];
fn free<T>(x: @T) {}
struct Foo {
f: @Bar
}
struct Bar {
g: int
}
fn lend(x: @Foo) -> int {
let y = &x.f.g;
free(x); // specifically here, if x is not rooted, it will be freed
*y
}
pub fn main() {
assert_eq!(lend(@Foo {f: @Bar {g: 22}}), 22);
}
| 15.962963 | 71 | 0.584687 |
2f70847ab6029aa399a7fcc80d51967c717a9ba9
| 157 |
// functions1.rs
// Make me compile! Execute `rustlings hint functions1` for hints :)
fn call_me() {
println!("haha!");
}
fn main() {
call_me();
}
| 14.272727 | 68 | 0.611465 |
48834c91ddcac975b7fc52498761b22978701959
| 2,227 |
use game_input_model::config::ControllerId;
use serde::{Deserialize, Serialize};
use structopt_derive::StructOpt;
/// Parameters to the mapper.
///
/// # Examples
///
/// * `asset_selection return`
/// * `asset_selection join -c 0`
/// * `asset_selection leave -c 0`
/// * `asset_selection switch -c 0 -s default/heat`
/// * `asset_selection select -c 0 -s default/heat`
/// * `asset_selection deselect -c 0`
/// * `asset_selection confirm`
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize, StructOpt)]
#[serde(deny_unknown_fields, rename_all = "snake_case")]
#[structopt(rename_all = "snake_case")]
pub enum AssetSelectionEventArgs {
/// Signal to return from `AssetSelectionState`.
Return,
/// Player has joined / become active.
Join {
/// Controller ID.
///
/// 0 for the first player, 1 for the second player, etcetera.
#[structopt(short, long)]
controller_id: ControllerId,
},
/// Player has left / become inactive.
Leave {
/// Controller ID.
///
/// 0 for the first player, 1 for the second player, etcetera.
#[structopt(short, long)]
controller_id: ControllerId,
},
/// Asset has been selected.
Switch {
/// Controller ID.
///
/// 0 for the first player, 1 for the second player, etcetera.
#[structopt(short, long)]
controller_id: ControllerId,
/// Slug of the asset or random, e.g. "default/heat", "random".
#[structopt(short, long)]
selection: String,
},
/// Asset has been selected.
Select {
/// Controller ID.
///
/// 0 for the first player, 1 for the second player, etcetera.
#[structopt(short, long)]
controller_id: ControllerId,
/// Slug of the asset or random, e.g. "default/heat", "random".
#[structopt(short, long)]
selection: String,
},
/// Asset has been deselected.
Deselect {
/// Controller ID.
///
/// 0 for the first player, 1 for the second player, etcetera.
#[structopt(short, long)]
controller_id: ControllerId,
},
/// Asset selections have been confirmed.
Confirm,
}
| 31.366197 | 71 | 0.599461 |
621e190696137c7fe7d792733ab0f7978acaa23f
| 30,540 |
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::cmp;
use core::mem;
use core::ops::Drop;
use core::ptr::{self, Unique};
use core::slice;
use heap::{Alloc, Layout, Heap};
use super::boxed::Box;
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces Unique::empty() on zero-sized types
/// * Produces Unique::empty() on zero-length allocations
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
/// * Guards against 32-bit systems allocating more than isize::MAX bytes
/// * Guards against overflowing your length
/// * Aborts on OOM
/// * Avoids freeing Unique::empty()
/// * Contains a ptr::Unique and thus endows the user with all related benefits
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
/// to handle the actual things *stored* inside of a RawVec.
///
/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
/// This enables you to use capacity-growing logic to catch the overflows in your length
/// that might occur with zero-sized types.
///
/// However, this means that you need to be careful when roundtripping this type
/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
#[allow(missing_debug_implementations)]
pub struct RawVec<T, A: Alloc = Heap> {
ptr: Unique<T>,
cap: usize,
a: A,
}
impl<T, A: Alloc> RawVec<T, A> {
/// Like `new` but parameterized over the choice of allocator for
/// the returned RawVec.
pub fn new_in(a: A) -> Self {
// !0 is usize::MAX. This branch should be stripped at compile time.
let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };
// Unique::empty() doubles as "unallocated" and "zero-sized allocation"
RawVec {
ptr: Unique::empty(),
cap,
a,
}
}
/// Like `with_capacity` but parameterized over the choice of
/// allocator for the returned RawVec.
#[inline]
pub fn with_capacity_in(cap: usize, a: A) -> Self {
RawVec::allocate_in(cap, false, a)
}
/// Like `with_capacity_zeroed` but parameterized over the choice
/// of allocator for the returned RawVec.
#[inline]
pub fn with_capacity_zeroed_in(cap: usize, a: A) -> Self {
RawVec::allocate_in(cap, true, a)
}
fn allocate_in(cap: usize, zeroed: bool, mut a: A) -> Self {
unsafe {
let elem_size = mem::size_of::<T>();
let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow");
alloc_guard(alloc_size);
// handles ZSTs and `cap = 0` alike
let ptr = if alloc_size == 0 {
mem::align_of::<T>() as *mut u8
} else {
let align = mem::align_of::<T>();
let result = if zeroed {
a.alloc_zeroed(Layout::from_size_align(alloc_size, align).unwrap())
} else {
a.alloc(Layout::from_size_align(alloc_size, align).unwrap())
};
match result {
Ok(ptr) => ptr,
Err(err) => a.oom(err),
}
};
RawVec {
ptr: Unique::new_unchecked(ptr as *mut _),
cap,
a,
}
}
}
}
impl<T> RawVec<T, Heap> {
/// Creates the biggest possible RawVec (on the system heap)
/// without allocating. If T has positive size, then this makes a
/// RawVec with capacity 0. If T has 0 size, then it makes a
/// RawVec with capacity `usize::MAX`. Useful for implementing
/// delayed allocation.
pub fn new() -> Self {
Self::new_in(Heap)
}
/// Creates a RawVec (on the system heap) with exactly the
/// capacity and alignment requirements for a `[T; cap]`. This is
/// equivalent to calling RawVec::new when `cap` is 0 or T is
/// zero-sized. Note that if `T` is zero-sized this means you will
/// *not* get a RawVec with the requested capacity!
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM
#[inline]
pub fn with_capacity(cap: usize) -> Self {
RawVec::allocate_in(cap, false, Heap)
}
/// Like `with_capacity` but guarantees the buffer is zeroed.
#[inline]
pub fn with_capacity_zeroed(cap: usize) -> Self {
RawVec::allocate_in(cap, true, Heap)
}
}
impl<T, A: Alloc> RawVec<T, A> {
/// Reconstitutes a RawVec from a pointer, capacity, and allocator.
///
/// # Undefined Behavior
///
/// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The
/// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
/// If the ptr and capacity come from a RawVec created via `a`, then this is guaranteed.
pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: A) -> Self {
RawVec {
ptr: Unique::new_unchecked(ptr),
cap,
a,
}
}
}
impl<T> RawVec<T, Heap> {
/// Reconstitutes a RawVec from a pointer, capacity.
///
/// # Undefined Behavior
///
/// The ptr must be allocated (on the system heap), and with the given capacity. The
/// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
/// If the ptr and capacity come from a RawVec, then this is guaranteed.
pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
RawVec {
ptr: Unique::new_unchecked(ptr),
cap,
a: Heap,
}
}
/// Converts a `Box<[T]>` into a `RawVec<T>`.
pub fn from_box(mut slice: Box<[T]>) -> Self {
unsafe {
let result = RawVec::from_raw_parts(slice.as_mut_ptr(), slice.len());
mem::forget(slice);
result
}
}
}
impl<T, A: Alloc> RawVec<T, A> {
/// Gets a raw pointer to the start of the allocation. Note that this is
/// Unique::empty() if `cap = 0` or T is zero-sized. In the former case, you must
/// be careful.
pub fn ptr(&self) -> *mut T {
self.ptr.as_ptr()
}
/// Gets the capacity of the allocation.
///
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn cap(&self) -> usize {
if mem::size_of::<T>() == 0 {
!0
} else {
self.cap
}
}
/// Returns a shared reference to the allocator backing this RawVec.
pub fn alloc(&self) -> &A {
&self.a
}
/// Returns a mutable reference to the allocator backing this RawVec.
pub fn alloc_mut(&mut self) -> &mut A {
&mut self.a
}
fn current_layout(&self) -> Option<Layout> {
if self.cap == 0 {
None
} else {
// We have an allocated chunk of memory, so we can bypass runtime
// checks to get our current layout.
unsafe {
let align = mem::align_of::<T>();
let size = mem::size_of::<T>() * self.cap;
Some(Layout::from_size_align_unchecked(size, align))
}
}
}
/// Doubles the size of the type's backing allocation. This is common enough
/// to want to do that it's easiest to just have a dedicated method. Slightly
/// more efficient logic can be provided for this than the general case.
///
/// This function is ideal for when pushing elements one-at-a-time because
/// you don't need to incur the costs of the more general computations
/// reserve needs to do to guard against overflow. You do however need to
/// manually check if your `len == cap`.
///
/// # Panics
///
/// * Panics if T is zero-sized on the assumption that you managed to exhaust
/// all `usize::MAX` slots in your imaginary buffer.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// # extern crate alloc;
/// # use std::ptr;
/// # use alloc::raw_vec::RawVec;
/// struct MyVec<T> {
/// buf: RawVec<T>,
/// len: usize,
/// }
///
/// impl<T> MyVec<T> {
/// pub fn push(&mut self, elem: T) {
/// if self.len == self.buf.cap() { self.buf.double(); }
/// // double would have aborted or panicked if the len exceeded
/// // `isize::MAX` so this is safe to do unchecked now.
/// unsafe {
/// ptr::write(self.buf.ptr().offset(self.len as isize), elem);
/// }
/// self.len += 1;
/// }
/// }
/// # fn main() {
/// # let mut vec = MyVec { buf: RawVec::new(), len: 0 };
/// # vec.push(1);
/// # }
/// ```
#[inline(never)]
#[cold]
pub fn double(&mut self) {
unsafe {
let elem_size = mem::size_of::<T>();
// since we set the capacity to usize::MAX when elem_size is
// 0, getting to here necessarily means the RawVec is overfull.
assert!(elem_size != 0, "capacity overflow");
let (new_cap, uniq) = match self.current_layout() {
Some(cur) => {
// Since we guarantee that we never allocate more than
// isize::MAX bytes, `elem_size * self.cap <= isize::MAX` as
// a precondition, so this can't overflow. Additionally the
// alignment will never be too large as to "not be
// satisfiable", so `Layout::from_size_align` will always
// return `Some`.
//
// tl;dr; we bypass runtime checks due to dynamic assertions
// in this module, allowing us to use
// `from_size_align_unchecked`.
let new_cap = 2 * self.cap;
let new_size = new_cap * elem_size;
let new_layout = Layout::from_size_align_unchecked(new_size, cur.align());
alloc_guard(new_size);
let ptr_res = self.a.realloc(self.ptr.as_ptr() as *mut u8,
cur,
new_layout);
match ptr_res {
Ok(ptr) => (new_cap, Unique::new_unchecked(ptr as *mut T)),
Err(e) => self.a.oom(e),
}
}
None => {
// skip to 4 because tiny Vec's are dumb; but not if that
// would cause overflow
let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
match self.a.alloc_array::<T>(new_cap) {
Ok(ptr) => (new_cap, ptr.into()),
Err(e) => self.a.oom(e),
}
}
};
self.ptr = uniq;
self.cap = new_cap;
}
}
/// Attempts to double the size of the type's backing allocation in place. This is common
/// enough to want to do that it's easiest to just have a dedicated method. Slightly
/// more efficient logic can be provided for this than the general case.
///
/// Returns true if the reallocation attempt has succeeded, or false otherwise.
///
/// # Panics
///
/// * Panics if T is zero-sized on the assumption that you managed to exhaust
/// all `usize::MAX` slots in your imaginary buffer.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
#[inline(never)]
#[cold]
pub fn double_in_place(&mut self) -> bool {
unsafe {
let elem_size = mem::size_of::<T>();
let old_layout = match self.current_layout() {
Some(layout) => layout,
None => return false, // nothing to double
};
// since we set the capacity to usize::MAX when elem_size is
// 0, getting to here necessarily means the RawVec is overfull.
assert!(elem_size != 0, "capacity overflow");
// Since we guarantee that we never allocate more than isize::MAX
// bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
// this can't overflow.
//
// Similarly like with `double` above we can go straight to
// `Layout::from_size_align_unchecked` as we know this won't
// overflow and the alignment is sufficiently small.
let new_cap = 2 * self.cap;
let new_size = new_cap * elem_size;
alloc_guard(new_size);
let ptr = self.ptr() as *mut _;
let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
match self.a.grow_in_place(ptr, old_layout, new_layout) {
Ok(_) => {
// We can't directly divide `size`.
self.cap = new_cap;
true
}
Err(_) => {
false
}
}
}
}
/// Ensures that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already,
/// will reallocate the minimum possible amount of memory necessary.
/// Generally this will be exactly the amount of memory necessary,
/// but in principle the allocator is free to give back more than
/// we asked for.
///
/// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behavior of this function may break.
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM
pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
unsafe {
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
// Don't actually need any more capacity.
// Wrapping in case they gave a bad `used_cap`.
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
return;
}
// Nothing we can really do about these checks :(
let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow");
let new_layout = match Layout::array::<T>(new_cap) {
Some(layout) => layout,
None => panic!("capacity overflow"),
};
alloc_guard(new_layout.size());
let res = match self.current_layout() {
Some(layout) => {
let old_ptr = self.ptr.as_ptr() as *mut u8;
self.a.realloc(old_ptr, layout, new_layout)
}
None => self.a.alloc(new_layout),
};
let uniq = match res {
Ok(ptr) => Unique::new_unchecked(ptr as *mut T),
Err(e) => self.a.oom(e),
};
self.ptr = uniq;
self.cap = new_cap;
}
}
/// Calculates the buffer's new size given that it'll hold `used_cap +
/// needed_extra_cap` elements. This logic is used in amortized reserve methods.
/// Returns `(new_capacity, new_alloc_size)`.
fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) -> usize {
// Nothing we can really do about these checks :(
let required_cap = used_cap.checked_add(needed_extra_cap)
.expect("capacity overflow");
// Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
let double_cap = self.cap * 2;
// `double_cap` guarantees exponential growth.
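        // e.g. growing from `cap == 7` by 90 extra elements: `required_cap == 97`
        // beats `double_cap == 14`, so the buffer jumps straight to 97 (see the
        // `reserve_does_not_overallocate` test below).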
cmp::max(double_cap, required_cap)
}
/// Ensures that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already have
/// enough capacity, will reallocate enough space plus comfortable slack
/// space to get amortized `O(1)` behavior. Will limit this behavior
/// if it would needlessly cause itself to panic.
///
/// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behavior of this function may break.
///
/// This is ideal for implementing a bulk-push operation like `extend`.
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// # extern crate alloc;
/// # use std::ptr;
/// # use alloc::raw_vec::RawVec;
/// struct MyVec<T> {
/// buf: RawVec<T>,
/// len: usize,
/// }
///
/// impl<T: Clone> MyVec<T> {
/// pub fn push_all(&mut self, elems: &[T]) {
/// self.buf.reserve(self.len, elems.len());
/// // reserve would have aborted or panicked if the len exceeded
/// // `isize::MAX` so this is safe to do unchecked now.
/// for x in elems {
/// unsafe {
/// ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
/// }
/// self.len += 1;
/// }
/// }
/// }
/// # fn main() {
/// # let mut vector = MyVec { buf: RawVec::new(), len: 0 };
/// # vector.push_all(&[1, 3, 5, 7, 9]);
/// # }
/// ```
pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
unsafe {
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
// Don't actually need any more capacity.
// Wrapping in case they give a bad `used_cap`
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
return;
}
let new_cap = self.amortized_new_size(used_cap, needed_extra_cap);
let new_layout = match Layout::array::<T>(new_cap) {
Some(layout) => layout,
None => panic!("capacity overflow"),
};
// FIXME: may crash and burn on over-reserve
alloc_guard(new_layout.size());
let res = match self.current_layout() {
Some(layout) => {
let old_ptr = self.ptr.as_ptr() as *mut u8;
self.a.realloc(old_ptr, layout, new_layout)
}
None => self.a.alloc(new_layout),
};
let uniq = match res {
Ok(ptr) => Unique::new_unchecked(ptr as *mut T),
Err(e) => self.a.oom(e),
};
self.ptr = uniq;
self.cap = new_cap;
}
}
/// Attempts to ensure that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already have
/// enough capacity, will reallocate in place enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
/// if it would needlessly cause itself to panic.
///
/// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behavior of this function may break.
///
/// Returns true if the reallocation attempt has succeeded, or false otherwise.
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
unsafe {
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
// Don't actually need any more capacity. If the current `cap` is 0, we can't
// reallocate in place.
// Wrapping in case they give a bad `used_cap`
let old_layout = match self.current_layout() {
Some(layout) => layout,
None => return false,
};
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
return false;
}
let new_cap = self.amortized_new_size(used_cap, needed_extra_cap);
// Here, `cap < used_cap + needed_extra_cap <= new_cap`
// (regardless of whether `self.cap - used_cap` wrapped).
// Therefore we can safely call grow_in_place.
let ptr = self.ptr() as *mut _;
let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
// FIXME: may crash and burn on over-reserve
alloc_guard(new_layout.size());
match self.a.grow_in_place(ptr, old_layout, new_layout) {
Ok(_) => {
self.cap = new_cap;
true
}
Err(_) => {
false
}
}
}
}
/// Shrinks the allocation down to the specified amount. If the given amount
/// is 0, actually completely deallocates.
///
/// # Panics
///
/// Panics if the given amount is *larger* than the current capacity.
///
/// # Aborts
///
/// Aborts on OOM.
pub fn shrink_to_fit(&mut self, amount: usize) {
let elem_size = mem::size_of::<T>();
// Set the `cap` because they might be about to promote to a `Box<[T]>`
if elem_size == 0 {
self.cap = amount;
return;
}
// This check is my waterloo; it's the only thing Vec wouldn't have to do.
assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
if amount == 0 {
// We want to create a new zero-length vector within the
// same allocator. We use ptr::write to avoid an
// erroneous attempt to drop the contents, and we use
// ptr::read to sidestep condition against destructuring
// types that implement Drop.
unsafe {
let a = ptr::read(&self.a as *const A);
self.dealloc_buffer();
ptr::write(self, RawVec::new_in(a));
}
} else if self.cap != amount {
unsafe {
// We know here that our `amount` is greater than zero. This
// implies, via the assert above, that capacity is also greater
// than zero, which means that we've got a current layout that
// "fits"
//
// We also know that `self.cap` is greater than `amount`, and
// consequently we don't need runtime checks for creating either
// layout
let old_size = elem_size * self.cap;
let new_size = elem_size * amount;
let align = mem::align_of::<T>();
let old_layout = Layout::from_size_align_unchecked(old_size, align);
let new_layout = Layout::from_size_align_unchecked(new_size, align);
match self.a.realloc(self.ptr.as_ptr() as *mut u8,
old_layout,
new_layout) {
Ok(p) => self.ptr = Unique::new_unchecked(p as *mut T),
Err(err) => self.a.oom(err),
}
}
self.cap = amount;
}
}
}
impl<T> RawVec<T, Heap> {
/// Converts the entire buffer into `Box<[T]>`.
///
/// While it is not *strictly* Undefined Behavior to call
/// this procedure while some of the RawVec is uninitialized,
/// it certainly makes it trivial to trigger it.
///
/// Note that this will correctly reconstitute any `cap` changes
/// that may have been performed. (see description of type for details)
pub unsafe fn into_box(self) -> Box<[T]> {
// NOTE: not calling `cap()` here, actually using the real `cap` field!
let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
let output: Box<[T]> = Box::from_raw(slice);
mem::forget(self);
output
}
}
impl<T, A: Alloc> RawVec<T, A> {
/// Frees the memory owned by the RawVec *without* trying to Drop its contents.
pub unsafe fn dealloc_buffer(&mut self) {
let elem_size = mem::size_of::<T>();
if elem_size != 0 {
if let Some(layout) = self.current_layout() {
let ptr = self.ptr() as *mut u8;
self.a.dealloc(ptr, layout);
}
}
}
}
unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec<T, A> {
/// Frees the memory owned by the RawVec *without* trying to Drop its contents.
fn drop(&mut self) {
unsafe { self.dealloc_buffer(); }
}
}
// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects
// * We don't overflow `usize::MAX` and actually allocate too little
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space. e.g. PAE or x32
#[inline]
fn alloc_guard(alloc_size: usize) {
if mem::size_of::<usize>() < 8 {
assert!(alloc_size <= ::core::isize::MAX as usize,
"capacity overflow");
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn allocator_param() {
use allocator::{Alloc, AllocErr};
// Writing a test of integration between third-party
// allocators and RawVec is a little tricky because the RawVec
// API does not expose fallible allocation methods, so we
// cannot check what happens when allocator is exhausted
// (beyond detecting a panic).
//
// Instead, this just checks that the RawVec methods do at
// least go through the Allocator API when it reserves
// storage.
// A dumb allocator that consumes a fixed amount of fuel
// before allocation attempts start failing.
struct BoundedAlloc { fuel: usize }
unsafe impl Alloc for BoundedAlloc {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
let size = layout.size();
if size > self.fuel {
return Err(AllocErr::Unsupported { details: "fuel exhausted" });
}
match Heap.alloc(layout) {
ok @ Ok(_) => { self.fuel -= size; ok }
err @ Err(_) => err,
}
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
Heap.dealloc(ptr, layout)
}
}
let a = BoundedAlloc { fuel: 500 };
let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
assert_eq!(v.a.fuel, 450);
v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
assert_eq!(v.a.fuel, 250);
}
#[test]
fn reserve_does_not_overallocate() {
{
let mut v: RawVec<u32> = RawVec::new();
// First `reserve` allocates like `reserve_exact`
v.reserve(0, 9);
assert_eq!(9, v.cap());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 7);
assert_eq!(7, v.cap());
            // 97 is more than double of 7, so `reserve` should work
// like `reserve_exact`.
v.reserve(7, 90);
assert_eq!(97, v.cap());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 12);
assert_eq!(12, v.cap());
v.reserve(12, 3);
// 3 is less than half of 12, so `reserve` must grow
// exponentially. At the time of writing this test grow
// factor is 2, so new capacity is 24, however, grow factor
// of 1.5 is OK too. Hence `>= 18` in assert.
assert!(v.cap() >= 12 + 12 / 2);
}
}
}
| 38.270677 | 98 | 0.549247 |
ff4bc3f36c8d86bd4863baa26bd61d5b96416568
| 24,750 |
/*
Copyright 2021 Integritee AG and Supercomputing Systems AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//use super::*;
use crate::mock::*;
use crate::test_utils::consts::*;
use crate::{ConfirmedCalls, Enclave, EnclaveRegistry, Error, RawEvent, Request, ShardIdentifier};
use frame_support::{assert_err, assert_ok, IterableStorageMap, StorageMap};
use ias_verify::SgxBuildMode;
use sp_core::H256;
use sp_keyring::AccountKeyring;
fn list_enclaves() -> Vec<(u64, Enclave<AccountId, Vec<u8>>)> {
<EnclaveRegistry<Test>>::iter().collect::<Vec<(u64, Enclave<AccountId, Vec<u8>>)>>()
}
// give get_signer a concrete type
fn get_signer(pubkey: &[u8; 32]) -> AccountId {
crate::test_utils::get_signer(pubkey)
}
#[test]
fn add_enclave_works() {
new_test_ext().execute_with(|| {
        // set the runtime's notion of 'now' such that the remote attestation reports are within the accepted range (24h)
Timestamp::set_timestamp(TEST4_TIMESTAMP);
let signer = get_signer(TEST4_SIGNER_PUB);
assert_ok!(Teerex::register_enclave(
Origin::signed(signer),
TEST4_CERT.to_vec(),
URL.to_vec()
));
assert_eq!(Teerex::enclave_count(), 1);
})
}
#[test]
fn add_and_remove_enclave_works() {
new_test_ext().execute_with(|| {
let _ = env_logger::init();
Timestamp::set_timestamp(TEST4_TIMESTAMP);
let signer = get_signer(TEST4_SIGNER_PUB);
assert_ok!(Teerex::register_enclave(
Origin::signed(signer.clone()),
TEST4_CERT.to_vec(),
URL.to_vec()
));
assert_eq!(Teerex::enclave_count(), 1);
assert_ok!(Teerex::unregister_enclave(Origin::signed(signer)));
assert_eq!(Teerex::enclave_count(), 0);
assert_eq!(list_enclaves(), vec![])
})
}
#[test]
fn list_enclaves_works() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST4_TIMESTAMP);
let signer = get_signer(TEST4_SIGNER_PUB);
let e_1: Enclave<AccountId, Vec<u8>> = Enclave {
pubkey: signer.clone(),
mr_enclave: TEST4_MRENCLAVE,
timestamp: TEST4_TIMESTAMP,
url: URL.to_vec(),
sgx_mode: SgxBuildMode::Debug,
};
assert_ok!(Teerex::register_enclave(
Origin::signed(signer.clone()),
TEST4_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 1);
let enclaves = list_enclaves();
assert_eq!(enclaves[0].1.pubkey, signer);
assert!(enclaves.contains(&(1, e_1.clone())));
})
}
#[test]
fn remove_middle_enclave_works() {
new_test_ext().execute_with(|| {
        // use the newest timestamp as 'now' such that all reports are valid
Timestamp::set_timestamp(TEST7_TIMESTAMP);
let signer5 = get_signer(TEST5_SIGNER_PUB);
let signer6 = get_signer(TEST6_SIGNER_PUB);
let signer7 = get_signer(TEST7_SIGNER_PUB);
// add enclave 1
let e_1: Enclave<AccountId, Vec<u8>> = Enclave {
pubkey: signer5.clone(),
mr_enclave: TEST5_MRENCLAVE,
timestamp: TEST5_TIMESTAMP,
url: URL.to_vec(),
sgx_mode: SgxBuildMode::Debug,
};
let e_2: Enclave<AccountId, Vec<u8>> = Enclave {
pubkey: signer6.clone(),
mr_enclave: TEST6_MRENCLAVE,
timestamp: TEST6_TIMESTAMP,
url: URL.to_vec(),
sgx_mode: SgxBuildMode::Debug,
};
let e_3: Enclave<AccountId, Vec<u8>> = Enclave {
pubkey: signer7.clone(),
mr_enclave: TEST7_MRENCLAVE,
timestamp: TEST7_TIMESTAMP,
url: URL.to_vec(),
sgx_mode: SgxBuildMode::Debug,
};
assert_ok!(Teerex::register_enclave(
Origin::signed(signer5.clone()),
TEST5_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 1);
assert_eq!(list_enclaves(), vec![(1, e_1.clone())]);
// add enclave 2
assert_ok!(Teerex::register_enclave(
Origin::signed(signer6.clone()),
TEST6_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 2);
let enclaves = list_enclaves();
assert!(enclaves.contains(&(1, e_1.clone())));
assert!(enclaves.contains(&(2, e_2.clone())));
// add enclave 3
assert_ok!(Teerex::register_enclave(
Origin::signed(signer7.clone()),
TEST7_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 3);
let enclaves = list_enclaves();
assert!(enclaves.contains(&(1, e_1.clone())));
assert!(enclaves.contains(&(2, e_2.clone())));
assert!(enclaves.contains(&(3, e_3.clone())));
// remove enclave 2
assert_ok!(Teerex::unregister_enclave(Origin::signed(signer6)));
assert_eq!(Teerex::enclave_count(), 2);
let enclaves = list_enclaves();
assert!(enclaves.contains(&(1, e_1.clone())));
assert!(enclaves.contains(&(2, e_3.clone())));
})
}
#[test]
fn register_enclave_with_different_signer_fails() {
new_test_ext().execute_with(|| {
let signer = get_signer(TEST7_SIGNER_PUB);
assert_err!(
Teerex::register_enclave(Origin::signed(signer), TEST5_CERT.to_vec(), URL.to_vec()),
Error::<Test>::SenderIsNotAttestedEnclave
);
})
}
#[test]
fn register_enclave_with_to_old_attestation_report_fails() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST7_TIMESTAMP + TWENTY_FOUR_HOURS + 1);
let signer = get_signer(TEST7_SIGNER_PUB);
assert_err!(
Teerex::register_enclave(Origin::signed(signer), TEST7_CERT.to_vec(), URL.to_vec(),),
Error::<Test>::RemoteAttestationTooOld
);
})
}
#[test]
fn register_enclave_with_almost_too_old_report_works() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST7_TIMESTAMP + TWENTY_FOUR_HOURS - 1);
let signer = get_signer(TEST7_SIGNER_PUB);
assert_ok!(Teerex::register_enclave(
Origin::signed(signer),
TEST7_CERT.to_vec(),
URL.to_vec(),
));
})
}
#[test]
fn update_enclave_url_works() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST4_TIMESTAMP);
let signer = get_signer(TEST4_SIGNER_PUB);
let url2 = "my fancy url".as_bytes();
let _e_1: Enclave<AccountId, Vec<u8>> = Enclave {
pubkey: signer.clone(),
mr_enclave: TEST4_MRENCLAVE,
timestamp: TEST4_TIMESTAMP,
url: url2.to_vec(),
sgx_mode: SgxBuildMode::Debug,
};
assert_ok!(Teerex::register_enclave(
Origin::signed(signer.clone()),
TEST4_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave(1).url, URL.to_vec());
assert_ok!(Teerex::register_enclave(
Origin::signed(signer.clone()),
TEST4_CERT.to_vec(),
url2.to_vec(),
));
assert_eq!(Teerex::enclave(1).url, url2.to_vec());
let enclaves = list_enclaves();
assert_eq!(enclaves[0].1.pubkey, signer)
})
}
#[test]
fn update_ipfs_hash_works() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST4_TIMESTAMP);
let ipfs_hash = "QmYY9U7sQzBYe79tVfiMyJ4prEJoJRWCD8t85j9qjssS9y";
let shard = H256::from_slice(&TEST4_MRENCLAVE);
let request_hash = H256::default();
let signer = get_signer(TEST4_SIGNER_PUB);
assert_ok!(Teerex::register_enclave(
Origin::signed(signer.clone()),
TEST4_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 1);
assert_ok!(Teerex::confirm_call(
Origin::signed(signer.clone()),
shard.clone(),
request_hash.clone(),
ipfs_hash.as_bytes().to_vec()
));
assert_eq!(
Teerex::latest_ipfs_hash(shard.clone()),
ipfs_hash.as_bytes().to_vec()
);
assert_eq!(Teerex::worker_for_shard(shard.clone()), 1u64);
let expected_event = Event::Teerex(RawEvent::UpdatedIpfsHash(
shard.clone(),
1,
ipfs_hash.as_bytes().to_vec(),
));
assert!(System::events().iter().any(|a| a.event == expected_event));
let expected_event = Event::Teerex(RawEvent::CallConfirmed(signer.clone(), request_hash));
assert!(System::events().iter().any(|a| a.event == expected_event));
})
}
#[test]
fn ipfs_update_from_unregistered_enclave_fails() {
new_test_ext().execute_with(|| {
let ipfs_hash = "QmYY9U7sQzBYe79tVfiMyJ4prEJoJRWCD8t85j9qjssS9y";
let signer = get_signer(TEST4_SIGNER_PUB);
assert_err!(
Teerex::confirm_call(
Origin::signed(signer),
H256::default(),
H256::default(),
ipfs_hash.as_bytes().to_vec()
),
Error::<Test>::EnclaveIsNotRegistered
);
})
}
#[test]
fn call_worker_works() {
new_test_ext().execute_with(|| {
let req = Request {
shard: ShardIdentifier::default(),
cyphertext: vec![0u8, 1, 2, 3, 4],
};
// don't care who signs
let signer = get_signer(TEST4_SIGNER_PUB);
assert!(Teerex::call_worker(Origin::signed(signer), req.clone()).is_ok());
let expected_event = Event::Teerex(RawEvent::Forwarded(req.shard));
println!("events:{:?}", System::events());
assert!(System::events().iter().any(|a| a.event == expected_event));
})
}
#[test]
fn unshield_is_only_executed_once_for_the_same_call_hash() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST4_TIMESTAMP);
let signer = get_signer(TEST4_SIGNER_PUB);
let call_hash: H256 = H256::from([1u8; 32]);
let bonding_account = get_signer(&TEST4_MRENCLAVE);
assert_ok!(Teerex::register_enclave(
Origin::signed(signer.clone()),
TEST4_CERT.to_vec(),
URL.to_vec(),
));
assert_ok!(Balances::transfer(
Origin::signed(AccountKeyring::Alice.to_account_id()),
bonding_account.clone(),
1 << 50
));
assert!(Teerex::unshield_funds(
Origin::signed(signer.clone()),
AccountKeyring::Alice.to_account_id(),
50,
bonding_account.clone(),
call_hash.clone()
)
.is_ok());
assert!(Teerex::unshield_funds(
Origin::signed(signer.clone()),
AccountKeyring::Alice.to_account_id(),
50,
bonding_account.clone(),
call_hash.clone()
)
.is_ok());
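        // Both submissions with the same call hash are accepted and counted;
        // per the test name, the funds themselves must only be unshielded once.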
assert_eq!(<ConfirmedCalls>::get(call_hash), 2)
})
}
#[test]
fn timestamp_callback_works() {
new_test_ext().execute_with(|| {
set_timestamp(TEST7_TIMESTAMP);
let signer5 = get_signer(TEST5_SIGNER_PUB);
let signer6 = get_signer(TEST6_SIGNER_PUB);
let signer7 = get_signer(TEST7_SIGNER_PUB);
// add enclave 1
let e_2: Enclave<AccountId, Vec<u8>> = Enclave {
pubkey: signer6.clone(),
mr_enclave: TEST6_MRENCLAVE,
timestamp: TEST6_TIMESTAMP,
url: URL.to_vec(),
sgx_mode: SgxBuildMode::Debug,
};
let e_3: Enclave<AccountId, Vec<u8>> = Enclave {
pubkey: signer7.clone(),
mr_enclave: TEST7_MRENCLAVE,
timestamp: TEST7_TIMESTAMP,
url: URL.to_vec(),
sgx_mode: SgxBuildMode::Debug,
};
//Register 3 enclaves: 5, 6 ,7
assert_ok!(Teerex::register_enclave(
Origin::signed(signer5.clone()),
TEST5_CERT.to_vec(),
URL.to_vec(),
));
assert_ok!(Teerex::register_enclave(
Origin::signed(signer6.clone()),
TEST6_CERT.to_vec(),
URL.to_vec(),
));
assert_ok!(Teerex::register_enclave(
Origin::signed(signer7.clone()),
TEST7_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 3);
        //enclave 5 has been silent for more than 48h -> unregistered
run_to_block(2);
set_timestamp(TEST5_TIMESTAMP + 2 * TWENTY_FOUR_HOURS + 1);
let expected_event = Event::Teerex(RawEvent::RemovedEnclave(signer5));
assert!(System::events().iter().any(|a| a.event == expected_event));
assert_eq!(Teerex::enclave_count(), 2);
//2 and 3 are still there. 3 and 1 were swapped -> 3 and 2
let enclaves = list_enclaves();
assert!(enclaves.contains(&(1, e_3.clone())));
assert!(enclaves.contains(&(2, e_2.clone())));
run_to_block(3);
        //enclave 6 and 7 still registered: not silent long enough
set_timestamp(TEST6_TIMESTAMP + 2 * TWENTY_FOUR_HOURS);
assert_eq!(Teerex::enclave_count(), 2);
        //unregister 6 to generate an error on the next callback invocation
assert_ok!(Teerex::unregister_enclave(Origin::signed(signer6.clone())));
let expected_event = Event::Teerex(RawEvent::RemovedEnclave(signer6));
assert!(System::events().iter().any(|a| a.event == expected_event));
assert_eq!(Teerex::enclave_count(), 1);
        //enclave 7 silent for more than 2 * TWENTY_FOUR_HOURS -> unregistered
run_to_block(4);
set_timestamp(TEST7_TIMESTAMP + 2 * TWENTY_FOUR_HOURS + 1);
let expected_event = Event::Teerex(RawEvent::RemovedEnclave(signer7));
assert!(System::events().iter().any(|a| a.event == expected_event));
assert_eq!(Teerex::enclave_count(), 0);
})
}
#[test]
fn debug_mode_enclave_attest_works_when_sgx_debug_mode_is_allowed() {
new_test_ext().execute_with(|| {
set_timestamp(TEST4_TIMESTAMP);
let signer4 = get_signer(TEST4_SIGNER_PUB);
let e_0: Enclave<AccountId, Vec<u8>> = Enclave {
pubkey: signer4.clone(),
mr_enclave: TEST4_MRENCLAVE,
timestamp: TEST4_TIMESTAMP,
url: URL.to_vec(),
sgx_mode: SgxBuildMode::Debug,
};
//Register an enclave compiled in debug mode
assert_ok!(Teerex::register_enclave(
Origin::signed(signer4.clone()),
TEST4_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 1);
let enclaves = list_enclaves();
assert!(enclaves.contains(&(1, e_0.clone())));
})
}
#[test]
fn production_mode_enclave_attest_works_when_sgx_debug_mode_is_allowed() {
    new_test_ext().execute_with(|| {
set_timestamp(TEST8_TIMESTAMP);
let signer8 = get_signer(TEST8_SIGNER_PUB);
let e_0: Enclave<AccountId, Vec<u8>> = Enclave {
pubkey: signer8.clone(),
mr_enclave: TEST8_MRENCLAVE,
timestamp: TEST8_TIMESTAMP,
url: URL.to_vec(),
sgx_mode: SgxBuildMode::Production,
};
//Register an enclave compiled in production mode
assert_ok!(Teerex::register_enclave(
Origin::signed(signer8.clone()),
TEST8_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 1);
let enclaves = list_enclaves();
assert!(enclaves.contains(&(1, e_0.clone())));
    })
}
#[test]
fn debug_mode_enclave_attest_fails_when_sgx_debug_mode_not_allowed() {
new_test_production_ext().execute_with(|| {
set_timestamp(TEST4_TIMESTAMP);
let signer4 = get_signer(TEST4_SIGNER_PUB);
//Try to register an enclave compiled in debug mode
assert_err!(
Teerex::register_enclave(
Origin::signed(signer4.clone()),
TEST4_CERT.to_vec(),
URL.to_vec(),
),
Error::<Test>::SgxModeNotAllowed
);
assert_eq!(Teerex::enclave_count(), 0);
})
}
#[test]
fn production_mode_enclave_attest_works_when_sgx_debug_mode_not_allowed() {
new_test_production_ext().execute_with(|| {
set_timestamp(TEST8_TIMESTAMP);
let signer8 = get_signer(TEST8_SIGNER_PUB);
let e_0: Enclave<AccountId, Vec<u8>> = Enclave {
pubkey: signer8.clone(),
mr_enclave: TEST8_MRENCLAVE,
timestamp: TEST8_TIMESTAMP,
url: URL.to_vec(),
sgx_mode: SgxBuildMode::Production,
};
//Register an enclave compiled in production mode
assert_ok!(Teerex::register_enclave(
Origin::signed(signer8.clone()),
TEST8_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 1);
let enclaves = list_enclaves();
assert!(enclaves.contains(&(1, e_0.clone())));
})
}
#[test]
fn verify_unshield_funds_works() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST4_TIMESTAMP);
let signer4 = get_signer(TEST4_SIGNER_PUB);
let call_hash: H256 = H256::from([1u8; 32]);
let bonding_account = get_signer(&TEST4_MRENCLAVE);
let incognito_account = INCOGNITO_ACCOUNT.to_vec();
//Register enclave
assert_ok!(Teerex::register_enclave(
Origin::signed(signer4.clone()),
TEST4_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 1);
assert!(Teerex::shield_funds(
Origin::signed(AccountKeyring::Alice.to_account_id()),
incognito_account.clone(),
100,
bonding_account.clone(),
)
.is_ok());
assert_eq!(Balances::free_balance(bonding_account.clone()), 100);
let expected_event = Event::Teerex(RawEvent::ShieldFunds(incognito_account));
assert!(System::events().iter().any(|a| a.event == expected_event));
assert!(Teerex::unshield_funds(
Origin::signed(signer4.clone()),
AccountKeyring::Alice.to_account_id(),
50,
bonding_account.clone(),
call_hash.clone()
)
.is_ok());
assert_eq!(Balances::free_balance(bonding_account), 50);
let expected_event = Event::Teerex(RawEvent::UnshieldedFunds(
AccountKeyring::Alice.to_account_id(),
));
assert!(System::events().iter().any(|a| a.event == expected_event));
})
}
#[test]
fn verify_unshield_funds_from_not_registered_enclave_fails() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST4_TIMESTAMP);
let signer4 = get_signer(TEST4_SIGNER_PUB);
let call_hash: H256 = H256::from([1u8; 32]);
assert_eq!(Teerex::enclave_count(), 0);
assert_err!(
Teerex::unshield_funds(
Origin::signed(signer4.clone()),
AccountKeyring::Alice.to_account_id(),
51,
signer4.clone(),
call_hash.clone()
),
Error::<Test>::EnclaveIsNotRegistered
);
})
}
#[test]
fn verify_unshield_funds_from_enclave_not_bonding_account_fails() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST7_TIMESTAMP);
let signer4 = get_signer(TEST4_SIGNER_PUB);
let call_hash: H256 = H256::from([1u8; 32]);
let bonding_account = get_signer(&TEST4_MRENCLAVE);
let incognito_account = INCOGNITO_ACCOUNT;
let not_bonding_account = get_signer(&TEST7_MRENCLAVE);
//Ensure that enclave is registered
assert_ok!(Teerex::register_enclave(
Origin::signed(signer4.clone()),
TEST4_CERT.to_vec(),
URL.to_vec(),
));
//Ensure that bonding account has funds
assert!(Teerex::shield_funds(
Origin::signed(AccountKeyring::Alice.to_account_id()),
incognito_account.to_vec(),
100,
bonding_account.clone(),
)
.is_ok());
assert!(Teerex::shield_funds(
Origin::signed(AccountKeyring::Alice.to_account_id()),
incognito_account.to_vec(),
50,
not_bonding_account.clone(),
)
.is_ok());
assert_err!(
Teerex::unshield_funds(
Origin::signed(signer4.clone()),
AccountKeyring::Alice.to_account_id(),
50,
not_bonding_account.clone(),
call_hash.clone()
),
Error::<Test>::WrongMrenclaveForBondingAccount
);
assert_eq!(Balances::free_balance(bonding_account.clone()), 100);
assert_eq!(Balances::free_balance(not_bonding_account.clone()), 50);
})
}
#[test]
fn verify_call_confirmation_from_shards_not_enclave_fails() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST7_TIMESTAMP);
let ipfs_hash = "QmYY9U7sQzBYe79tVfiMyJ4prEJoJRWCD8t85j9qjssS9y";
let request_hash = H256::default();
let signer7 = get_signer(TEST7_SIGNER_PUB);
let shard4 = H256::from_slice(&TEST4_MRENCLAVE);
//Ensure that enclave is registered
assert_ok!(Teerex::register_enclave(
Origin::signed(signer7.clone()),
TEST7_CERT.to_vec(),
URL.to_vec(),
));
assert_err!(
Teerex::confirm_call(
Origin::signed(signer7.clone()),
shard4.clone(),
request_hash.clone(),
ipfs_hash.as_bytes().to_vec()
),
Error::<Test>::WrongMrenclaveForShard
);
})
}
#[test]
fn update_block_confirmation_works() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST7_TIMESTAMP);
let ipfs_hash = "QmYY9U7sQzBYe79tVfiMyJ4prEJoJRWCD8t85j9qjssS9y";
let block_hash = H256::default();
let signer7 = get_signer(TEST7_SIGNER_PUB);
let shard7 = H256::from_slice(&TEST7_MRENCLAVE);
//Ensure that enclave is registered
assert_ok!(Teerex::register_enclave(
Origin::signed(signer7.clone()),
TEST7_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 1);
assert_ok!(Teerex::confirm_block(
Origin::signed(signer7.clone()),
shard7.clone(),
block_hash.clone(),
ipfs_hash.as_bytes().to_vec()
));
let expected_event = Event::Teerex(RawEvent::UpdatedIpfsHash(
shard7.clone(),
1,
ipfs_hash.as_bytes().to_vec(),
));
assert!(System::events().iter().any(|a| a.event == expected_event));
let expected_event = Event::Teerex(RawEvent::BlockConfirmed(signer7.clone(), block_hash));
assert!(System::events().iter().any(|a| a.event == expected_event));
})
}
#[test]
fn verify_block_confirmation_from_shards_not_enclave_fails() {
new_test_ext().execute_with(|| {
Timestamp::set_timestamp(TEST7_TIMESTAMP);
let ipfs_hash = "QmYY9U7sQzBYe79tVfiMyJ4prEJoJRWCD8t85j9qjssS9y";
let block_hash = H256::default();
let signer7 = get_signer(TEST7_SIGNER_PUB);
let shard4 = H256::from_slice(&TEST4_MRENCLAVE);
//Ensure that enclave is registered
assert_ok!(Teerex::register_enclave(
Origin::signed(signer7.clone()),
TEST7_CERT.to_vec(),
URL.to_vec(),
));
assert_eq!(Teerex::enclave_count(), 1);
assert_err!(
Teerex::confirm_block(
Origin::signed(signer7.clone()),
shard4.clone(),
block_hash.clone(),
ipfs_hash.as_bytes().to_vec()
),
Error::<Test>::WrongMrenclaveForShard
);
})
}
| 33.811475 | 110 | 0.590747 |
feacac0878227170efc94f7b787a545e9ff8baab
| 3,732 |
use std::fmt;
use std::fmt::Display;
use std::fmt::Formatter;
use crate::lexer::Position;
use crate::lexer::Token;
use crate::lexer::TokenTy;
pub type Result<'r, OK> = std::result::Result<OK, Error<'r>>;
#[derive(Debug)]
pub struct Error<'e> {
kind: ErrorKind<'e>,
}
#[derive(Debug)]
pub enum ErrorKind<'e> {
MissingToken {
handled: Vec<TokenTy>,
after: Option<Position>,
},
WrongToken {
token: &'e Token<'e>,
handled: Vec<TokenTy>,
},
Multiple(Vec<Error<'e>>),
}
impl<'e> Error<'e> {
pub(super) fn missing_token(handled: Vec<TokenTy>, after: Option<Position>) -> Error<'e> {
Error {
kind: ErrorKind::MissingToken { handled, after },
}
}
pub(super) fn wrong_token(token: &'e Token<'e>, handled: Vec<TokenTy>) -> Error<'e> {
Error {
kind: ErrorKind::WrongToken { token, handled },
}
}
pub(super) fn multiple(errors: Vec<Error<'e>>) -> Error<'e> {
Error {
kind: ErrorKind::Multiple(errors),
}
}
pub(super) fn is_wrong_token(&self) -> bool {
if let ErrorKind::WrongToken { .. } = self.kind {
true
} else {
false
}
}
pub(super) fn max_after(&mut self, after: Option<Position>) {
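        // Keep whichever position is further along, so a "missing token"
        // error always reports the deepest point the parser reached.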
if let ErrorKind::MissingToken { after: after_, .. } = &mut self.kind {
match (after_, after) {
(Some(after_), Some(after)) => *after_ = std::cmp::max(*after_, after),
(after_, Some(after)) => *after_ = Some(after),
_ => (),
}
}
}
pub(super) fn concat(mut self, mut other: Error<'e>) -> Error<'e> {
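        // Merge two errors, flattening nested `Multiple` variants rather
        // than building `Multiple(Multiple(..))` chains.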
match (&mut self.kind, &mut other.kind) {
(ErrorKind::Multiple(left), ErrorKind::Multiple(right)) => {
left.append(right);
self
}
(ErrorKind::Multiple(multi), _) => {
multi.push(other);
self
}
(_, ErrorKind::Multiple(multi)) => {
multi.push(self);
other
}
(_, _) => Error::multiple(vec![self, other]),
}
}
}
impl<'e> Display for Error<'e> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
match &self.kind {
ErrorKind::MissingToken { handled, after } => {
write!(fmt, "missing token( handled:")?;
for handled in handled {
write!(fmt, r#" "{}" "#, handled)?;
}
write!(fmt, " )")?;
if let Some(after) = after {
write!(fmt, " after {}", after)?;
}
Ok(())
}
ErrorKind::WrongToken { token, handled } => {
write!(fmt, r#"wrong token( handled:"#)?;
let mut first = true;
for handled in handled {
if !first {
write!(fmt, ", ")?;
} else {
first = false;
}
write!(fmt, r#"{}"#, handled)?;
}
write!(fmt, " ):{}", token)
}
ErrorKind::Multiple(errors) => {
write!(fmt, "multiple errors possible: [ ")?;
let mut first = true;
for error in errors {
if !first {
write!(fmt, " || ")?;
} else {
first = false;
}
write!(fmt, "{}", error)?;
}
write!(fmt, " ]")
}
}
}
}
| 27.043478 | 94 | 0.427117 |
39986bc698938752265982f575855fa4c32833d8
| 4,943 |
// Copyright 2017-2019 int08h LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "awskms")]
pub mod inner {
use std::collections::HashMap;
use std::default::Default;
use std::error::Error;
use std::fmt;
use std::fmt::Formatter;
use std::str::FromStr;
use rusoto_core::Region;
use rusoto_kms::{DecryptRequest, EncryptRequest, Kms, KmsClient};
use crate::kms::{EncryptedDEK, KmsError, KmsProvider, PlaintextDEK, AD, DEK_SIZE_BYTES};
/// Amazon Web Services Key Management Service
/// https://aws.amazon.com/kms/
pub struct AwsKms {
kms_client: KmsClient,
key_id: String,
}
impl AwsKms {
        /// Create a new instance from the full ARN of an AWS KMS key. The ARN is expected
/// to be of the form `arn:aws:kms:some-aws-region:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
pub fn from_arn(arn: &str) -> Result<Self, KmsError> {
let parts: Vec<&str> = arn.split(':').collect();
if parts.len() != 6 {
                return Err(KmsError::InvalidConfiguration(format!(
                    "invalid KMS arn: expected 6 parts, got {}",
                    parts.len()
)));
}
let region_part = parts.get(3).expect("region is missing");
let region = match Region::from_str(region_part) {
Ok(r) => r,
Err(e) => return Err(KmsError::InvalidConfiguration(e.description().to_string())),
};
Ok(AwsKms {
kms_client: KmsClient::new(region),
key_id: arn.to_string(),
})
}
}
impl KmsProvider for AwsKms {
fn encrypt_dek(&self, plaintext_dek: &PlaintextDEK) -> Result<EncryptedDEK, KmsError> {
if plaintext_dek.len() != DEK_SIZE_BYTES {
return Err(KmsError::InvalidKey(format!(
"provided DEK wrong length: {}",
plaintext_dek.len()
)));
}
let mut encrypt_req: EncryptRequest = Default::default();
encrypt_req.key_id = self.key_id.clone();
encrypt_req.plaintext = plaintext_dek.clone();
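            // The encryption context acts as authenticated additional data:
            // KMS only decrypts if the exact same "AD" pair is presented
            // again (see `decrypt_dek` below).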
let mut enc_context = HashMap::new();
enc_context.insert("AD".to_string(), AD.to_string());
encrypt_req.encryption_context = Some(enc_context);
match self.kms_client.encrypt(encrypt_req).sync() {
Ok(result) => {
if let Some(ciphertext) = result.ciphertext_blob {
Ok(ciphertext)
} else {
Err(KmsError::OperationFailed(
"no ciphertext despite successful response".to_string(),
))
}
}
Err(e) => Err(KmsError::OperationFailed(e.description().to_string())),
}
}
fn decrypt_dek(&self, encrypted_dek: &EncryptedDEK) -> Result<PlaintextDEK, KmsError> {
let mut decrypt_req: DecryptRequest = Default::default();
decrypt_req.ciphertext_blob = encrypted_dek.clone();
let mut dec_context = HashMap::new();
dec_context.insert("AD".to_string(), AD.to_string());
decrypt_req.encryption_context = Some(dec_context);
match self.kms_client.decrypt(decrypt_req).sync() {
Ok(result) => {
if let Some(plaintext_dek) = result.plaintext {
if plaintext_dek.len() == DEK_SIZE_BYTES {
Ok(plaintext_dek)
} else {
Err(KmsError::InvalidKey(format!(
"decrypted DEK wrong length: {}",
plaintext_dek.len()
)))
}
} else {
Err(KmsError::OperationFailed(
"decrypted payload is empty".to_string(),
))
}
}
Err(e) => Err(KmsError::OperationFailed(e.description().to_string())),
}
}
}
#[cfg(feature = "awskms")]
impl fmt::Display for AwsKms {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}", self.key_id)
}
}
}
| 38.317829 | 113 | 0.534493 |
61a5f8cbcd1b469368520c6f95000d671f6864da
| 1,500 |
// Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_base::tokio;
use common_exception::Result;
use futures::TryStreamExt;
use crate::catalogs::Table;
use crate::catalogs::ToReadDataSourcePlan;
use crate::datasources::database::system::FunctionsTable;
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_functions_table() -> Result<()> {
let ctx = crate::tests::try_create_context()?;
let table: Arc<dyn Table> = Arc::new(FunctionsTable::create(1));
let io_ctx = ctx.get_single_node_table_io_context()?;
let io_ctx = Arc::new(io_ctx);
let source_plan = table.read_plan(
io_ctx.clone(),
None,
Some(ctx.get_settings().get_max_threads()? as usize),
)?;
let stream = table.read(io_ctx, &source_plan.push_downs).await?;
let result = stream.try_collect::<Vec<_>>().await?;
let block = &result[0];
assert_eq!(block.num_columns(), 2);
Ok(())
}
| 34.883721 | 75 | 0.704 |
abe15ad3c5d7144ce861198e1c2553ea2043498d
| 16,016 |
use std::cell::RefCell;
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use futures::future::{ok, FutureExt, LocalBoxFuture};
use crate::http::{Extensions, Request, Response};
use crate::router::{Path, ResourceDef, ResourceInfo, Router};
use crate::service::boxed::{self, BoxService, BoxServiceFactory};
use crate::{fn_service, Service, ServiceFactory};
use super::config::AppConfig;
use super::error::ErrorRenderer;
use super::guard::Guard;
use super::httprequest::{HttpRequest, HttpRequestPool};
use super::request::WebRequest;
use super::response::WebResponse;
use super::rmap::ResourceMap;
use super::service::{AppServiceFactory, WebServiceConfig};
use super::types::data::DataFactory;
type Guards = Vec<Box<dyn Guard>>;
type HttpService<Err: ErrorRenderer> =
BoxService<WebRequest<Err>, WebResponse, Err::Container>;
type HttpNewService<Err: ErrorRenderer> =
BoxServiceFactory<(), WebRequest<Err>, WebResponse, Err::Container, ()>;
type BoxResponse<Err: ErrorRenderer> =
LocalBoxFuture<'static, Result<WebResponse, Err::Container>>;
type FnDataFactory =
Box<dyn Fn() -> LocalBoxFuture<'static, Result<Box<dyn DataFactory>, ()>>>;
/// Service factory to convert a `Request` into a `WebRequest<Err>`.
/// It also executes data factories.
pub struct AppFactory<T, Err: ErrorRenderer>
where
T: ServiceFactory<
Config = (),
Request = WebRequest<Err>,
Response = WebResponse,
Error = Err::Container,
InitError = (),
>,
Err: ErrorRenderer,
{
pub(super) endpoint: T,
pub(super) extensions: RefCell<Option<Extensions>>,
pub(super) data: Rc<Vec<Box<dyn DataFactory>>>,
pub(super) data_factories: Rc<Vec<FnDataFactory>>,
pub(super) services: Rc<RefCell<Vec<Box<dyn AppServiceFactory<Err>>>>>,
pub(super) default: Option<Rc<HttpNewService<Err>>>,
pub(super) factory_ref: Rc<RefCell<Option<AppRoutingFactory<Err>>>>,
pub(super) external: RefCell<Vec<ResourceDef>>,
pub(super) case_insensitive: bool,
}
impl<T, Err> ServiceFactory for AppFactory<T, Err>
where
T: ServiceFactory<
Config = (),
Request = WebRequest<Err>,
Response = WebResponse,
Error = Err::Container,
InitError = (),
>,
Err: ErrorRenderer,
{
type Config = AppConfig;
type Request = Request;
type Response = WebResponse;
type Error = T::Error;
type InitError = T::InitError;
type Service = AppFactoryService<T::Service, Err>;
type Future = AppFactoryResult<T, Err>;
fn new_service(&self, config: AppConfig) -> Self::Future {
// update resource default service
let default = self.default.clone().unwrap_or_else(|| {
Rc::new(boxed::factory(fn_service(|req: WebRequest<Err>| {
ok(req.into_response(Response::NotFound().finish()))
})))
});
// App config
let mut config =
WebServiceConfig::new(config, default.clone(), self.data.clone());
// register services
std::mem::take(&mut *self.services.borrow_mut())
.into_iter()
.for_each(|mut srv| srv.register(&mut config));
let mut rmap = ResourceMap::new(ResourceDef::new(""));
let (config, services) = config.into_services();
// complete pipeline creation
*self.factory_ref.borrow_mut() = Some(AppRoutingFactory {
default,
services: Rc::new(
services
.into_iter()
.map(|(mut rdef, srv, guards, nested)| {
rmap.add(&mut rdef, nested);
(rdef, srv, RefCell::new(guards))
})
.collect(),
),
case_insensitive: self.case_insensitive,
});
// external resources
for mut rdef in std::mem::take(&mut *self.external.borrow_mut()) {
rmap.add(&mut rdef, None);
}
// complete ResourceMap tree creation
let rmap = Rc::new(rmap);
rmap.finish(rmap.clone());
AppFactoryResult {
endpoint: None,
endpoint_fut: self.endpoint.new_service(()),
data: self.data.clone(),
data_factories: Vec::new(),
data_factories_fut: self.data_factories.iter().map(|f| f()).collect(),
case_insensitive: self.case_insensitive,
extensions: Some(
self.extensions
.borrow_mut()
.take()
.unwrap_or_else(Extensions::new),
),
config,
rmap,
_t: PhantomData,
}
}
}
pin_project_lite::pin_project! {
pub struct AppFactoryResult<T: ServiceFactory, Err> {
endpoint: Option<T::Service>,
#[pin]
endpoint_fut: T::Future,
rmap: Rc<ResourceMap>,
config: AppConfig,
data: Rc<Vec<Box<dyn DataFactory>>>,
data_factories: Vec<Box<dyn DataFactory>>,
data_factories_fut: Vec<LocalBoxFuture<'static, Result<Box<dyn DataFactory>, ()>>>,
case_insensitive: bool,
extensions: Option<Extensions>,
_t: PhantomData<Err>,
}
}
impl<T, Err> Future for AppFactoryResult<T, Err>
where
T: ServiceFactory<
Config = (),
Request = WebRequest<Err>,
Response = WebResponse,
Error = Err::Container,
InitError = (),
>,
Err: ErrorRenderer,
{
type Output = Result<AppFactoryService<T::Service, Err>, ()>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
// async data factories
let mut idx = 0;
while idx < this.data_factories_fut.len() {
match Pin::new(&mut this.data_factories_fut[idx]).poll(cx)? {
Poll::Ready(f) => {
this.data_factories.push(f);
let _ = this.data_factories_fut.remove(idx);
}
Poll::Pending => idx += 1,
}
}
if this.endpoint.is_none() {
if let Poll::Ready(srv) = this.endpoint_fut.poll(cx)? {
*this.endpoint = Some(srv);
}
}
if this.endpoint.is_some() && this.data_factories_fut.is_empty() {
// create app data container
let mut data = this.extensions.take().unwrap();
for f in this.data.iter() {
f.create(&mut data);
}
for f in this.data_factories.iter() {
f.create(&mut data);
}
Poll::Ready(Ok(AppFactoryService {
service: this.endpoint.take().unwrap(),
rmap: this.rmap.clone(),
config: this.config.clone(),
data: Rc::new(data),
pool: HttpRequestPool::create(),
_t: PhantomData,
}))
} else {
Poll::Pending
}
}
}
/// Service to convert `Request` to a `WebRequest<Err>`
pub struct AppFactoryService<T, Err>
where
T: Service<
Request = WebRequest<Err>,
Response = WebResponse,
Error = Err::Container,
>,
Err: ErrorRenderer,
{
service: T,
rmap: Rc<ResourceMap>,
config: AppConfig,
data: Rc<Extensions>,
pool: &'static HttpRequestPool,
_t: PhantomData<Err>,
}
impl<T, Err> Service for AppFactoryService<T, Err>
where
T: Service<
Request = WebRequest<Err>,
Response = WebResponse,
Error = Err::Container,
>,
Err: ErrorRenderer,
{
type Request = Request;
type Response = WebResponse;
type Error = T::Error;
type Future = T::Future;
#[inline]
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(cx)
}
#[inline]
fn poll_shutdown(&self, cx: &mut Context<'_>, is_error: bool) -> Poll<()> {
self.service.poll_shutdown(cx, is_error)
}
fn call(&self, req: Request) -> Self::Future {
let (head, payload) = req.into_parts();
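        // Reuse a pooled HttpRequest when one is available; this avoids
        // re-allocating the inner Rc for every incoming request.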
let req = if let Some(mut req) = self.pool.get_request() {
let inner = Rc::get_mut(&mut req.0).unwrap();
inner.path.set(head.uri.clone());
inner.head = head;
inner.payload = payload;
inner.app_data = self.data.clone();
req
} else {
HttpRequest::new(
Path::new(head.uri.clone()),
head,
payload,
self.rmap.clone(),
self.config.clone(),
self.data.clone(),
self.pool,
)
};
self.service.call(WebRequest::new(req))
}
}
impl<T, Err> Drop for AppFactoryService<T, Err>
where
T: Service<
Request = WebRequest<Err>,
Response = WebResponse,
Error = Err::Container,
>,
Err: ErrorRenderer,
{
fn drop(&mut self) {
self.pool.clear();
}
}
pub struct AppRoutingFactory<Err: ErrorRenderer> {
services: Rc<Vec<(ResourceDef, HttpNewService<Err>, RefCell<Option<Guards>>)>>,
default: Rc<HttpNewService<Err>>,
case_insensitive: bool,
}
impl<Err: ErrorRenderer> ServiceFactory for AppRoutingFactory<Err> {
type Config = ();
type Request = WebRequest<Err>;
type Response = WebResponse;
type Error = Err::Container;
type InitError = ();
type Service = AppRouting<Err>;
type Future = AppRoutingFactoryResponse<Err>;
fn new_service(&self, _: ()) -> Self::Future {
AppRoutingFactoryResponse {
fut: self
.services
.iter()
.map(|(path, service, guards)| {
CreateAppRoutingItem::Future(
Some(path.clone()),
guards.borrow_mut().take(),
service.new_service(()).boxed_local(),
)
})
.collect(),
default: None,
default_fut: Some(self.default.new_service(())),
case_insensitive: self.case_insensitive,
}
}
}
type HttpServiceFut<Err> = LocalBoxFuture<'static, Result<HttpService<Err>, ()>>;
/// Create app service
#[doc(hidden)]
pub struct AppRoutingFactoryResponse<Err: ErrorRenderer> {
fut: Vec<CreateAppRoutingItem<Err>>,
default: Option<HttpService<Err>>,
default_fut: Option<LocalBoxFuture<'static, Result<HttpService<Err>, ()>>>,
case_insensitive: bool,
}
enum CreateAppRoutingItem<Err: ErrorRenderer> {
Future(Option<ResourceDef>, Option<Guards>, HttpServiceFut<Err>),
Service(ResourceDef, Option<Guards>, HttpService<Err>),
}
impl<Err: ErrorRenderer> Future for AppRoutingFactoryResponse<Err> {
type Output = Result<AppRouting<Err>, ()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut done = true;
if let Some(ref mut fut) = self.default_fut {
match Pin::new(fut).poll(cx)? {
Poll::Ready(default) => self.default = Some(default),
Poll::Pending => done = false,
}
}
// poll http services
for item in &mut self.fut {
let res = match item {
CreateAppRoutingItem::Future(
ref mut path,
ref mut guards,
ref mut fut,
) => match Pin::new(fut).poll(cx) {
Poll::Ready(Ok(service)) => {
Some((path.take().unwrap(), guards.take(), service))
}
Poll::Ready(Err(_)) => return Poll::Ready(Err(())),
Poll::Pending => {
done = false;
None
}
},
CreateAppRoutingItem::Service(_, _, _) => continue,
};
if let Some((path, guards, service)) = res {
*item = CreateAppRoutingItem::Service(path, guards, service);
}
}
if done {
let mut router =
self.fut
.drain(..)
.fold(Router::build(), |mut router, item| {
match item {
CreateAppRoutingItem::Service(path, guards, service) => {
router.rdef(path, service).2 = guards;
}
CreateAppRoutingItem::Future(_, _, _) => unreachable!(),
}
router
});
if self.case_insensitive {
router.case_insensitive();
}
Poll::Ready(Ok(AppRouting {
ready: None,
router: router.finish(),
default: self.default.take(),
}))
} else {
Poll::Pending
}
}
}
pub struct AppRouting<Err: ErrorRenderer> {
router: Router<HttpService<Err>, Guards>,
ready: Option<(WebRequest<Err>, ResourceInfo)>,
default: Option<HttpService<Err>>,
}
impl<Err: ErrorRenderer> Service for AppRouting<Err> {
type Request = WebRequest<Err>;
type Response = WebResponse;
type Error = Err::Container;
type Future = BoxResponse<Err>;
#[inline]
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.ready.is_none() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn call(&self, mut req: WebRequest<Err>) -> Self::Future {
let res = self.router.recognize_checked(&mut req, |req, guards| {
if let Some(guards) = guards {
for f in guards {
if !f.check(req.head()) {
return false;
}
}
}
true
});
if let Some((srv, _info)) = res {
srv.call(req)
} else if let Some(ref default) = self.default {
default.call(req)
} else {
let req = req.into_parts().0;
ok(WebResponse::new(Response::NotFound().finish(), req)).boxed_local()
}
}
}
/// Wrapper service for routing
pub struct AppEntry<Err: ErrorRenderer> {
factory: Rc<RefCell<Option<AppRoutingFactory<Err>>>>,
}
impl<Err: ErrorRenderer> AppEntry<Err> {
pub fn new(factory: Rc<RefCell<Option<AppRoutingFactory<Err>>>>) -> Self {
AppEntry { factory }
}
}
impl<Err: ErrorRenderer> ServiceFactory for AppEntry<Err> {
type Config = ();
type Request = WebRequest<Err>;
type Response = WebResponse;
type Error = Err::Container;
type InitError = ();
type Service = AppRouting<Err>;
type Future = AppRoutingFactoryResponse<Err>;
fn new_service(&self, _: ()) -> Self::Future {
self.factory.borrow_mut().as_mut().unwrap().new_service(())
}
}
#[cfg(test)]
mod tests {
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use crate::service::Service;
use crate::web::test::{init_service, TestRequest};
use crate::web::{self, App, HttpResponse};
struct DropData(Arc<AtomicBool>);
impl Drop for DropData {
fn drop(&mut self) {
self.0.store(true, Ordering::Relaxed);
}
}
#[ntex_rt::test]
async fn test_drop_data() {
let data = Arc::new(AtomicBool::new(false));
{
let app =
init_service(App::new().data(DropData(data.clone())).service(
web::resource("/test").to(|| async { HttpResponse::Ok() }),
))
.await;
let req = TestRequest::with_uri("/test").to_request();
let _ = app.call(req).await.unwrap();
}
assert!(data.load(Ordering::Relaxed));
}
}
| 30.859345 | 91 | 0.546079 |
080abfd5478ac2c9580d170da96c001acf12fbdf
| 3,748 |
use rune::{Errors, Options, Sources, Warnings};
use runestick::{Any, Context, Module, Protocol, Source, Value, Vm};
use std::sync::Arc;
#[test]
fn test_external_ops() {
/// Test case for a single operation.
macro_rules! test_case {
([$($op:tt)*], $protocol:ident, $derived:tt, $initial:literal, $arg:literal, $expected:literal) => {{
#[derive(Debug, Default, Any)]
struct External {
value: i64,
field: i64,
#[rune($derived)]
derived: i64,
#[rune($derived = "External::custom")]
custom: i64,
}
impl External {
fn value(&mut self, value: i64) {
self.value $($op)* value;
}
fn field(&mut self, value: i64) {
self.field $($op)* value;
}
fn custom(&mut self, value: i64) {
self.custom $($op)* value;
}
}
let mut module = Module::new();
module.ty::<External>().unwrap();
module
.inst_fn(Protocol::$protocol, External::value)
.unwrap();
module
.field_fn(Protocol::$protocol, "field", External::field)
.unwrap();
let mut context = Context::with_default_modules().unwrap();
context.install(&module).unwrap();
let mut sources = Sources::new();
sources.insert(Source::new(
"test",
format!(r#"
pub fn type(number) {{
number {op} {arg};
number.field {op} {arg};
number.derived {op} {arg};
number.custom {op} {arg};
}}
"#, op = stringify!($($op)*), arg = stringify!($arg)),
));
let mut errors = Errors::new();
let unit = rune::load_sources(
&context,
&Options::default(),
&mut sources,
&mut errors,
&mut Warnings::disabled(),
)
.unwrap();
let unit = Arc::new(unit);
let vm = Vm::new(Arc::new(context.runtime()), unit);
{
let mut foo = External::default();
foo.value = $initial;
foo.field = $initial;
foo.derived = $initial;
foo.custom = $initial;
let output = vm.clone().call(&["type"], (&mut foo,)).unwrap();
assert_eq!(foo.value, $expected, "{} != {} (value)", foo.value, $expected);
assert_eq!(foo.field, $expected, "{} != {} (field)", foo.value, $expected);
assert_eq!(foo.derived, $expected, "{} != {} (derived)", foo.value, $expected);
assert_eq!(foo.custom, $expected, "{} != {} (custom)", foo.value, $expected);
assert!(matches!(output, Value::Unit));
}
}};
}
test_case!([+=], ADD_ASSIGN, add_assign, 0, 3, 3);
test_case!([-=], SUB_ASSIGN, sub_assign, 4, 3, 1);
test_case!([*=], MUL_ASSIGN, mul_assign, 8, 2, 16);
test_case!([/=], DIV_ASSIGN, div_assign, 8, 3, 2);
test_case!([&=], BIT_AND_ASSIGN, bit_and_assign, 0b1001, 0b0011, 0b0001);
test_case!([|=], BIT_OR_ASSIGN, bit_or_assign, 0b1001, 0b0011, 0b1011);
test_case!([^=], BIT_XOR_ASSIGN, bit_xor_assign, 0b1001, 0b0011, 0b1010);
test_case!([<<=], SHL_ASSIGN, shl_assign, 0b1001, 0b0001, 0b10010);
test_case!([>>=], SHR_ASSIGN, shr_assign, 0b1001, 0b0001, 0b100);
test_case!([%=], REM_ASSIGN, rem_assign, 25, 10, 5);
}
| 36.038462 | 109 | 0.470918 |
181ca2cbff5f8668a5abe363da48ec652b75b0e8
| 586 |
use cc::Build;
use std::path::Path;
#[cfg(windows)]
use winres::WindowsResource;
#[cfg(windows)]
fn set_icon() {
let mut res = WindowsResource::new();
res.set_icon("../../ci/windows/logo.ico");
res.compile().unwrap();
}
#[cfg(unix)]
fn set_icon() {}
fn main() {
let project_root = Path::new(env!("CARGO_MANIFEST_DIR"));
let native_src = project_root.join("native");
set_icon();
Build::new()
.file(native_src.join("libxml.c"))
.flag_if_supported("-Wno-unused-parameter") // unused parameter in silent callback
.compile("mylib");
}
| 22.538462 | 90 | 0.627986 |
097439b967c7035bee4c5bcf5a1785b4a7ce1d04
| 411 |
fn main() {
struct Foo;
trait Shape { fn area(&self) -> f64; }
trait Circle : Shape { fn radius(&self) -> f64; }
impl Shape for Foo {
fn area(&self) -> f64 {
0.0
}
}
impl Circle for Foo {
fn radius(&self) -> f64 {
println!("calling area: {}", self.area());
0.0
}
}
let c = Foo;
c.radius();
}
| 18.681818 | 54 | 0.418491 |
de5cf76ea129541b308c5a1cfe83777db69dbc34
| 20,020 |
#![deny(missing_docs)]
//! Structured access to the output of `cargo metadata` and `cargo --message-format=json`.
//! Usually used from within a `cargo-*` executable
//!
//! See the [cargo book](https://doc.rust-lang.org/cargo/index.html) for
//! details on cargo itself.
//!
//! ## Examples
//!
//! ```rust
//! # extern crate cargo_metadata;
//! # use std::path::Path;
//! let mut args = std::env::args().skip_while(|val| !val.starts_with("--manifest-path"));
//!
//! let mut cmd = cargo_metadata::MetadataCommand::new();
//! let manifest_path = match args.next() {
//! Some(ref p) if p == "--manifest-path" => {
//! cmd.manifest_path(args.next().unwrap());
//! }
//! Some(p) => {
//! cmd.manifest_path(p.trim_start_matches("--manifest-path="));
//! }
//! None => {}
//! };
//!
//! let _metadata = cmd.exec().unwrap();
//! ```
//!
//! Pass features flags
//!
//! ```rust
//! # // This should be kept in sync with the equivalent example in the readme.
//! # extern crate cargo_metadata;
//! # use std::path::Path;
//! # fn main() {
//! use cargo_metadata::{MetadataCommand, CargoOpt};
//!
//! let _metadata = MetadataCommand::new()
//! .manifest_path("./Cargo.toml")
//! .features(CargoOpt::AllFeatures)
//! .exec()
//! .unwrap();
//! # }
//! ```
//!
//! Parse message-format output:
//!
//! ```
//! # extern crate cargo_metadata;
//! use std::process::{Stdio, Command};
//! use cargo_metadata::Message;
//!
//! let mut command = Command::new("cargo")
//! .args(&["build", "--message-format=json-render-diagnostics"])
//! .stdout(Stdio::piped())
//! .spawn()
//! .unwrap();
//!
//! let reader = std::io::BufReader::new(command.stdout.take().unwrap());
//! for message in cargo_metadata::Message::parse_stream(reader) {
//! match message.unwrap() {
//! Message::CompilerMessage(msg) => {
//! println!("{:?}", msg);
//! },
//! Message::CompilerArtifact(artifact) => {
//! println!("{:?}", artifact);
//! },
//! Message::BuildScriptExecuted(script) => {
//! println!("{:?}", script);
//! },
//! Message::BuildFinished(finished) => {
//! println!("{:?}", finished);
//! },
//! _ => () // Unknown message
//! }
//! }
//!
//! let output = command.wait().expect("Couldn't get cargo's exit status");
//! ```
use std::collections::HashMap;
use std::env;
use std::fmt;
use std::path::PathBuf;
use std::process::Command;
use std::str::from_utf8;
pub use semver::Version;
pub use dependency::{Dependency, DependencyKind};
use diagnostic::Diagnostic;
pub use errors::{Error, Result};
#[allow(deprecated)]
pub use messages::parse_messages;
pub use messages::{
Artifact, ArtifactProfile, BuildFinished, BuildScript, CompilerMessage, Message, MessageIter,
};
use serde::{Deserialize, Serialize};
mod dependency;
pub mod diagnostic;
mod errors;
mod messages;
/// An "opaque" identifier for a package.
/// It is possible to inspect the `repr` field, if the need arises, but its
/// precise format is an implementation detail and is subject to change.
///
/// `Metadata` can be indexed by `PackageId`.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[serde(transparent)]
pub struct PackageId {
/// The underlying string representation of id.
pub repr: String,
}
impl std::fmt::Display for PackageId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.repr, f)
}
}
// Helpers for default metadata fields
fn is_null(value: &serde_json::Value) -> bool {
match value {
serde_json::Value::Null => true,
_ => false,
}
}
#[derive(Clone, Serialize, Deserialize, Debug)]
/// Starting point for metadata returned by `cargo metadata`
pub struct Metadata {
/// A list of all crates referenced by this crate (and the crate itself)
pub packages: Vec<Package>,
/// A list of all workspace members
pub workspace_members: Vec<PackageId>,
/// Dependencies graph
pub resolve: Option<Resolve>,
/// Workspace root
pub workspace_root: PathBuf,
/// Build directory
pub target_directory: PathBuf,
/// The workspace-level metadata object. Null if non-existent.
#[serde(rename = "metadata", default, skip_serializing_if = "is_null")]
pub workspace_metadata: serde_json::Value,
version: usize,
#[doc(hidden)]
#[serde(skip)]
__do_not_match_exhaustively: (),
}
impl Metadata {
/// Get the root package of this metadata instance.
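    ///
    /// A hedged usage sketch (assumes `exec` succeeds; a pure virtual
    /// workspace has no root package and yields `None`):
    ///
    /// ```no_run
    /// let metadata = cargo_metadata::MetadataCommand::new().exec().unwrap();
    /// if let Some(root) = metadata.root_package() {
    ///     println!("root package: {} v{}", root.name, root.version);
    /// }
    /// ```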
pub fn root_package(&self) -> Option<&Package> {
let root = self.resolve.as_ref()?.root.as_ref()?;
self.packages.iter().find(|pkg| &pkg.id == root)
}
}
impl<'a> std::ops::Index<&'a PackageId> for Metadata {
type Output = Package;
fn index(&self, idx: &'a PackageId) -> &Package {
self.packages
.iter()
.find(|p| p.id == *idx)
.unwrap_or_else(|| panic!("no package with this id: {:?}", idx))
}
}
#[derive(Clone, Serialize, Deserialize, Debug)]
/// A dependency graph
pub struct Resolve {
/// Nodes in a dependencies graph
pub nodes: Vec<Node>,
/// The crate for which the metadata was read.
pub root: Option<PackageId>,
#[doc(hidden)]
#[serde(skip)]
__do_not_match_exhaustively: (),
}
#[derive(Clone, Serialize, Deserialize, Debug)]
/// A node in a dependencies graph
pub struct Node {
/// An opaque identifier for a package
pub id: PackageId,
/// Dependencies in a structured format.
///
/// `deps` handles renamed dependencies whereas `dependencies` does not.
#[serde(default)]
pub deps: Vec<NodeDep>,
/// List of opaque identifiers for this node's dependencies.
/// It doesn't support renamed dependencies. See `deps`.
pub dependencies: Vec<PackageId>,
/// Features enabled on the crate
#[serde(default)]
pub features: Vec<String>,
#[doc(hidden)]
#[serde(skip)]
__do_not_match_exhaustively: (),
}
#[derive(Clone, Serialize, Deserialize, Debug)]
/// A dependency in a node
pub struct NodeDep {
/// The name of the dependency's library target.
/// If the crate was renamed, it is the new name.
pub name: String,
/// Package ID (opaque unique identifier)
pub pkg: PackageId,
/// The kinds of dependencies.
///
/// This field was added in Rust 1.41.
#[serde(default)]
pub dep_kinds: Vec<DepKindInfo>,
#[doc(hidden)]
#[serde(skip)]
__do_not_match_exhaustively: (),
}
#[derive(Clone, Serialize, Deserialize, Debug)]
/// Information about a dependency kind.
pub struct DepKindInfo {
/// The kind of dependency.
#[serde(deserialize_with = "dependency::parse_dependency_kind")]
pub kind: DependencyKind,
/// The target platform for the dependency.
///
/// This is `None` if it is not a target dependency.
///
/// Use the [`Display`] trait to access the contents.
///
/// By default all platform dependencies are included in the resolve
/// graph. Use Cargo's `--filter-platform` flag if you only want to
/// include dependencies for a specific platform.
///
/// [`Display`]: std::fmt::Display
pub target: Option<dependency::Platform>,
#[doc(hidden)]
#[serde(skip)]
__do_not_match_exhaustively: (),
}
#[derive(Clone, Serialize, Deserialize, Debug)]
/// One or more crates described by a single `Cargo.toml`
///
/// Each [`target`][Package::targets] of a `Package` will be built as a crate.
/// For more information, see <https://doc.rust-lang.org/book/ch07-01-packages-and-crates.html>.
pub struct Package {
/// Name as given in the `Cargo.toml`
pub name: String,
/// Version given in the `Cargo.toml`
pub version: Version,
/// Authors given in the `Cargo.toml`
#[serde(default)]
pub authors: Vec<String>,
/// An opaque identifier for a package
pub id: PackageId,
/// The source of the package, e.g.
/// crates.io or `None` for local projects.
pub source: Option<Source>,
/// Description as given in the `Cargo.toml`
pub description: Option<String>,
/// List of dependencies of this particular package
pub dependencies: Vec<Dependency>,
/// License as given in the `Cargo.toml`
pub license: Option<String>,
/// If the package is using a nonstandard license, this key may be specified instead of
/// `license`, and must point to a file relative to the manifest.
pub license_file: Option<PathBuf>,
/// Targets provided by the crate (lib, bin, example, test, ...)
pub targets: Vec<Target>,
/// Features provided by the crate, mapped to the features required by that feature.
pub features: HashMap<String, Vec<String>>,
/// Path containing the `Cargo.toml`
pub manifest_path: PathBuf,
/// Categories as given in the `Cargo.toml`
#[serde(default)]
pub categories: Vec<String>,
/// Keywords as given in the `Cargo.toml`
#[serde(default)]
pub keywords: Vec<String>,
/// Readme as given in the `Cargo.toml`
pub readme: Option<PathBuf>,
/// Repository as given in the `Cargo.toml`
// can't use `url::Url` because that requires a more recent stable compiler
pub repository: Option<String>,
/// Homepage as given in the `Cargo.toml`
///
/// On versions of cargo before 1.49, this will always be [`None`].
pub homepage: Option<String>,
/// Documentation URL as given in the `Cargo.toml`
///
/// On versions of cargo before 1.49, this will always be [`None`].
pub documentation: Option<String>,
/// Default Rust edition for the package
///
/// Beware that individual targets may specify their own edition in
/// [`Target::edition`].
#[serde(default = "edition_default")]
pub edition: String,
/// Contents of the free form package.metadata section
///
/// This contents can be serialized to a struct using serde:
///
/// ```rust
/// use serde::Deserialize;
/// use serde_json::json;
///
/// #[derive(Debug, Deserialize)]
/// struct SomePackageMetadata {
/// some_value: i32,
/// }
///
/// fn main() {
/// let value = json!({
/// "some_value": 42,
/// });
///
/// let package_metadata: SomePackageMetadata = serde_json::from_value(value).unwrap();
/// assert_eq!(package_metadata.some_value, 42);
/// }
///
/// ```
#[serde(default, skip_serializing_if = "is_null")]
pub metadata: serde_json::Value,
/// The name of a native library the package is linking to.
pub links: Option<String>,
/// List of registries to which this package may be published.
///
/// Publishing is unrestricted if `None`, and forbidden if the `Vec` is empty.
///
/// This is always `None` if running with a version of Cargo older than 1.39.
pub publish: Option<Vec<String>>,
#[doc(hidden)]
#[serde(skip)]
__do_not_match_exhaustively: (),
}
impl Package {
/// Full path to the license file if one is present in the manifest
pub fn license_file(&self) -> Option<PathBuf> {
self.license_file.as_ref().map(|file| {
self.manifest_path
.parent()
.unwrap_or(&self.manifest_path)
.join(file)
})
}
/// Full path to the readme file if one is present in the manifest
pub fn readme(&self) -> Option<PathBuf> {
self.readme
.as_ref()
.map(|file| self.manifest_path.join(file))
}
}
/// The source of a package such as crates.io.
#[derive(Clone, Serialize, Deserialize, Debug)]
#[serde(transparent)]
pub struct Source {
/// The underlying string representation of a source.
pub repr: String,
}
impl Source {
/// Returns true if the source is crates.io.
pub fn is_crates_io(&self) -> bool {
self.repr == "registry+https://github.com/rust-lang/crates.io-index"
}
}
impl std::fmt::Display for Source {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.repr, f)
}
}
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash)]
/// A single target (lib, bin, example, ...) provided by a crate
pub struct Target {
/// Name as given in the `Cargo.toml` or generated from the file name
pub name: String,
/// Kind of target ("bin", "example", "test", "bench", "lib")
pub kind: Vec<String>,
/// Almost the same as `kind`, except when an example is a library instead of an executable.
    /// In that case `crate_types` contains things like `rlib` and `dylib`, while `kind` is `example`.
#[serde(default)]
pub crate_types: Vec<String>,
#[serde(default)]
#[serde(rename = "required-features")]
/// This target is built only if these features are enabled.
/// It doesn't apply to `lib` targets.
pub required_features: Vec<String>,
/// Path to the main source file of the target
pub src_path: PathBuf,
/// Rust edition for this target
#[serde(default = "edition_default")]
pub edition: String,
/// Whether or not this target has doc tests enabled, and the target is
/// compatible with doc testing.
///
/// This is always `true` if running with a version of Cargo older than 1.37.
#[serde(default = "default_true")]
pub doctest: bool,
/// Whether or not this target is tested by default by `cargo test`.
///
/// This is always `true` if running with a version of Cargo older than 1.47.
#[serde(default = "default_true")]
pub test: bool,
#[doc(hidden)]
#[serde(skip)]
__do_not_match_exhaustively: (),
}
fn default_true() -> bool {
true
}
fn edition_default() -> String {
"2015".to_string()
}
/// Cargo features flags
#[derive(Debug, Clone)]
pub enum CargoOpt {
/// Run cargo with `--features-all`
AllFeatures,
/// Run cargo with `--no-default-features`
NoDefaultFeatures,
/// Run cargo with `--features <FEATURES>`
SomeFeatures(Vec<String>),
}
/// A builder for configuring a `cargo metadata` invocation.
#[derive(Debug, Clone, Default)]
pub struct MetadataCommand {
cargo_path: Option<PathBuf>,
manifest_path: Option<PathBuf>,
current_dir: Option<PathBuf>,
no_deps: bool,
    /// Collected features from `CargoOpt::SomeFeatures(..)`
features: Vec<String>,
/// Latched `CargoOpt::AllFeatures`
all_features: bool,
/// Latched `CargoOpt::NoDefaultFeatures`
no_default_features: bool,
other_options: Vec<String>,
}
impl MetadataCommand {
/// Creates a default `cargo metadata` command, which will look for
/// `Cargo.toml` in the ancestors of the current directory.
pub fn new() -> MetadataCommand {
MetadataCommand::default()
}
    /// Path to `cargo` executable. If not set, this will use the
    /// `$CARGO` environment variable, and if that is not set, will
    /// simply be `cargo`.
pub fn cargo_path(&mut self, path: impl Into<PathBuf>) -> &mut MetadataCommand {
self.cargo_path = Some(path.into());
self
}
/// Path to `Cargo.toml`
pub fn manifest_path(&mut self, path: impl Into<PathBuf>) -> &mut MetadataCommand {
self.manifest_path = Some(path.into());
self
}
/// Current directory of the `cargo metadata` process.
pub fn current_dir(&mut self, path: impl Into<PathBuf>) -> &mut MetadataCommand {
self.current_dir = Some(path.into());
self
}
/// Output information only about the root package and don't fetch dependencies.
pub fn no_deps(&mut self) -> &mut MetadataCommand {
self.no_deps = true;
self
}
/// Which features to include.
///
/// Call this multiple times to specify advanced feature configurations:
///
/// ```no_run
/// # use cargo_metadata::{CargoOpt, MetadataCommand};
/// MetadataCommand::new()
/// .features(CargoOpt::NoDefaultFeatures)
/// .features(CargoOpt::SomeFeatures(vec!["feat1".into(), "feat2".into()]))
/// .features(CargoOpt::SomeFeatures(vec!["feat3".into()]))
/// // ...
/// # ;
/// ```
///
/// # Panics
///
/// `cargo metadata` rejects multiple `--no-default-features` flags. Similarly, the `features()`
    /// method panics when specifying multiple `CargoOpt::NoDefaultFeatures`:
///
/// ```should_panic
/// # use cargo_metadata::{CargoOpt, MetadataCommand};
/// MetadataCommand::new()
/// .features(CargoOpt::NoDefaultFeatures)
/// .features(CargoOpt::NoDefaultFeatures) // <-- panic!
/// // ...
/// # ;
/// ```
///
/// The method also panics for multiple `CargoOpt::AllFeatures` arguments:
///
/// ```should_panic
/// # use cargo_metadata::{CargoOpt, MetadataCommand};
/// MetadataCommand::new()
/// .features(CargoOpt::AllFeatures)
/// .features(CargoOpt::AllFeatures) // <-- panic!
/// // ...
/// # ;
/// ```
pub fn features(&mut self, features: CargoOpt) -> &mut MetadataCommand {
match features {
CargoOpt::SomeFeatures(features) => self.features.extend(features),
CargoOpt::NoDefaultFeatures => {
assert!(
!self.no_default_features,
"Do not supply CargoOpt::NoDefaultFeatures more than once!"
);
self.no_default_features = true;
}
CargoOpt::AllFeatures => {
assert!(
!self.all_features,
"Do not supply CargoOpt::AllFeatures more than once!"
);
self.all_features = true;
}
}
self
}
/// Arbitrary command line flags to pass to `cargo`. These will be added
/// to the end of the command line invocation.
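    ///
    /// A hedged sketch that forwards cargo's `--filter-platform` flag
    /// (mentioned above for platform-specific dependency resolution):
    ///
    /// ```no_run
    /// # use cargo_metadata::MetadataCommand;
    /// let _metadata = MetadataCommand::new()
    ///     .other_options(vec![
    ///         "--filter-platform".to_string(),
    ///         "x86_64-unknown-linux-gnu".to_string(),
    ///     ])
    ///     .exec()
    ///     .unwrap();
    /// ```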
pub fn other_options(&mut self, options: impl Into<Vec<String>>) -> &mut MetadataCommand {
self.other_options = options.into();
self
}
/// Builds a command for `cargo metadata`. This is the first
/// part of the work of `exec`.
pub fn cargo_command(&self) -> Command {
let cargo = self
.cargo_path
.clone()
.or_else(|| env::var("CARGO").map(PathBuf::from).ok())
.unwrap_or_else(|| PathBuf::from("cargo"));
let mut cmd = Command::new(cargo);
cmd.args(&["metadata", "--format-version", "1"]);
if self.no_deps {
cmd.arg("--no-deps");
}
if let Some(path) = self.current_dir.as_ref() {
cmd.current_dir(path);
}
if !self.features.is_empty() {
cmd.arg("--features").arg(self.features.join(","));
}
if self.all_features {
cmd.arg("--all-features");
}
if self.no_default_features {
cmd.arg("--no-default-features");
}
if let Some(manifest_path) = &self.manifest_path {
cmd.arg("--manifest-path").arg(manifest_path.as_os_str());
}
cmd.args(&self.other_options);
cmd
}
/// Parses `cargo metadata` output. `data` must have been
/// produced by a command built with `cargo_command`.
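    ///
    /// A hedged sketch of the manual two-step flow (roughly what `exec` does,
    /// minus locating the JSON line and error handling):
    ///
    /// ```no_run
    /// # use cargo_metadata::MetadataCommand;
    /// let output = MetadataCommand::new().cargo_command().output().unwrap();
    /// let stdout = std::str::from_utf8(&output.stdout).unwrap();
    /// let metadata = MetadataCommand::parse(stdout).unwrap();
    /// ```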
pub fn parse<T: AsRef<str>>(data: T) -> Result<Metadata> {
let meta = serde_json::from_str(data.as_ref())?;
Ok(meta)
}
/// Runs configured `cargo metadata` and returns parsed `Metadata`.
pub fn exec(&self) -> Result<Metadata> {
let output = self.cargo_command().output()?;
if !output.status.success() {
return Err(Error::CargoMetadata {
stderr: String::from_utf8(output.stderr)?,
});
}
let stdout = from_utf8(&output.stdout)?
.lines()
.find(|line| line.starts_with('{'))
.ok_or_else(|| Error::NoJson)?;
Self::parse(stdout)
}
}
| 32.819672 | 100 | 0.607642 |
ac81cea9296b7d2dfdfa9340f92b15419a761b62
| 122 |
use dynomite_derive::Attributes;
#[derive(Attributes)]
struct Foo {
#[dynomite(rename)]
val: u32
}
fn main() {}
| 12.2 | 32 | 0.647541 |
d75ff656bcaceb40cb0e09938a7f57e44b3b84b1
| 1,396 |
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use aws_hyper::StandardClient;
use aws_sdk_kms::operation::GenerateRandom;
use aws_sdk_kms::{Config, Region};
/// Creates a cryptographically secure random byte string in __us-east-1__.
#[tokio::main]
async fn main() {
let config = Config::builder()
// region can also be loaded from AWS_DEFAULT_REGION, just remove this line.
.region(Region::new("us-east-1"))
// creds loaded from environment variables, or they can be hard coded.
// Other credential providers not currently supported
.build();
// NB: This example uses the "low level internal API" for demonstration purposes
// This is sometimes necessary to get precise control over behavior, but in most cases
// using `kms::Client` is recommended.
let client: StandardClient = aws_hyper::Client::https();
let data = client
.call(
GenerateRandom::builder()
.number_of_bytes(64)
.build()
.expect("valid operation")
.make_operation(&config)
.expect("valid operation"),
)
.await
.expect("failed to generate random data");
println!("{:?}", data);
assert_eq!(data.plaintext.expect("should have data").as_ref().len(), 64);
}
| 37.72973 | 90 | 0.637536 |
fcbaee7ebd4f9cb82528743c67c5d05d1090e920
| 20,081 |
//! Implementation of in-band secret distribution abstractions
//! for Zcash transactions. The implementations here provide
//! functionality that is shared between the Sapling and Orchard
//! protocols.
// Catch documentation errors caused by code changes.
#![deny(broken_intra_doc_links)]
#![deny(unsafe_code)]
// TODO: #![deny(missing_docs)]
use std::convert::TryInto;
use chacha20::{
cipher::{NewCipher, StreamCipher, StreamCipherSeek},
ChaCha20,
};
use chacha20poly1305::{
aead::{AeadInPlace, NewAead},
ChaCha20Poly1305,
};
use rand_core::RngCore;
use subtle::{Choice, ConstantTimeEq};
pub mod batch;
pub const COMPACT_NOTE_SIZE: usize = 1 + // version
11 + // diversifier
8 + // value
32; // rseed (or rcm prior to ZIP 212)
pub const NOTE_PLAINTEXT_SIZE: usize = COMPACT_NOTE_SIZE + 512;
pub const OUT_PLAINTEXT_SIZE: usize = 32 + // pk_d
32; // esk
pub const AEAD_TAG_SIZE: usize = 16;
pub const ENC_CIPHERTEXT_SIZE: usize = NOTE_PLAINTEXT_SIZE + AEAD_TAG_SIZE;
pub const OUT_CIPHERTEXT_SIZE: usize = OUT_PLAINTEXT_SIZE + AEAD_TAG_SIZE;
/// A symmetric key that can be used to recover a single Sapling or Orchard output.
pub struct OutgoingCipherKey(pub [u8; 32]);
impl From<[u8; 32]> for OutgoingCipherKey {
fn from(ock: [u8; 32]) -> Self {
OutgoingCipherKey(ock)
}
}
impl AsRef<[u8]> for OutgoingCipherKey {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
#[derive(Clone, Debug)]
pub struct EphemeralKeyBytes(pub [u8; 32]);
impl AsRef<[u8]> for EphemeralKeyBytes {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl From<[u8; 32]> for EphemeralKeyBytes {
fn from(value: [u8; 32]) -> EphemeralKeyBytes {
EphemeralKeyBytes(value)
}
}
impl ConstantTimeEq for EphemeralKeyBytes {
fn ct_eq(&self, other: &Self) -> Choice {
self.0.ct_eq(&other.0)
}
}
pub struct NotePlaintextBytes(pub [u8; NOTE_PLAINTEXT_SIZE]);
pub struct OutPlaintextBytes(pub [u8; OUT_PLAINTEXT_SIZE]);
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum NoteValidity {
Valid,
Invalid,
}
pub trait Domain {
type EphemeralSecretKey: ConstantTimeEq;
type EphemeralPublicKey;
type SharedSecret;
type SymmetricKey: AsRef<[u8]>;
type Note;
type Recipient;
type DiversifiedTransmissionKey;
type IncomingViewingKey;
type OutgoingViewingKey;
type ValueCommitment;
type ExtractedCommitment;
type ExtractedCommitmentBytes: Eq + for<'a> From<&'a Self::ExtractedCommitment>;
type Memo;
fn derive_esk(note: &Self::Note) -> Option<Self::EphemeralSecretKey>;
fn get_pk_d(note: &Self::Note) -> Self::DiversifiedTransmissionKey;
fn ka_derive_public(
note: &Self::Note,
esk: &Self::EphemeralSecretKey,
) -> Self::EphemeralPublicKey;
fn ka_agree_enc(
esk: &Self::EphemeralSecretKey,
pk_d: &Self::DiversifiedTransmissionKey,
) -> Self::SharedSecret;
fn ka_agree_dec(
ivk: &Self::IncomingViewingKey,
epk: &Self::EphemeralPublicKey,
) -> Self::SharedSecret;
fn kdf(secret: Self::SharedSecret, ephemeral_key: &EphemeralKeyBytes) -> Self::SymmetricKey;
    // For now, we just need `recipient` to get `d`; in the future, when we can
    // get that from a Sapling note, the recipient parameter can be removed.
fn note_plaintext_bytes(
note: &Self::Note,
recipient: &Self::Recipient,
memo: &Self::Memo,
) -> NotePlaintextBytes;
fn derive_ock(
ovk: &Self::OutgoingViewingKey,
cv: &Self::ValueCommitment,
cmstar_bytes: &Self::ExtractedCommitmentBytes,
ephemeral_key: &EphemeralKeyBytes,
) -> OutgoingCipherKey;
fn outgoing_plaintext_bytes(
note: &Self::Note,
esk: &Self::EphemeralSecretKey,
) -> OutPlaintextBytes;
fn epk_bytes(epk: &Self::EphemeralPublicKey) -> EphemeralKeyBytes;
fn epk(ephemeral_key: &EphemeralKeyBytes) -> Option<Self::EphemeralPublicKey>;
fn check_epk_bytes<F: Fn(&Self::EphemeralSecretKey) -> NoteValidity>(
note: &Self::Note,
check: F,
) -> NoteValidity;
fn cmstar(note: &Self::Note) -> Self::ExtractedCommitment;
fn parse_note_plaintext_without_memo_ivk(
&self,
ivk: &Self::IncomingViewingKey,
plaintext: &[u8],
) -> Option<(Self::Note, Self::Recipient)>;
fn parse_note_plaintext_without_memo_ovk(
&self,
pk_d: &Self::DiversifiedTransmissionKey,
esk: &Self::EphemeralSecretKey,
ephemeral_key: &EphemeralKeyBytes,
plaintext: &[u8],
) -> Option<(Self::Note, Self::Recipient)>;
// &self is passed here in anticipation of future changes
// to memo handling where the memos may no longer be
// part of the note plaintext.
fn extract_memo(&self, plaintext: &[u8]) -> Self::Memo;
fn extract_pk_d(
out_plaintext: &[u8; OUT_PLAINTEXT_SIZE],
) -> Option<Self::DiversifiedTransmissionKey>;
fn extract_esk(out_plaintext: &[u8; OUT_PLAINTEXT_SIZE]) -> Option<Self::EphemeralSecretKey>;
}
pub trait BatchDomain: Domain {
/// Computes `Self::kdf` on a batch of items.
///
/// For each item in the batch, if the shared secret is `None`, this returns `None` at
/// that position.
fn batch_kdf<'a>(
items: impl Iterator<Item = (Option<Self::SharedSecret>, &'a EphemeralKeyBytes)>,
) -> Vec<Option<Self::SymmetricKey>> {
// Default implementation: do the non-batched thing.
items
.map(|(secret, ephemeral_key)| secret.map(|secret| Self::kdf(secret, ephemeral_key)))
.collect()
}
/// Computes `Self::epk` on a batch of ephemeral keys.
///
/// This is useful for protocols where the underlying curve requires an inversion to
/// parse an encoded point.
///
/// For usability, this returns tuples of the ephemeral keys and the result of parsing
/// them.
fn batch_epk(
ephemeral_keys: impl Iterator<Item = EphemeralKeyBytes>,
) -> Vec<(Option<Self::EphemeralPublicKey>, EphemeralKeyBytes)> {
// Default implementation: do the non-batched thing.
ephemeral_keys
.map(|ephemeral_key| (Self::epk(&ephemeral_key), ephemeral_key))
.collect()
}
}
pub trait ShieldedOutput<D: Domain> {
fn ephemeral_key(&self) -> EphemeralKeyBytes;
fn cmstar_bytes(&self) -> D::ExtractedCommitmentBytes;
fn enc_ciphertext(&self) -> &[u8];
}
/// A struct containing context required for encrypting Sapling and Orchard notes.
///
/// This struct provides a safe API for encrypting Sapling and Orchard notes. In particular, it
/// enforces that fresh ephemeral keys are used for every note, and that the ciphertexts are
/// consistent with each other.
///
/// Implements section 4.19 of the
/// [Zcash Protocol Specification](https://zips.z.cash/protocol/nu5.pdf#saplingandorchardinband)
/// NB: the example code only covers the post-Canopy case.
///
/// # Examples
///
/// ```
/// extern crate ff;
/// extern crate rand_core;
/// extern crate zcash_primitives;
///
/// use ff::Field;
/// use rand_core::OsRng;
/// use zcash_primitives::{
/// consensus::{TEST_NETWORK, TestNetwork, NetworkUpgrade, Parameters},
/// memo::MemoBytes,
/// sapling::{
/// keys::{OutgoingViewingKey, prf_expand},
/// note_encryption::sapling_note_encryption,
/// util::generate_random_rseed,
/// Diversifier, PaymentAddress, Rseed, ValueCommitment
/// },
/// };
///
/// let mut rng = OsRng;
///
/// let diversifier = Diversifier([0; 11]);
/// let pk_d = diversifier.g_d().unwrap();
/// let to = PaymentAddress::from_parts(diversifier, pk_d).unwrap();
/// let ovk = Some(OutgoingViewingKey([0; 32]));
///
/// let value = 1000;
/// let rcv = jubjub::Fr::random(&mut rng);
/// let cv = ValueCommitment {
/// value,
/// randomness: rcv.clone(),
/// };
/// let height = TEST_NETWORK.activation_height(NetworkUpgrade::Canopy).unwrap();
/// let rseed = generate_random_rseed(&TEST_NETWORK, height, &mut rng);
/// let note = to.create_note(value, rseed).unwrap();
/// let cmu = note.cmu();
///
/// let mut enc = sapling_note_encryption::<_, TestNetwork>(ovk, note, to, MemoBytes::empty(), &mut rng);
/// let encCiphertext = enc.encrypt_note_plaintext();
/// let outCiphertext = enc.encrypt_outgoing_plaintext(&cv.commitment().into(), &cmu, &mut rng);
/// ```
pub struct NoteEncryption<D: Domain> {
epk: D::EphemeralPublicKey,
esk: D::EphemeralSecretKey,
note: D::Note,
to: D::Recipient,
memo: D::Memo,
/// `None` represents the `ovk = ⊥` case.
ovk: Option<D::OutgoingViewingKey>,
}
impl<D: Domain> NoteEncryption<D> {
/// Construct a new note encryption context for the specified note,
/// recipient, and memo.
pub fn new(
ovk: Option<D::OutgoingViewingKey>,
note: D::Note,
to: D::Recipient,
memo: D::Memo,
) -> Self {
let esk = D::derive_esk(¬e).expect("ZIP 212 is active.");
Self::new_with_esk(esk, ovk, note, to, memo)
}
/// For use only with Sapling. This method is preserved in order that test code
/// be able to generate pre-ZIP-212 ciphertexts so that tests can continue to
/// cover pre-ZIP-212 transaction decryption.
pub fn new_with_esk(
esk: D::EphemeralSecretKey,
ovk: Option<D::OutgoingViewingKey>,
note: D::Note,
to: D::Recipient,
memo: D::Memo,
) -> Self {
NoteEncryption {
epk: D::ka_derive_public(¬e, &esk),
esk,
note,
to,
memo,
ovk,
}
}
/// Exposes the ephemeral secret key being used to encrypt this note.
pub fn esk(&self) -> &D::EphemeralSecretKey {
&self.esk
}
/// Exposes the encoding of the ephemeral public key being used to encrypt this note.
pub fn epk(&self) -> &D::EphemeralPublicKey {
&self.epk
}
/// Generates `encCiphertext` for this note.
pub fn encrypt_note_plaintext(&self) -> [u8; ENC_CIPHERTEXT_SIZE] {
let pk_d = D::get_pk_d(&self.note);
let shared_secret = D::ka_agree_enc(&self.esk, &pk_d);
let key = D::kdf(shared_secret, &D::epk_bytes(&self.epk));
let input = D::note_plaintext_bytes(&self.note, &self.to, &self.memo);
let mut output = [0u8; ENC_CIPHERTEXT_SIZE];
output[..NOTE_PLAINTEXT_SIZE].copy_from_slice(&input.0);
let tag = ChaCha20Poly1305::new(key.as_ref().into())
.encrypt_in_place_detached(
[0u8; 12][..].into(),
&[],
&mut output[..NOTE_PLAINTEXT_SIZE],
)
.unwrap();
output[NOTE_PLAINTEXT_SIZE..].copy_from_slice(&tag);
output
}
/// Generates `outCiphertext` for this note.
pub fn encrypt_outgoing_plaintext<R: RngCore>(
&self,
cv: &D::ValueCommitment,
cmstar: &D::ExtractedCommitment,
rng: &mut R,
) -> [u8; OUT_CIPHERTEXT_SIZE] {
let (ock, input) = if let Some(ovk) = &self.ovk {
let ock = D::derive_ock(ovk, &cv, &cmstar.into(), &D::epk_bytes(&self.epk));
let input = D::outgoing_plaintext_bytes(&self.note, &self.esk);
(ock, input)
} else {
// ovk = ⊥
let mut ock = OutgoingCipherKey([0; 32]);
let mut input = [0u8; OUT_PLAINTEXT_SIZE];
rng.fill_bytes(&mut ock.0);
rng.fill_bytes(&mut input);
(ock, OutPlaintextBytes(input))
};
let mut output = [0u8; OUT_CIPHERTEXT_SIZE];
output[..OUT_PLAINTEXT_SIZE].copy_from_slice(&input.0);
let tag = ChaCha20Poly1305::new(ock.as_ref().into())
.encrypt_in_place_detached([0u8; 12][..].into(), &[], &mut output[..OUT_PLAINTEXT_SIZE])
.unwrap();
output[OUT_PLAINTEXT_SIZE..].copy_from_slice(&tag);
output
}
}
/// Trial decryption of the full note plaintext by the recipient.
///
/// Attempts to decrypt and validate the given `enc_ciphertext` using the given `ivk`.
/// If successful, the corresponding note and memo are returned, along with the address to
/// which the note was sent.
///
/// Implements section 4.19.2 of the
/// [Zcash Protocol Specification](https://zips.z.cash/protocol/nu5.pdf#decryptivk).
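///
/// A hedged usage sketch (assumes a concrete `Domain` implementation with an
/// in-scope `domain` and `ivk`, and an `output` implementing `ShieldedOutput`):
///
/// ```ignore
/// if let Some((note, recipient, memo)) = try_note_decryption(&domain, &ivk, &output) {
///     // `output` was addressed to `ivk`; the note, recipient and memo are now usable.
/// }
/// ```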
pub fn try_note_decryption<D: Domain, Output: ShieldedOutput<D>>(
domain: &D,
ivk: &D::IncomingViewingKey,
output: &Output,
) -> Option<(D::Note, D::Recipient, D::Memo)> {
let ephemeral_key = output.ephemeral_key();
let epk = D::epk(&ephemeral_key)?;
let shared_secret = D::ka_agree_dec(ivk, &epk);
let key = D::kdf(shared_secret, &ephemeral_key);
try_note_decryption_inner(domain, ivk, &ephemeral_key, output, key)
}
fn try_note_decryption_inner<D: Domain, Output: ShieldedOutput<D>>(
domain: &D,
ivk: &D::IncomingViewingKey,
ephemeral_key: &EphemeralKeyBytes,
output: &Output,
key: D::SymmetricKey,
) -> Option<(D::Note, D::Recipient, D::Memo)> {
let enc_ciphertext = output.enc_ciphertext();
assert_eq!(enc_ciphertext.len(), ENC_CIPHERTEXT_SIZE);
let mut plaintext: [u8; NOTE_PLAINTEXT_SIZE] =
enc_ciphertext[..NOTE_PLAINTEXT_SIZE].try_into().unwrap();
ChaCha20Poly1305::new(key.as_ref().into())
.decrypt_in_place_detached(
[0u8; 12][..].into(),
&[],
&mut plaintext,
enc_ciphertext[NOTE_PLAINTEXT_SIZE..].into(),
)
.ok()?;
let (note, to) = parse_note_plaintext_without_memo_ivk(
domain,
ivk,
ephemeral_key,
&output.cmstar_bytes(),
&plaintext,
)?;
let memo = domain.extract_memo(&plaintext);
Some((note, to, memo))
}
fn parse_note_plaintext_without_memo_ivk<D: Domain>(
domain: &D,
ivk: &D::IncomingViewingKey,
ephemeral_key: &EphemeralKeyBytes,
cmstar_bytes: &D::ExtractedCommitmentBytes,
plaintext: &[u8],
) -> Option<(D::Note, D::Recipient)> {
let (note, to) = domain.parse_note_plaintext_without_memo_ivk(ivk, &plaintext)?;
if let NoteValidity::Valid = check_note_validity::<D>(¬e, ephemeral_key, cmstar_bytes) {
Some((note, to))
} else {
None
}
}
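/// Checks that a decrypted note is consistent with the on-chain data: its
/// recomputed commitment must match the published `cmstar_bytes`, and the
/// published ephemeral key must be consistent with one derivable from the
/// note itself.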
fn check_note_validity<D: Domain>(
note: &D::Note,
ephemeral_key: &EphemeralKeyBytes,
cmstar_bytes: &D::ExtractedCommitmentBytes,
) -> NoteValidity {
if &D::ExtractedCommitmentBytes::from(&D::cmstar(¬e)) == cmstar_bytes {
D::check_epk_bytes(¬e, |derived_esk| {
if D::epk_bytes(&D::ka_derive_public(¬e, &derived_esk))
.ct_eq(&ephemeral_key)
.into()
{
NoteValidity::Valid
} else {
NoteValidity::Invalid
}
})
} else {
// Published commitment doesn't match calculated commitment
NoteValidity::Invalid
}
}
/// Trial decryption of the compact note plaintext by the recipient for light clients.
///
/// Attempts to decrypt and validate the first 52 bytes of `enc_ciphertext` using the
/// given `ivk`. If successful, the corresponding note is returned, along with the address
/// to which the note was sent.
///
/// Implements the procedure specified in [`ZIP 307`].
///
/// [`ZIP 307`]: https://zips.z.cash/zip-0307
pub fn try_compact_note_decryption<D: Domain, Output: ShieldedOutput<D>>(
domain: &D,
ivk: &D::IncomingViewingKey,
output: &Output,
) -> Option<(D::Note, D::Recipient)> {
let ephemeral_key = output.ephemeral_key();
let epk = D::epk(&ephemeral_key)?;
let shared_secret = D::ka_agree_dec(&ivk, &epk);
let key = D::kdf(shared_secret, &ephemeral_key);
try_compact_note_decryption_inner(domain, ivk, &ephemeral_key, output, key)
}
fn try_compact_note_decryption_inner<D: Domain, Output: ShieldedOutput<D>>(
domain: &D,
ivk: &D::IncomingViewingKey,
ephemeral_key: &EphemeralKeyBytes,
output: &Output,
key: D::SymmetricKey,
) -> Option<(D::Note, D::Recipient)> {
assert_eq!(output.enc_ciphertext().len(), COMPACT_NOTE_SIZE);
    // Seek the keystream to block 1, skipping the first 64-byte block that
    // ChaCha20Poly1305 reserves for Poly1305 keying output.
let mut plaintext = [0; COMPACT_NOTE_SIZE];
plaintext.copy_from_slice(output.enc_ciphertext());
let mut keystream = ChaCha20::new(key.as_ref().into(), [0u8; 12][..].into());
keystream.seek(64);
keystream.apply_keystream(&mut plaintext);
parse_note_plaintext_without_memo_ivk(
domain,
ivk,
ephemeral_key,
&output.cmstar_bytes(),
&plaintext,
)
}
/// Recovery of the full note plaintext by the sender.
///
/// Attempts to decrypt and validate the given `enc_ciphertext` using the given `ovk`.
/// If successful, the corresponding note and memo are returned, along with the address to
/// which the note was sent.
///
/// Implements [Zcash Protocol Specification section 4.19.3][decryptovk].
///
/// [decryptovk]: https://zips.z.cash/protocol/nu5.pdf#decryptovk
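///
/// A hedged usage sketch (assumes an in-scope `domain`, the sender's `ovk`,
/// the transaction's `output`, its value commitment `cv`, and the matching
/// `out_ciphertext`):
///
/// ```ignore
/// if let Some((note, to, memo)) =
///     try_output_recovery_with_ovk(&domain, &ovk, &output, &cv, &out_ciphertext)
/// {
///     // The sender recovered the note it previously encrypted.
/// }
/// ```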
pub fn try_output_recovery_with_ovk<D: Domain, Output: ShieldedOutput<D>>(
domain: &D,
ovk: &D::OutgoingViewingKey,
output: &Output,
cv: &D::ValueCommitment,
out_ciphertext: &[u8],
) -> Option<(D::Note, D::Recipient, D::Memo)> {
let ock = D::derive_ock(ovk, &cv, &output.cmstar_bytes(), &output.ephemeral_key());
try_output_recovery_with_ock(domain, &ock, output, out_ciphertext)
}
/// Recovery of the full note plaintext by the sender.
///
/// Attempts to decrypt and validate the given `enc_ciphertext` using the given `ock`.
/// If successful, the corresponding note and memo are returned, along with the address to
/// which the note was sent.
///
/// Implements part of section 4.19.3 of the
/// [Zcash Protocol Specification](https://zips.z.cash/protocol/nu5.pdf#decryptovk).
/// For decryption using a Full Viewing Key see [`try_output_recovery_with_ovk`].
pub fn try_output_recovery_with_ock<D: Domain, Output: ShieldedOutput<D>>(
domain: &D,
ock: &OutgoingCipherKey,
output: &Output,
out_ciphertext: &[u8],
) -> Option<(D::Note, D::Recipient, D::Memo)> {
let enc_ciphertext = output.enc_ciphertext();
assert_eq!(enc_ciphertext.len(), ENC_CIPHERTEXT_SIZE);
assert_eq!(out_ciphertext.len(), OUT_CIPHERTEXT_SIZE);
let mut op = [0; OUT_PLAINTEXT_SIZE];
op.copy_from_slice(&out_ciphertext[..OUT_PLAINTEXT_SIZE]);
ChaCha20Poly1305::new(ock.as_ref().into())
.decrypt_in_place_detached(
[0u8; 12][..].into(),
&[],
&mut op,
out_ciphertext[OUT_PLAINTEXT_SIZE..].into(),
)
.ok()?;
let pk_d = D::extract_pk_d(&op)?;
let esk = D::extract_esk(&op)?;
let ephemeral_key = output.ephemeral_key();
let shared_secret = D::ka_agree_enc(&esk, &pk_d);
// The small-order point check at the point of output parsing rejects
// non-canonical encodings, so reencoding here for the KDF should
// be okay.
let key = D::kdf(shared_secret, &ephemeral_key);
let mut plaintext = [0; NOTE_PLAINTEXT_SIZE];
plaintext.copy_from_slice(&enc_ciphertext[..NOTE_PLAINTEXT_SIZE]);
ChaCha20Poly1305::new(key.as_ref().into())
.decrypt_in_place_detached(
[0u8; 12][..].into(),
&[],
&mut plaintext,
enc_ciphertext[NOTE_PLAINTEXT_SIZE..].into(),
)
.ok()?;
let (note, to) =
domain.parse_note_plaintext_without_memo_ovk(&pk_d, &esk, &ephemeral_key, &plaintext)?;
let memo = domain.extract_memo(&plaintext);
// ZIP 212: Check that the esk provided to this function is consistent with the esk we
// can derive from the note.
if let Some(derived_esk) = D::derive_esk(¬e) {
if (!derived_esk.ct_eq(&esk)).into() {
return None;
}
}
if let NoteValidity::Valid =
check_note_validity::<D>(¬e, &ephemeral_key, &output.cmstar_bytes())
{
Some((note, to, memo))
} else {
None
}
}
| 32.973727 | 105 | 0.642747 |
112580a3c1e1af5c1ad644c35b7d72ee9e332461
| 1,549 |
extern crate rtag;
use std::fs::File;
use std::io::Cursor;
use std::vec::Vec;
use rtag::rw::*;
#[test]
fn readable_bytes() {
let valid = "0123456789".to_string();
let mut readable = Cursor::new(valid.into_bytes());
assert!(readable.read_bytes(10).is_ok());
assert!(readable.read_bytes(10).is_err());
let str = "AB가나01".to_string();
let mut readable = Cursor::new(str.into_bytes());
assert!(readable.skip_bytes(1).is_ok());
assert_eq!(readable.read_string(1).unwrap(), "B");
// utf8, 3bytes
assert_eq!(readable.read_string(3).unwrap(), "가");
assert_eq!(readable.read_string(5).unwrap(), "나01");
assert!(readable.read_bytes(1).is_err());
}
#[test]
fn readable_file() {
let mut readable = File::open("./test-resources/file1.txt").unwrap();
assert!(readable.read_bytes(10).is_ok());
assert!(readable.read_bytes(10).is_ok());
assert!(readable.skip_bytes(-5).is_ok());
assert_eq!(readable.read_string(5).unwrap(), "fghij");
assert!(readable.read_bytes(10).is_err());
}
#[test]
fn readable_utf16_string() {
let str = "AB가나01".to_string();
let mut bytes: Vec<u8> = str.into_bytes();
bytes.push(0x00);
bytes.push(0x01);
bytes.push(0x00);
bytes.push(0x00);
bytes.push(0x02);
assert_eq!(bytes.len(), 15);
let mut readable = Cursor::new(bytes);
let read = readable.read_utf16_string().unwrap();
assert_eq!("AB\u{ac00}\u{b098}01\u{0}\u{1}", read);
assert!(readable.skip_bytes(1).is_ok());
assert!(readable.read_bytes(1).is_err());
}
| 30.372549 | 73 | 0.644932 |
1111b4a1d2ec931c2101d89d7f0726470d5083ff
| 2,217 |
// ██████╗ █████╗ ███████╗███████╗██╗███╗ ██╗ ██████╗
// ██╔══██╗██╔══██╗██╔════╝██╔════╝██║████╗ ██║██╔════╝
// ██████╔╝███████║███████╗███████╗██║██╔██╗ ██║██║ ███╗
// ██╔═══╝ ██╔══██║╚════██║╚════██║██║██║╚██╗██║██║ ██║
// ██║ ██║ ██║███████║███████║██║██║ ╚████║╚██████╔╝
// ╚═╝ ╚═╝ ╚═╝╚══════╝╚══════╝╚═╝╚═╝ ╚═══╝ ╚═════╝
#[cfg(test)]
mod passing {
use crate::url;
#[test]
fn mailto() {
assert!(url::url_has_protocol(
"mailto:[email protected]?subject=hello"
));
}
#[test]
fn tel() {
assert!(url::url_has_protocol("tel:5551234567"));
}
#[test]
fn ftp_no_slashes() {
assert!(url::url_has_protocol("ftp:some-ftp-server.com"));
}
#[test]
fn ftp_with_credentials() {
assert!(url::url_has_protocol(
"ftp://user:[email protected]"
));
}
#[test]
fn javascript() {
assert!(url::url_has_protocol("javascript:void(0)"));
}
#[test]
fn http() {
assert!(url::url_has_protocol("http://news.ycombinator.com"));
}
#[test]
fn https() {
assert!(url::url_has_protocol("https://github.com"));
}
#[test]
fn mailto_uppercase() {
assert!(url::url_has_protocol(
"MAILTO:[email protected]?subject=hello"
));
}
}
// ███████╗ █████╗ ██╗██╗ ██╗███╗ ██╗ ██████╗
// ██╔════╝██╔══██╗██║██║ ██║████╗ ██║██╔════╝
// █████╗ ███████║██║██║ ██║██╔██╗ ██║██║ ███╗
// ██╔══╝ ██╔══██║██║██║ ██║██║╚██╗██║██║ ██║
// ██║ ██║ ██║██║███████╗██║██║ ╚████║╚██████╔╝
// ╚═╝ ╚═╝ ╚═╝╚═╝╚══════╝╚═╝╚═╝ ╚═══╝ ╚═════╝
#[cfg(test)]
mod failing {
    use crate::url;
#[test]
fn url_with_no_protocol() {
assert!(!url::url_has_protocol(
"//some-hostname.com/some-file.html"
));
}
#[test]
fn relative_path() {
assert!(!url::url_has_protocol("some-hostname.com/some-file.html"));
}
#[test]
fn relative_to_root_path() {
assert!(!url::url_has_protocol("/some-file.html"));
}
#[test]
fn empty_string() {
assert!(!url::url_has_protocol(""));
}
}
| 24.097826 | 76 | 0.37438 |
d595130a6903844205be4ad677868c071ebfb289
| 1,517 |
#[cfg(target_arch = "x86_64")]
pub mod amd64;
cfg_if::cfg_if! {
if #[cfg(target_arch = "x86_64")] {
pub use amd64::central::arch_info;
pub use amd64::central::init::arch_init;
}
}
/// Reexports for the virtual memory subsystem
pub mod memory {
#[derive(Debug)]
pub enum MapError {
PhysicalAllocationFailed,
AlreadyMapped
}
cfg_if::cfg_if! {
if #[cfg(target_arch = "x86_64")] {
pub use super::amd64::memory::{
HEAP_END,
HEAP_SIZE,
HEAP_START,
};
pub use super::amd64::memory::virtual_memory::{
map_range,
map_virtual_address,
map_virtual_address_unlazily
};
}
}
}
pub mod interrupts {
cfg_if::cfg_if! {
if #[cfg(target_arch = "x86_64")] {
pub use x86_64::instructions::interrupts::{
are_enabled as are_interrupts_enabled,
enable as enable_interrupts,
disable as disable_interrupts
};
}
}
    /// Run a function with interrupts disabled, returning its result.
/// Afterwards, this function restores interrupts to their
/// previous state, whether they were enabled or disabled.
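    ///
    /// A minimal usage sketch (the closure body is arbitrary caller code):
    ///
    /// ```rust,ignore
    /// let sum = without_interrupts(|| 2 + 2);
    /// assert_eq!(sum, 4);
    /// ```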
    pub fn without_interrupts<T>(f: impl FnOnce() -> T) -> T {
        let should_enable = are_interrupts_enabled();
        disable_interrupts();
        // Run the closure and keep its result so it can be returned to the
        // caller after the previous interrupt state has been restored.
        let result = f();
        if should_enable {
            enable_interrupts();
        }
        result
    }
}
| 25.711864 | 62 | 0.548451 |
28f18f71ddd38055abba61e643913130f575af48
| 195 |
pub mod token;
pub mod matcher;
pub mod tokenizer;
pub mod lexer;
pub use super::source::*;
pub use self::token::*;
pub use self::matcher::*;
pub use self::tokenizer::*;
pub use self::lexer::*;
| 17.727273 | 27 | 0.682051 |
21074b11911617a5c0b4756c08df4d17f20b16c0
| 13,187 |
// LNP/BP Core Library implementing LNPBP specifications & standards
// Written in 2020 by
// Dr. Maxim Orlovsky <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.
//
// The author of the code acknowledges significant input from Peter Todd,
// who is the author of the single-use-seal concept and who spent a lot of his
// time helping with understanding the single-use-seal concept and writing the
// current implementation.
//! # Single-use-seals
//!
//! Set of traits that allow implementing Peter Todd's **single-use seal**
//! paradigm. Information in this file partially contains extracts from Peter's
//! works listed in the "Further reading" section.
//!
//! ## Single-use-seal definition
//!
//! Analogous to the real-world, physical, single-use-seals used to secure
//! shipping containers, a single-use-seal primitive is a unique object that can
//! be closed over a message exactly once. In short, a single-use-seal is an
//! abstract mechanism to prevent double-spends.
//!
//! A single-use-seal implementation supports two fundamental operations:
//! * `Close(l,m) → w` — Close seal l over message m, producing a witness `w`.
//! * `Verify(l,w,m) → bool` — Verify that the seal l was closed over message
//! `m`.
//!
//! A single-use-seal implementation is secure if it is impossible for an
//! attacker to cause the Verify function to return true for two distinct
//! messages m1, m2, when applied to the same seal (it is acceptable, although
//! non-ideal, for there to exist multiple witnesses for the same seal/message
//! pair).
//!
//! Practical single-use-seal implementations will also obviously require some
//! way of generating new single-use-seals:
//! * `Gen(p)→l` — Generate a new seal based on some seal definition data `p`.
//!
//! ## Terminology
//!
//! **Single-use-seal**: a commitment to commit to some (potentially unknown)
//! message. The first commitment (i.e. the single-use-seal) must be
//! well-defined (i.e. fully specified and uniquely identifiable
//! in some space, like in time/place or within a given formal informational
//! system).
//! **Closing of a single-use-seal over a message**: a fulfilment of the first
//! commitment: creation of the actual commitment to some message in a form
//! unambiguously defined by the seal.
//! **Witness**: data produced by closing a single-use-seal which are
//! required and sufficient for an independent party to verify that the seal
//! was indeed closed over a given message (i.e. the commitment to the message
//! had been created according to the seal definition).
//!
//! NB: It's important to note that while it's possible to deterministically
//! determine whether a given seal was closed, it may not be possible to find
//! out if the seal is open; i.e. seal status may be either "closed over
//! message" or "unknown". Some specific implementations of single-use-seals
//! may define a procedure to deterministically prove that a given seal is not
//! closed (i.e. open), however this is not a part of the specification and we
//! should not rely on the existence of such a possibility in all cases.
//!
//! ## Trait structure
//!
//! The module defines the trait [SingleUseSeal], which can be used to
//! implement single-use-seals with methods for seal closing and verification.
//! A type implementing this trait operates only with messages (represented by
//! the [Message] type alias – in fact any type that implements `AsRef<[u8]>`,
//! i.e. can be represented as a sequence of bytes) and witnesses (represented
//! by the associated type [SingleUseSeal::Witness]). At the same time,
//! [SingleUseSeal] can't define seals by itself — and also knows nothing about
//! whether the seal is in fact closed: this requires a "seal medium": a proof
//! of publication medium on which the seals are defined.
//!
//! The module provides two options for implementing such a medium: the
//! synchronous [SealMedium] and the asynchronous [SealMediumAsync].
//!
//! ## Sample implementation
//!
//! Examples of implementations can be found in [bp::seals][crate::bp::seals]
//! module of the crate source code.
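//!
//! For orientation, below is a minimal illustrative sketch of implementing
//! the trait. The `ToySeal` type, its error type and its "medium" semantics
//! are hypothetical and are not part of this library:
//!
//! ```rust,ignore
//! /// A toy seal: the witness is simply a copy of the committed message.
//! struct ToySeal {
//!     definition: String,
//! }
//!
//! #[derive(Clone, Copy, Debug, Display)]
//! #[display_from(Debug)]
//! struct ToyError;
//! impl std::error::Error for ToyError {}
//!
//! impl SingleUseSeal for ToySeal {
//!     type Witness = Vec<u8>;
//!     type Definition = String;
//!     type Error = ToyError;
//!
//!     fn close(&self, over: &Message) -> Result<Self::Witness, Self::Error> {
//!         // A real implementation would publish a commitment to the message
//!         // on the underlying medium; here the witness is the message itself.
//!         Ok(over.as_ref().to_vec())
//!     }
//!
//!     fn verify(&self, msg: &Message, witness: &Self::Witness) -> Result<bool, Self::Error> {
//!         Ok(witness.as_slice() == msg.as_ref())
//!     }
//! }
//! ```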
//!
//! ## Further reading
//!
//! * Peter Todd. Preventing Consensus Fraud with Commitments and
//! Single-Use-Seals.
//! <https://petertodd.org/2016/commitments-and-single-use-seals>.
//! * Peter Todd. Scalable Semi-Trustless Asset Transfer via Single-Use-Seals
//! and Proof-of-Publication. 1. Single-Use-Seal Definition.
//! <https://petertodd.org/2017/scalable-single-use-seal-asset-transfer>
/// Message type that can be used to close the seal over it
pub type Message = dyn AsRef<[u8]>;
/// Single-use-seal trait: implement for a data structure that will hold a
/// single-use-seal definition and will contain a business logic for closing
/// seal over some message and verification of the seal against the message
/// and witness.
///
/// NB: It is recommended that single-use-seal instances be instantiated
/// not by a constructor, but by a factory, i.e. a "seal medium": a data type
/// implementing either the [SealMedium] or [SealMediumAsync] trait.
pub trait SingleUseSeal {
/// Associated type for the witness produced by the single-use-seal close
/// procedure
type Witness;
/// Type that contains seal definition
type Definition;
/// Closing and verification errors
type Error: std::error::Error;
/// NB: Closing of the seal MUST not change the internal state of the
/// seal itself; all the data produced by the process must be placed
/// into the returned Witness type
fn close(&self, over: &Message) -> Result<Self::Witness, Self::Error>;
fn verify(&self, msg: &Message, witness: &Self::Witness) -> Result<bool, Self::Error>;
}
/// Trait for proof-of-publication medium on which the seals are defined and
/// which can be used for convenience operations related to seals:
/// * finding out the seal status
/// * publishing witness information
/// * get some identifier on the exact place of the witness publication
/// * check validity of the witness publication identifier
///
/// Since the medium may require network communications or extensive computing
/// (like in the case of a blockchain), there is a special asynchronous
/// version of the medium, [SealMediumAsync], which requires use of the
/// `async` feature of this crate.
///
/// All these operations are medium-specific; for the same single-use-seal type
/// they may differ when applied to different proof-of-publication mediums.
///
/// To read more on proof-of-publication please check
/// <https://petertodd.org/2014/setting-the-record-proof-of-publication>
pub trait SealMedium<'a, SEAL>
where
SEAL: SingleUseSeal,
{
/// Publication id that may be used for referencing publication of
    /// witness data in the medium. By default it is set to `()`, so a
    /// [SealMedium] implementation need not support publication ids and the
    /// related functions.
type PublicationId = ();
/// Error type that contains reasons of medium access failure
type Error: std::error::Error;
    /// Creates a single-use-seal of the type given by the
    /// implementation-specific generic parameter `SEAL`.
fn define_seal(&'a self, definition: &SEAL::Definition) -> Result<SEAL, Self::Error>;
    /// Checks the status for a given seal in the proof-of-publication medium.
fn get_seal_status(&self, seal: &SEAL) -> Result<SealStatus, Self::Error>;
    /// Publishes witness data to the medium. This function has a default
    /// implementation that does nothing and returns a
    /// [SealMediumError::PublicationIdNotSupported] error.
fn publish_witness(
&mut self,
_witness: &SEAL::Witness,
) -> Result<Self::PublicationId, SealMediumError<Self::Error>> {
Err(SealMediumError::PublicationIdNotSupported)
}
/// Returns [Self::PublicationId] for a given witness, if any; the id is
    /// returned as an option. This function has a default implementation that
    /// does nothing and just returns a
    /// [SealMediumError::PublicationIdNotSupported] error.
fn get_witness_publication_id(
&self,
_witness: &SEAL::Witness,
) -> Result<Option<Self::PublicationId>, SealMediumError<Self::Error>> {
Err(SealMediumError::PublicationIdNotSupported)
}
/// Validates whether a given publication id is present in the medium.
    /// This function has a default implementation that does nothing and
    /// returns a [SealMediumError::PublicationIdNotSupported] error.
fn validate_publication_id(
&self,
_publication_id: &Self::PublicationId,
) -> Result<bool, SealMediumError<Self::Error>> {
Err(SealMediumError::PublicationIdNotSupported)
}
}
/// Asynchronous version of the [SealMedium] trait.
#[cfg(feature = "async")]
#[async_trait]
pub trait SealMediumAsync<SEAL>
where
SEAL: SingleUseSeal + Sync + Send,
SEAL::Witness: Sync + Send,
Self::PublicationId: Sync,
{
/// Publication id that may be used for referencing publication of
    /// witness data in the medium. By default it is set to `()`, so a
    /// [SealMedium] implementation need not support publication ids and the
    /// related functions.
type PublicationId = ();
/// Error type that contains reasons of medium access failure
type Error: std::error::Error;
    /// Creates a single-use-seal of the type given by the
    /// implementation-specific generic parameter `SEAL`.
async fn define_seal<D>(&self, definition: &D) -> Result<SEAL, Self::Error>;
    /// Checks the status for a given seal in the proof-of-publication medium.
async fn get_seal_status(&self, seal: &SEAL) -> Result<SealStatus, Self::Error>;
    /// Publishes witness data to the medium. This function has a default
    /// implementation that does nothing and returns a
    /// [SealMediumError::PublicationIdNotSupported] error.
async fn publish_witness(
&mut self,
_witness: &SEAL::Witness,
) -> Result<Self::PublicationId, SealMediumError<Self::Error>>
where
SEAL: 'async_trait,
{
Err(SealMediumError::PublicationIdNotSupported)
}
/// Returns [Self::PublicationId] for a given witness, if any; the id is
    /// returned as an option. This function has a default implementation that
    /// does nothing and just returns a
    /// [SealMediumError::PublicationIdNotSupported] error.
async fn get_witness_publication_id(
&self,
_witness: &SEAL::Witness,
) -> Result<Option<Self::PublicationId>, SealMediumError<Self::Error>>
where
SEAL: 'async_trait,
{
Err(SealMediumError::PublicationIdNotSupported)
}
/// Validates whether a given publication id is present in the medium.
    /// This function has a default implementation that does nothing and
    /// returns a [SealMediumError::PublicationIdNotSupported] error.
async fn validate_publication_id(
&self,
_publication_id: &Self::PublicationId,
) -> Result<bool, SealMediumError<Self::Error>>
where
SEAL: 'async_trait,
{
Err(SealMediumError::PublicationIdNotSupported)
}
}
/// Single-use-seal status returned by the [SealMedium::get_seal_status] and
/// [SealMediumAsync::get_seal_status] functions.
///
/// NB: It's important to note that while it's possible to deterministically
/// determine whether a given seal was closed, it may not be possible to find
/// out if the seal is open without provision of the message and witness; i.e.
/// seal status may be either "closed over message" or "unknown". Some
/// specific implementations of single-use-seals may define a procedure to
/// deterministically prove that a given seal is not closed (i.e. open),
/// however this is not a part of the specification and we should not rely on
/// the existence of such a possibility in all cases.
#[derive(Clone, Copy, Debug, Display)]
#[display_from(Debug)]
#[repr(u8)]
pub enum SealStatus {
/// It is unknown/undetermined whether the seal was closed
Undefined = 0,
/// The seal is closed
Closed = 1,
}
/// Error returned by [SealMedium] and [SealMediumAsync] functions related
/// to work with publication ids ([SealMedium::PublicationId]). Required since
/// not all implementations of [SealMedium] may define a publication
/// identifier, and the traits provide default implementations for these
/// functions that always return [SealMediumError::PublicationIdNotSupported].
/// If an implementation would like to provide a custom implementation, it may
/// embed a standard error related to [SealMedium] operations within the
/// [SealMediumError::MediumAccessError] case; the type of the medium access
/// error is defined through the generic argument to [SealMediumError].
#[derive(Clone, Copy, Debug, Display)]
#[display_from(Debug)]
pub enum SealMediumError<M: std::error::Error> {
/// Can't access the publication medium
MediumAccessError(M),
/// Publication id is not supported
PublicationIdNotSupported,
}
| 43.810631 | 90 | 0.714264 |
2fdca5b91787af2373e5b94c30e168ef1332fc8f
| 82,134 |
//! Models relating to guilds and the types that they own.
mod emoji;
mod guild_id;
mod integration;
mod member;
mod partial_guild;
mod role;
mod audit_log;
mod premium_tier;
pub use self::emoji::*;
pub use self::guild_id::*;
pub use self::integration::*;
pub use self::member::*;
pub use self::partial_guild::*;
pub use self::role::*;
pub use self::audit_log::*;
pub use self::premium_tier::*;
use chrono::{DateTime, Utc};
use crate::model::prelude::*;
use serde::de::Error as DeError;
use super::utils::*;
use futures::stream::StreamExt;
#[cfg(all(feature = "cache", feature = "model"))]
use crate::cache::Cache;
#[cfg(all(feature = "http", feature = "model"))]
use serde_json::json;
#[cfg(feature = "model")]
use crate::builder::{CreateChannel, EditGuild, EditMember, EditRole};
#[cfg(feature = "model")]
use crate::constants::LARGE_THRESHOLD;
#[cfg(feature = "model")]
use tracing::{error, warn};
#[cfg(feature = "model")]
use crate::http::{Http, CacheHttp};
/// A representation of a banning of a user.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)]
pub struct Ban {
/// The reason given for this ban.
pub reason: Option<String>,
/// The user that was banned.
pub user: User,
}
/// Information about a Discord guild, such as channels, emojis, etc.
#[derive(Clone, Debug, Serialize)]
#[non_exhaustive]
pub struct Guild {
/// Id of a voice channel that's considered the AFK channel.
pub afk_channel_id: Option<ChannelId>,
/// The amount of seconds a user can not show any activity in a voice
/// channel before being moved to an AFK channel -- if one exists.
pub afk_timeout: u64,
/// Application ID of the guild creator if it is bot-created.
pub application_id: Option<ApplicationId>,
/// All voice and text channels contained within a guild.
///
/// This contains all channels regardless of permissions (i.e. the ability
/// of the bot to read from or connect to them).
#[serde(serialize_with = "serialize_gen_map")]
pub channels: HashMap<ChannelId, GuildChannel>,
/// Indicator of whether notifications for all messages are enabled by
/// default in the guild.
pub default_message_notifications: DefaultMessageNotificationLevel,
/// All of the guild's custom emojis.
#[serde(serialize_with = "serialize_gen_map")]
pub emojis: HashMap<EmojiId, Emoji>,
/// Default explicit content filter level.
pub explicit_content_filter: ExplicitContentFilter,
/// VIP features enabled for the guild. Can be obtained through the
/// [Discord Partnership] website.
///
/// The following is a list of known features:
///
/// - `INVITE_SPLASH`
/// - `VANITY_URL`
/// - `VERIFIED`
/// - `VIP_REGIONS`
/// - `PARTNERED`
/// - `MORE_EMOJI`
/// - `DISCOVERABLE`
/// - `FEATURABLE`
/// - `COMMERCE`
/// - `PUBLIC`
/// - `NEWS`
/// - `BANNER`
/// - `ANIMATED_ICON`
/// - `PUBLIC_DISABLED`
/// - `COMMUNITY`
/// - `WELCOME_SCREEN_ENABLED`
///
/// [Discord Partnership]: https://discord.com/partners
pub features: Vec<String>,
/// The hash of the icon used by the guild.
///
/// In the client, this appears on the guild list on the left-hand side.
pub icon: Option<String>,
/// The unique Id identifying the guild.
///
    /// This is equivalent to the Id of the default role (`@everyone`) and also
/// that of the default channel (typically `#general`).
pub id: GuildId,
/// The date that the current user joined the guild.
pub joined_at: DateTime<Utc>,
/// Indicator of whether the guild is considered "large" by Discord.
pub large: bool,
/// The number of members in the guild.
pub member_count: u64,
/// Users who are members of the guild.
///
/// Members might not all be available when the [`ReadyEvent`] is received
/// if the [`member_count`] is greater than the `LARGE_THRESHOLD` set by
/// the library.
///
/// [`member_count`]: Self::member_count
#[serde(serialize_with = "serialize_gen_map")]
pub members: HashMap<UserId, Member>,
/// Indicator of whether the guild requires multi-factor authentication for
/// [`Role`]s or [`User`]s with moderation permissions.
pub mfa_level: MfaLevel,
/// The name of the guild.
pub name: String,
/// The Id of the [`User`] who owns the guild.
pub owner_id: UserId,
/// A mapping of [`User`]s' Ids to their current presences.
#[serde(serialize_with = "serialize_gen_map")]
pub presences: HashMap<UserId, Presence>,
/// The region that the voice servers that the guild uses are located in.
pub region: String,
/// A mapping of the guild's roles.
#[serde(serialize_with = "serialize_gen_map")]
pub roles: HashMap<RoleId, Role>,
/// An identifying hash of the guild's splash icon.
///
/// If the [`"InviteSplash"`] feature is enabled, this can be used to generate
/// a URL to a splash image.
pub splash: Option<String>,
/// The ID of the channel to which system messages are sent.
pub system_channel_id: Option<ChannelId>,
/// Indicator of the current verification level of the guild.
pub verification_level: VerificationLevel,
/// A mapping of [`User`]s to their current voice state.
#[serde(serialize_with = "serialize_gen_map")]
pub voice_states: HashMap<UserId, VoiceState>,
/// The server's description
pub description: Option<String>,
/// The server's premium boosting level.
#[serde(default)]
pub premium_tier: PremiumTier,
/// The total number of users currently boosting this server.
#[serde(default)]
pub premium_subscription_count: u64,
/// The server's banner.
pub banner: Option<String>,
/// The vanity url code for the guild.
pub vanity_url_code: Option<String>,
    /// The preferred locale of this guild, only set if the guild has the
    /// "DISCOVERABLE" feature; defaults to en-US.
pub preferred_locale: String,
}
#[cfg(feature = "model")]
impl Guild {
#[cfg(feature = "cache")]
async fn check_hierarchy(&self, cache: impl AsRef<Cache>, other_user: UserId) -> Result<()> {
let current_id = cache.as_ref().current_user().await.id;
if let Some(higher) = self.greater_member_hierarchy(&cache, other_user, current_id).await {
if higher != current_id {
return Err(Error::Model(ModelError::Hierarchy));
}
}
Ok(())
}
/// Returns the "default" channel of the guild for the passed user id.
/// (This returns the first channel that can be read by the user, if there isn't one,
/// returns `None`)
pub async fn default_channel(&self, uid: UserId) -> Option<&GuildChannel> {
for (cid, channel) in &self.channels {
if self.user_permissions_in(*cid, uid).read_messages() {
return Some(channel);
}
}
None
}
    /// Returns the guaranteed "default" channel of the guild.
    /// (This returns the first channel that can be read by everyone; if there
    /// isn't one, it returns `None`.)
    ///
    /// Note however that this is very costly if used in a server with lots of
    /// channels, members, or both.
pub async fn default_channel_guaranteed(&self) -> Option<&GuildChannel> {
for (cid, channel) in &self.channels {
for memid in self.members.keys() {
if self.user_permissions_in(*cid, *memid).read_messages() {
return Some(channel);
}
}
}
None
}
#[cfg(feature = "cache")]
async fn has_perms(&self, cache_http: impl CacheHttp, mut permissions: Permissions) -> bool {
if let Some(cache) = cache_http.cache() {
let user_id = cache.current_user().await.id;
if let Ok(perms) = self.member_permissions(&cache_http, user_id).await {
permissions.remove(perms);
permissions.is_empty()
} else {
false
}
} else {
false
}
}
#[cfg(feature = "cache")]
pub async fn channel_id_from_name(&self, cache: impl AsRef<Cache>, name: impl AsRef<str>) -> Option<ChannelId> {
let name = name.as_ref();
let guild_channels = cache
.as_ref()
.guild_channels(&self.id)
.await?;
for (id, channel) in guild_channels.iter() {
if channel.name == name {
return Some(*id)
}
}
None
}
    /// Ban a [`User`] from the guild, deleting a number of
    /// days' worth of messages (`dmd`) in the range 0 to 7.
    ///
    /// Refer to the documentation for [`GuildId::ban`] for more information.
///
/// **Note**: Requires the [Ban Members] permission.
///
/// # Examples
///
/// Ban a member and remove all messages they've sent in the last 4 days:
///
/// ```rust,ignore
/// // assumes a `user` and `guild` have already been bound
/// let _ = guild.ban(user, 4);
/// ```
///
/// # Errors
///
/// Returns a [`ModelError::InvalidPermissions`] if the current user does
/// not have permission to perform bans.
///
/// Returns a [`ModelError::DeleteMessageDaysAmount`] if the number of
/// days' worth of messages to delete is over the maximum.
///
/// [Ban Members]: Permissions::BAN_MEMBERS
#[inline]
pub async fn ban(&self, cache_http: impl CacheHttp, user: impl Into<UserId>, dmd: u8) -> Result<()> {
self._ban_with_reason(cache_http, user.into(), dmd, "").await
}
    /// Ban a [`User`] from the guild with a reason. Refer to [`ban`] for further documentation.
///
/// [`ban`]: Self::ban
#[inline]
pub async fn ban_with_reason(
&self,
cache_http: impl CacheHttp,
user: impl Into<UserId>,
dmd: u8,
reason: impl AsRef<str>) -> Result<()> {
self._ban_with_reason(cache_http, user.into(), dmd, reason.as_ref()).await
}
async fn _ban_with_reason(&self, cache_http: impl CacheHttp, user: UserId, dmd: u8, reason: &str) -> Result<()> {
#[cfg(feature = "cache")]
{
if let Some(cache) = cache_http.cache() {
let req = Permissions::BAN_MEMBERS;
if !self.has_perms(&cache_http, req).await {
return Err(Error::Model(ModelError::InvalidPermissions(req)));
}
self.check_hierarchy(cache, user).await?;
}
}
self.id.ban_with_reason(cache_http.http(), user, dmd, reason).await
}
/// Returns the formatted URL of the guild's banner image, if one exists.
pub fn banner_url(&self) -> Option<String> {
self.banner
.as_ref()
.map(|banner| format!(cdn!("/banners/{}/{}.webp?size=1024"), self.id, banner))
}
/// Retrieves a list of [`Ban`]s for the guild.
///
/// **Note**: Requires the [Ban Members] permission.
///
/// # Errors
///
/// If the `cache` is enabled, returns a [`ModelError::InvalidPermissions`]
/// if the current user does not have permission to perform bans.
///
/// [Ban Members]: Permissions::BAN_MEMBERS
pub async fn bans(&self, cache_http: impl CacheHttp) -> Result<Vec<Ban>> {
#[cfg(feature = "cache")]
{
if cache_http.cache().is_some() {
let req = Permissions::BAN_MEMBERS;
if !self.has_perms(&cache_http, req).await {
return Err(Error::Model(ModelError::InvalidPermissions(req)));
}
}
}
self.id.bans(cache_http.http()).await
}
/// Retrieves a list of [`AuditLogs`] for the guild.
#[inline]
pub async fn audit_logs(
&self,
http: impl AsRef<Http>,
action_type: Option<u8>,
user_id: Option<UserId>,
before: Option<AuditLogEntryId>,
limit: Option<u8>
) -> Result<AuditLogs> {
self.id.audit_logs(&http, action_type, user_id, before, limit).await
}
/// Gets all of the guild's channels over the REST API.
#[inline]
pub async fn channels(&self, http: impl AsRef<Http>) -> Result<HashMap<ChannelId, GuildChannel>> {
self.id.channels(&http).await
}
/// Creates a guild with the data provided.
///
/// Only a [`PartialGuild`] will be immediately returned, and a full
/// [`Guild`] will be received over a [`Shard`].
///
/// **Note**: This endpoint is usually only available for user accounts.
/// Refer to Discord's information for the endpoint [here][whitelist] for
/// more information. If you require this as a bot, re-think what you are
/// doing and if it _really_ needs to be doing this.
///
/// # Examples
///
/// Create a guild called `"test"` in the [US West region] with no icon:
///
/// ```rust,ignore
/// use serenity::model::{Guild, Region};
///
    /// let _guild = Guild::create(&http, "test", Region::UsWest, None).await;
/// ```
///
/// [`Shard`]: crate::gateway::Shard
/// [US West region]: Region::UsWest
/// [whitelist]: https://discord.com/developers/docs/resources/guild#create-guild
pub async fn create(http: impl AsRef<Http>, name: &str, region: Region, icon: Option<&str>) -> Result<PartialGuild> {
let map = json!({
"icon": icon,
"name": name,
"region": region.name(),
});
http.as_ref().create_guild(&map).await
}
/// Creates a new [`Channel`] in the guild.
///
/// **Note**: Requires the [Manage Channels] permission.
///
/// # Examples
///
/// ```rust,ignore
/// use serenity::model::ChannelType;
///
/// // assuming a `guild` has already been bound
///
/// let _ = guild
/// .create_channel(&http, |c| c.name("my-test-channel").kind(ChannelType::Text))
/// .await;
/// ```
///
/// # Errors
///
/// If the `cache` is enabled, returns a [`ModelError::InvalidPermissions`]
    /// if the current user does not have permission to manage channels.
///
/// [Manage Channels]: Permissions::MANAGE_CHANNELS
pub async fn create_channel(&self, cache_http: impl CacheHttp, f: impl FnOnce(&mut CreateChannel) -> &mut CreateChannel) -> Result<GuildChannel> {
#[cfg(feature = "cache")]
{
if cache_http.cache().is_some() {
let req = Permissions::MANAGE_CHANNELS;
if !self.has_perms(&cache_http, req).await {
return Err(Error::Model(ModelError::InvalidPermissions(req)));
}
}
}
self.id.create_channel(cache_http.http(), f).await
}
/// Creates an emoji in the guild with a name and base64-encoded image. The
/// [`utils::read_image`] function is provided for you as a simple method to
/// read an image and encode it into base64, if you are reading from the
/// filesystem.
///
/// The name of the emoji must be at least 2 characters long and can only
/// contain alphanumeric characters and underscores.
///
/// Requires the [Manage Emojis] permission.
///
/// # Examples
///
/// See the [`EditProfile::avatar`] example for an in-depth example as to
/// how to read an image from the filesystem and encode it as base64. Most
/// of the example can be applied similarly for this method.
///
/// [`EditProfile::avatar`]: crate::builder::EditProfile::avatar
/// [`utils::read_image`]: crate::utils::read_image
/// [Manage Emojis]: Permissions::MANAGE_EMOJIS
#[inline]
pub async fn create_emoji(&self, http: impl AsRef<Http>, name: &str, image: &str) -> Result<Emoji> {
self.id.create_emoji(&http, name, image).await
}
/// Creates an integration for the guild.
///
/// Requires the [Manage Guild] permission.
///
/// [Manage Guild]: Permissions::MANAGE_GUILD
#[inline]
pub async fn create_integration<I>(&self, http: impl AsRef<Http>, integration_id: impl Into<IntegrationId>, kind: &str) -> Result<()> {
self.id.create_integration(&http, integration_id, kind).await
}
/// Creates a new role in the guild with the data set, if any.
///
/// **Note**: Requires the [Manage Roles] permission.
///
/// # Examples
///
    /// Create a hoisted role with the name 'role':
///
/// ```rust,ignore
/// // assuming a `guild` has been bound
///
/// let role = guild.create_role(&http, |r| r.hoist(true).name("role")).await;
/// ```
///
/// # Errors
///
/// If the `cache` is enabled, returns a [`ModelError::InvalidPermissions`]
    /// if the current user does not have permission to manage roles.
///
/// [Manage Roles]: Permissions::MANAGE_ROLES
pub async fn create_role<F>(&self, cache_http: impl CacheHttp, f: F) -> Result<Role>
where F: FnOnce(&mut EditRole) -> &mut EditRole
{
#[cfg(feature = "cache")]
{
if cache_http.cache().is_some() {
let req = Permissions::MANAGE_ROLES;
if !self.has_perms(&cache_http, req).await {
return Err(Error::Model(ModelError::InvalidPermissions(req)));
}
}
}
self.id.create_role(cache_http.http(), f).await
}
/// Deletes the current guild if the current user is the owner of the
/// guild.
///
/// **Note**: Requires the current user to be the owner of the guild.
///
/// # Errors
///
/// If the `cache` is enabled, then returns a [`ModelError::InvalidUser`]
/// if the current user is not the guild owner.
pub async fn delete(&self, cache_http: impl CacheHttp) -> Result<PartialGuild> {
#[cfg(feature = "cache")]
{
if let Some(cache) = cache_http.cache() {
if self.owner_id != cache.current_user().await.id {
let req = Permissions::MANAGE_GUILD;
return Err(Error::Model(ModelError::InvalidPermissions(req)));
}
}
}
self.id.delete(cache_http.http()).await
}
/// Deletes an [`Emoji`] from the guild.
///
/// Requires the [Manage Emojis] permission.
///
/// [Manage Emojis]: Permissions::MANAGE_EMOJIS
#[inline]
pub async fn delete_emoji(&self, http: impl AsRef<Http>, emoji_id: impl Into<EmojiId>) -> Result<()> {
self.id.delete_emoji(&http, emoji_id).await
}
/// Deletes an integration by Id from the guild.
///
/// Requires the [Manage Guild] permission.
///
/// [Manage Guild]: Permissions::MANAGE_GUILD
#[inline]
pub async fn delete_integration(&self, http: impl AsRef<Http>, integration_id: impl Into<IntegrationId>) -> Result<()> {
self.id.delete_integration(&http, integration_id).await
}
/// Deletes a [`Role`] by Id from the guild.
///
/// Also see [`Role::delete`] if you have the `cache` and `methods` features
/// enabled.
///
/// Requires the [Manage Roles] permission.
///
/// [Manage Roles]: Permissions::MANAGE_ROLES
#[inline]
pub async fn delete_role(&self, http: impl AsRef<Http>, role_id: impl Into<RoleId>) -> Result<()> {
self.id.delete_role(&http, role_id).await
}
/// Edits the current guild with new data where specified.
///
/// Refer to `EditGuild`'s documentation for a full list of methods.
///
/// **Note**: Requires the current user to have the [Manage Guild]
/// permission.
///
/// # Examples
///
/// Change a guild's icon using a file name "icon.png":
///
/// ```rust,ignore
/// use serenity::utils;
///
/// // We are using read_image helper function from utils.
/// let base64_icon = utils::read_image("./icon.png")
/// .expect("Failed to read image");
///
    /// guild.edit(&http, |g| g.icon(base64_icon)).await;
/// ```
///
/// # Errors
///
/// If the `cache` is enabled, returns a [`ModelError::InvalidPermissions`]
    /// if the current user does not have permission to edit the guild.
///
/// [Manage Guild]: Permissions::MANAGE_GUILD
pub async fn edit<F>(&mut self, cache_http: impl CacheHttp, f: F) -> Result<()>
where F: FnOnce(&mut EditGuild) -> &mut EditGuild
{
#[cfg(feature = "cache")]
{
if cache_http.cache().is_some() {
let req = Permissions::MANAGE_GUILD;
if !self.has_perms(&cache_http, req).await {
return Err(Error::Model(ModelError::InvalidPermissions(req)));
}
}
}
match self.id.edit(cache_http.http(), f).await {
Ok(guild) => {
self.afk_channel_id = guild.afk_channel_id;
self.afk_timeout = guild.afk_timeout;
self.default_message_notifications = guild.default_message_notifications;
self.emojis = guild.emojis;
self.features = guild.features;
self.icon = guild.icon;
self.mfa_level = guild.mfa_level;
self.name = guild.name;
self.owner_id = guild.owner_id;
self.region = guild.region;
self.roles = guild.roles;
self.splash = guild.splash;
self.verification_level = guild.verification_level;
Ok(())
},
Err(why) => Err(why),
}
}
/// Edits an [`Emoji`]'s name in the guild.
///
/// Also see [`Emoji::edit`] if you have the `cache` and `methods` features
/// enabled.
///
/// Requires the [Manage Emojis] permission.
///
/// [Manage Emojis]: Permissions::MANAGE_EMOJIS
#[inline]
pub async fn edit_emoji(&self, http: impl AsRef<Http>, emoji_id: impl Into<EmojiId>, name: &str) -> Result<Emoji> {
self.id.edit_emoji(&http, emoji_id, name).await
}
/// Edits the properties of member of the guild, such as muting or
/// nicknaming them. Returns the new member.
///
/// Refer to `EditMember`'s documentation for a full list of methods and
/// permission restrictions.
///
/// # Examples
///
/// Mute a member and set their roles to just one role with a predefined Id:
///
/// ```rust,ignore
    /// guild.edit_member(&http, user_id, |m| m.mute(true).roles(&vec![role_id])).await;
/// ```
#[inline]
pub async fn edit_member<F>(&self, http: impl AsRef<Http>, user_id: impl Into<UserId>, f: F) -> Result<Member>
where F: FnOnce(&mut EditMember) -> &mut EditMember
{
self.id.edit_member(&http, user_id, f).await
}
/// Edits the current user's nickname for the guild.
///
/// Pass `None` to reset the nickname.
///
/// **Note**: Requires the [Change Nickname] permission.
///
/// # Errors
///
/// If the `cache` is enabled, returns a [`ModelError::InvalidPermissions`]
/// if the current user does not have permission to change their own
/// nickname.
///
/// [Change Nickname]: Permissions::CHANGE_NICKNAME
pub async fn edit_nickname(&self, cache_http: impl CacheHttp, new_nickname: Option<&str>) -> Result<()> {
#[cfg(feature = "cache")]
{
if cache_http.cache().is_some() {
let req = Permissions::CHANGE_NICKNAME;
if !self.has_perms(&cache_http, req).await {
return Err(Error::Model(ModelError::InvalidPermissions(req)));
}
}
}
self.id.edit_nickname(cache_http.http(), new_nickname).await
}
/// Edits a role, optionally setting its fields.
///
/// Requires the [Manage Roles] permission.
///
/// # Examples
///
/// Make a role hoisted:
///
/// ```rust,ignore
    /// guild.edit_role(&context, RoleId(7), |r| r.hoist(true)).await;
/// ```
///
/// [Manage Roles]: Permissions::MANAGE_ROLES
#[inline]
pub async fn edit_role<F>(&self, http: impl AsRef<Http>, role_id: impl Into<RoleId>, f: F) -> Result<Role>
where F: FnOnce(&mut EditRole) -> &mut EditRole
{
self.id.edit_role(&http, role_id, f).await
}
/// Edits the order of [`Role`]s
/// Requires the [Manage Roles] permission.
///
/// # Examples
///
/// Change the order of a role:
///
/// ```rust,ignore
/// use serenity::model::id::RoleId;
    /// guild.edit_role_position(&context, RoleId(8), 2).await;
/// ```
///
/// [Manage Roles]: Permissions::MANAGE_ROLES
#[inline]
pub async fn edit_role_position(
&self,
http: impl AsRef<Http>,
role_id: impl Into<RoleId>,
position: u64
) -> Result<Vec<Role>> {
self.id.edit_role_position(&http, role_id, position).await
}
/// Gets a partial amount of guild data by its Id.
///
/// Requires that the current user be in the guild.
#[inline]
pub async fn get(http: impl AsRef<Http>, guild_id: impl Into<GuildId>) -> Result<PartialGuild> {
guild_id.into().to_partial_guild(&http).await
}
/// Returns which of two [`User`]s has a higher [`Member`] hierarchy.
///
/// Hierarchy is essentially who has the [`Role`] with the highest
/// [`position`].
///
/// Returns [`None`] if at least one of the given users' member instances
/// is not present. Returns `None` if the users have the same hierarchy, as
    /// neither is greater than the other.
///
/// If both user IDs are the same, `None` is returned. If one of the users
/// is the guild owner, their ID is returned.
///
/// [`position`]: Role::position
#[cfg(feature = "cache")]
#[inline]
pub async fn greater_member_hierarchy(
&self,
cache: impl AsRef<Cache>,
lhs_id: impl Into<UserId>,
rhs_id: impl Into<UserId>
) -> Option<UserId> {
self._greater_member_hierarchy(&cache, lhs_id.into(), rhs_id.into()).await
}
#[cfg(feature = "cache")]
async fn _greater_member_hierarchy(
&self,
cache: impl AsRef<Cache>,
lhs_id: UserId,
rhs_id: UserId,
) -> Option<UserId> {
// Check that the IDs are the same. If they are, neither is greater.
if lhs_id == rhs_id {
return None;
}
// Check if either user is the guild owner.
if lhs_id == self.owner_id {
return Some(lhs_id);
} else if rhs_id == self.owner_id {
return Some(rhs_id);
}
let lhs = self.members.get(&lhs_id)?
.highest_role_info(&cache)
.await
.unwrap_or((RoleId(0), 0));
let rhs = self.members.get(&rhs_id)?
.highest_role_info(&cache)
.await
.unwrap_or((RoleId(0), 0));
// If LHS and RHS both have no top position or have the same role ID,
// then no one wins.
if (lhs.1 == 0 && rhs.1 == 0) || (lhs.0 == rhs.0) {
return None;
}
// If LHS's top position is higher than RHS, then LHS wins.
if lhs.1 > rhs.1 {
return Some(lhs_id)
}
// If RHS's top position is higher than LHS, then RHS wins.
if rhs.1 > lhs.1 {
return Some(rhs_id);
}
// If LHS and RHS both have the same position, but LHS has the lower
// role ID, then LHS wins.
//
        // If RHS has the lower role ID, then RHS wins.
if lhs.1 == rhs.1 && lhs.0 < rhs.0 {
Some(lhs_id)
} else {
Some(rhs_id)
}
}
/// Returns the formatted URL of the guild's icon, if one exists.
///
/// This will produce a WEBP image URL, or GIF if the guild has a GIF icon.
pub fn icon_url(&self) -> Option<String> {
self.icon
.as_ref()
.map(|icon| {
let ext = if icon.starts_with("a_") {
"gif"
} else {
"webp"
};
format!(cdn!("/icons/{}/{}.{}"), self.id, icon, ext)
})
}
/// Gets all [`Emoji`]s of this guild via HTTP.
#[inline]
pub async fn emojis(&self, http: impl AsRef<Http>) -> Result<Vec<Emoji>> {
self.id.emojis(http).await
}
/// Gets an [`Emoji`] of this guild by its ID via HTTP.
#[inline]
pub async fn emoji(&self, http: impl AsRef<Http>, emoji_id: EmojiId) -> Result<Emoji> {
self.id.emoji(http, emoji_id).await
}
/// Gets all integration of the guild.
///
/// This performs a request over the REST API.
#[inline]
pub async fn integrations(&self, http: impl AsRef<Http>) -> Result<Vec<Integration>> {
self.id.integrations(&http).await
}
/// Retrieves the active invites for the guild.
///
/// **Note**: Requires the [Manage Guild] permission.
///
/// # Errors
///
/// If the `cache` is enabled, returns a [`ModelError::InvalidPermissions`]
    /// if the current user does not have permission to manage the guild.
///
/// [Manage Guild]: Permissions::MANAGE_GUILD
pub async fn invites(&self, cache_http: impl CacheHttp) -> Result<Vec<RichInvite>> {
#[cfg(feature = "cache")]
{
if cache_http.cache().is_some() {
let req = Permissions::MANAGE_GUILD;
if !self.has_perms(&cache_http, req).await {
return Err(Error::Model(ModelError::InvalidPermissions(req)));
}
}
}
self.id.invites(cache_http.http()).await
}
/// Checks if the guild is 'large'. A guild is considered large if it has
/// more than 250 members.
#[inline]
pub fn is_large(&self) -> bool { self.members.len() > LARGE_THRESHOLD as usize }
/// Kicks a [`Member`] from the guild.
///
/// Requires the [Kick Members] permission.
///
/// [Kick Members]: Permissions::KICK_MEMBERS
#[inline]
pub async fn kick(&self, http: impl AsRef<Http>, user_id: impl Into<UserId>) -> Result<()> {
self.id.kick(&http, user_id).await
}
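    /// Kicks a [`Member`] from the guild with a provided reason.
    ///
    /// As with [`kick`], this requires the [Kick Members] permission.
    ///
    /// [`kick`]: Self::kick
    /// [Kick Members]: Permissions::KICK_MEMBERS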
#[inline]
pub async fn kick_with_reason(
&self,
http: impl AsRef<Http>,
user_id: impl Into<UserId>,
reason: &str
) -> Result<()> {
self.id.kick_with_reason(&http, user_id, reason).await
}
/// Leaves the guild.
#[inline]
pub async fn leave(&self, http: impl AsRef<Http>) -> Result<()> {
self.id.leave(&http).await
}
/// Gets a user's [`Member`] for the guild by Id.
#[inline]
pub async fn member(&self, cache_http: impl CacheHttp, user_id: impl Into<UserId>) -> Result<Member> {
self.id.member(cache_http, user_id).await
}
/// Gets a list of the guild's members.
///
/// Optionally pass in the `limit` to limit the number of results.
/// Minimum value is 1, maximum and default value is 1000.
/// <br>
/// Optionally pass in `after` to offset the results by a [`User`]'s Id.
///
/// [`User`]: ../user/struct.User.html
#[inline]
pub async fn members(
&self,
http: impl AsRef<Http>,
limit: Option<u64>,
after: impl Into<Option<UserId>>
) -> Result<Vec<Member>> {
self.id.members(&http, limit, after).await
}
    /// Gets a list of all the members in this guild that have the provided
    /// status.
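    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// // assuming a `guild` has already been bound
    /// let online = guild.members_with_status(OnlineStatus::Online);
    /// ```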
pub fn members_with_status(&self, status: OnlineStatus) -> Vec<&Member> {
let mut members = vec![];
for (&id, member) in &self.members {
if let Some(presence) = self.presences.get(&id) {
if status == presence.status {
members.push(member);
}
}
}
members
}
/// Retrieves the first [`Member`] found that matches the name - with an
/// optional discriminator - provided.
///
/// Searching with a discriminator given is the most precise form of lookup,
/// as no two people can share the same username *and* discriminator.
///
/// If a member can not be found by username or username#discriminator,
/// then a search will be done for the nickname. When searching by nickname,
/// the hash (`#`) and everything after it is included in the search.
///
/// The following are valid types of searches:
///
/// - **username**: "zey"
/// - **username and discriminator**: "zey#5479"
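    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// // assuming a `guild` has already been bound
    /// if let Some(member) = guild.member_named("zey#5479") {
    ///     println!("Found member {}", member.user.name);
    /// }
    /// ```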
pub fn member_named(&self, name: &str) -> Option<&Member> {
let (name, discrim) = if let Some(pos) = name.rfind('#') {
let split = name.split_at(pos + 1);
let split2 = (
match split.0.get(0..split.0.len() - 1) {
Some(s) => s,
None => "",
},
split.1,
);
match split2.1.parse::<u16>() {
Ok(discrim_int) => (split2.0, Some(discrim_int)),
Err(_) => (name, None),
}
} else {
(&name[..], None)
};
for member in self.members.values() {
let name_matches = member.user.name == name;
let discrim_matches = match discrim {
Some(discrim) => member.user.discriminator == discrim,
None => true,
};
if name_matches && discrim_matches {
return Some(member);
}
}
self.members
.values()
.find(|member| member.nick.as_ref().map_or(false, |nick| nick == name))
}
    /// Retrieves all [`Member`]s that start with a given `String`.
///
/// `sorted` decides whether the best early match of the `prefix`
    /// should be the criterion used to sort the result.
/// For the `prefix` "zey" and the unsorted result:
/// - "zeya", "zeyaa", "zeyla", "zeyzey", "zeyzeyzey"
/// It would be sorted:
/// - "zeya", "zeyaa", "zeyla", "zeyzey", "zeyzeyzey"
///
/// **Locking**:
/// First collects a [`Member`]'s [`User`]-name by read-locking all inner
/// [`User`]s, and then sorts. This ensures that no name is being changed
/// after being sorted in the originally correct position.
/// However, since the read-locks are dropped after borrowing the name,
    /// the names might have been changed by the user; the sorted list cannot
    /// account for this.
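    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// // assuming a `guild` has already been bound
    /// let matches = guild.members_starting_with("zey", false, true).await;
    /// for (member, name) in matches {
    ///     println!("{} starts with the prefix", name);
    /// }
    /// ```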
pub async fn members_starting_with(&self, prefix: &str, case_sensitive: bool, sorted: bool) -> Vec<(&Member, String)> {
fn starts_with(prefix: &str, case_sensitive: bool, name: &str) -> bool {
case_sensitive && name.starts_with(prefix)
|| !case_sensitive && starts_with_case_insensitive(name, prefix)
}
let mut members = futures::stream::iter(self.members.values())
.filter_map(|member| async move {
let username = &member.user.name;
if starts_with(prefix, case_sensitive, username) {
Some((member, username.to_string()))
} else {
match member.nick {
Some(ref nick) => {
if starts_with(prefix, case_sensitive, nick) {
Some((member, nick.to_string()))
} else {
None
}
},
None => None,
}
}
}).collect::<Vec<(&Member, String)>>()
.await;
if sorted {
members
.sort_by(|a, b| {
closest_to_origin(prefix, &a.1[..], &b.1[..])
});
members
} else {
members
}
}
    /// Retrieves all [`Member`]s containing a given `String` as
/// either username or nick, with a priority on username.
///
/// If the substring is "yla", following results are possible:
/// - "zeyla", "meiyla", "yladenisyla"
/// If 'case_sensitive' is false, the following are not found:
/// - "zeYLa", "meiyLa", "LYAdenislyA"
///
/// `sorted` decides whether the best early match of the search-term
    /// should be the criterion used to sort the result.
/// It will look at the account name first, if that does not fit the
/// search-criteria `substring`, the display-name will be considered.
/// For the `substring` "zey" and the unsorted result:
/// - "azey", "zey", "zeyla", "zeylaa", "zeyzeyzey"
/// It would be sorted:
/// - "zey", "azey", "zeyla", "zeylaa", "zeyzeyzey"
///
/// **Note**: Due to two fields of a `Member` being candidates for
    /// the searched field, setting `sorted` to `true` will result in overhead,
/// as both fields have to be considered again for sorting.
///
/// **Locking**:
/// First collects a [`Member`]'s [`User`]-name by read-locking all inner
/// [`User`]s, and then sorts. This ensures that no name is being changed
/// after being sorted in the originally correct position.
/// However, since the read-locks are dropped after borrowing the name,
    /// the names might have been changed by the user; the sorted list cannot
    /// account for this.
pub async fn members_containing(&self, substring: &str, case_sensitive: bool, sorted: bool) -> Vec<(&Member, String)> {
fn contains(substring: &str, case_sensitive: bool, name: &str) -> bool {
case_sensitive && name.contains(substring)
|| !case_sensitive && contains_case_insensitive(name, substring)
}
let mut members = futures::stream::iter(self.members
.values())
.filter_map(|member| async move {
let username = &member.user.name;
if contains(substring, case_sensitive, username) {
Some((member, username.to_string()))
} else {
match member.nick {
Some(ref nick) => {
if contains(substring, case_sensitive, nick) {
Some((member, nick.to_string()))
} else {
None
}
},
None => None
}
}
}).collect::<Vec<(&Member, String)>>()
.await;
if sorted {
members
.sort_by(|a, b| {
closest_to_origin(substring, &a.1[..], &b.1[..])
});
members
} else {
members
}
}
    /// Retrieves tuples of [`Member`]s containing a given `String` in
    /// their username as the first field, and the name used for sorting
    /// as the second field.
///
/// If the substring is "yla", following results are possible:
/// - "zeyla", "meiyla", "yladenisyla"
/// If 'case_sensitive' is false, the following are not found:
/// - "zeYLa", "meiyLa", "LYAdenislyA"
///
/// `sort` decides whether the best early match of the search-term
    /// should be the criterion used to sort the result.
/// For the `substring` "zey" and the unsorted result:
/// - "azey", "zey", "zeyla", "zeylaa", "zeyzeyzey"
/// It would be sorted:
/// - "zey", "azey", "zeyla", "zeylaa", "zeyzeyzey"
///
/// **Locking**:
/// First collects a [`Member`]'s [`User`]-name by read-locking all inner
/// [`User`]s, and then sorts. This ensures that no name is being changed
/// after being sorted in the originally correct position.
/// However, since the read-locks are dropped after borrowing the name,
    /// the names might have been changed by the user; the sorted list cannot
    /// account for this.
pub async fn members_username_containing(&self, substring: &str, case_sensitive: bool, sorted: bool) -> Vec<(&Member, String)> {
let mut members = futures::stream::iter(self.members
.values())
.filter_map(|member| async move {
if case_sensitive {
let name = &member.user.name;
if name.contains(substring) {
Some((member, name.to_string()))
} else {
None
}
} else {
let name = &member.user.name;
if contains_case_insensitive(name, substring) {
Some((member, name.to_string()))
} else {
None
}
}
}).collect::<Vec<(&Member, String)>>()
.await;
if sorted {
members
.sort_by(|a, b| {
closest_to_origin(substring, &a.1[..], &b.1[..])
});
members
} else {
members
}
}
    /// Retrieves all [`Member`]s containing a given `String` in
/// their nick.
///
/// If the substring is "yla", following results are possible:
/// - "zeyla", "meiyla", "yladenisyla"
/// If 'case_sensitive' is false, the following are not found:
/// - "zeYLa", "meiyLa", "LYAdenislyA"
///
/// `sort` decides whether the best early match of the search-term
    /// should be the criterion used to sort the result.
/// For the `substring` "zey" and the unsorted result:
/// - "azey", "zey", "zeyla", "zeylaa", "zeyzeyzey"
/// It would be sorted:
/// - "zey", "azey", "zeyla", "zeylaa", "zeyzeyzey"
///
    /// **Note**: Instead of panicking when sorting does not find
/// a nick, the username will be used (this should never happen).
///
/// **Locking**:
/// First collects a [`Member`]'s nick directly or by read-locking all inner
/// [`User`]s (in case of no nick, see note above), and then sorts.
/// This ensures that no name is being changed after being sorted in the
/// originally correct position.
/// However, since the read-locks are dropped after borrowing the name,
    /// the names might have been changed by the user; the sorted list cannot
    /// account for this.
pub async fn members_nick_containing(&self, substring: &str, case_sensitive: bool, sorted: bool) -> Vec<(&Member, String)> {
let mut members = futures::stream::iter(self.members
.values())
.filter_map(|member| async move {
let nick = match member.nick {
Some(ref nick) => nick.to_string(),
None => member.user.name.to_string(),
};
if case_sensitive && nick.contains(substring)
|| !case_sensitive && contains_case_insensitive(&nick, substring) {
Some((member, nick))
} else {
None
}
}).collect::<Vec<(&Member, String)>>()
.await;
if sorted {
members
.sort_by(|a, b| {
closest_to_origin(substring, &a.1[..], &b.1[..])
});
members
} else {
members
}
}
/// Calculate a [`Member`]'s permissions in the guild.
///
    /// If member caching is enabled, the cache will be checked
    /// first. If the member is not found there, an HTTP request is made.
///
/// Cache is still required to look up roles.
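    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// // assuming a `guild`, `ctx` and `user_id` have already been bound
    /// let permissions = guild.member_permissions(&ctx, user_id).await?;
    /// ```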
#[inline]
#[cfg(feature = "cache")]
pub async fn member_permissions(&self, cache_http: impl CacheHttp, user_id: impl Into<UserId>) -> Result<Permissions> {
self._member_permissions(cache_http, user_id.into()).await
}
#[cfg(feature = "cache")]
async fn _member_permissions(&self, cache_http: impl CacheHttp, user_id: UserId) -> Result<Permissions> {
if user_id == self.owner_id {
return Ok(Permissions::all());
}
let everyone = match self.roles.get(&RoleId(self.id.0)) {
Some(everyone) => everyone,
None => {
error!(
"(╯°□°)╯︵ ┻━┻ @everyone role ({}) missing in '{}'",
self.id,
self.name,
);
return Ok(Permissions::empty());
},
};
let member = self.member(cache_http, &user_id).await?;
let mut permissions = everyone.permissions;
for role in &member.roles {
if let Some(role) = self.roles.get(role) {
if role.permissions.contains(Permissions::ADMINISTRATOR) {
return Ok(Permissions::all());
}
permissions |= role.permissions;
} else {
warn!(
"(╯°□°)╯︵ ┻━┻ {} on {} has non-existent role {:?}",
member.user.id,
self.id,
role,
);
}
}
Ok(permissions)
}
/// Moves a member to a specific voice channel.
///
/// Requires the [Move Members] permission.
///
/// [Move Members]: Permissions::MOVE_MEMBERS
#[inline]
pub async fn move_member(&self, http: impl AsRef<Http>, user_id: impl Into<UserId>, channel_id: impl Into<ChannelId>) -> Result<Member> {
self.id.move_member(&http, user_id, channel_id).await
}
/// Calculate a [`User`]'s permissions in a given channel in the guild.
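    ///
    /// # Examples
    ///
    /// Check whether a user can read messages in a given channel:
    ///
    /// ```rust,ignore
    /// // assuming a `guild`, `channel_id` and `user_id` have already been bound
    /// if guild.user_permissions_in(channel_id, user_id).read_messages() {
    ///     println!("the user can read messages in the channel");
    /// }
    /// ```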
#[inline]
pub fn user_permissions_in(&self, channel_id: impl Into<ChannelId>, user_id: impl Into<UserId>) -> Permissions {
self._user_permissions_in(channel_id.into(), user_id.into())
}
fn _user_permissions_in(
&self,
channel_id: ChannelId,
user_id: UserId,
) -> Permissions {
// The owner has all permissions in all cases.
if user_id == self.owner_id {
return Permissions::all();
}
// Start by retrieving the @everyone role's permissions.
let everyone = match self.roles.get(&RoleId(self.id.0)) {
Some(everyone) => everyone,
None => {
error!(
"(╯°□°)╯︵ ┻━┻ @everyone role ({}) missing in '{}'",
self.id,
self.name
);
return Permissions::empty();
},
};
// Create a base set of permissions, starting with `@everyone`s.
let mut permissions = everyone.permissions;
let member = self.members.get(&user_id);
if let Some(member) = &member {
for &role in &member.roles {
if let Some(role) = self.roles.get(&role) {
permissions |= role.permissions;
} else {
warn!(
"(╯°□°)╯︵ ┻━┻ {} on {} has non-existent role {:?}",
member.user.id,
self.id,
role
);
}
}
}
// Administrators have all permissions in any channel.
if permissions.contains(Permissions::ADMINISTRATOR) {
return Permissions::all();
}
if let Some(channel) = self.channels.get(&channel_id) {
// If this is a text channel, then throw out voice permissions.
if channel.kind == ChannelType::Text {
permissions &= !(Permissions::CONNECT
| Permissions::SPEAK
| Permissions::MUTE_MEMBERS
| Permissions::DEAFEN_MEMBERS
| Permissions::MOVE_MEMBERS
| Permissions::USE_VAD
| Permissions::STREAM);
}
// Apply the permission overwrites for the channel for each of the
// overwrites that - first - applies to the member's roles, and then
// the member itself.
//
// First apply the denied permission overwrites for each, then apply
// the allowed.
if let Some(member) = member {
let mut data = Vec::with_capacity(member.roles.len());
// Roles
for overwrite in &channel.permission_overwrites {
if let PermissionOverwriteType::Role(role) = overwrite.kind {
if role.0 != self.id.0 && !member.roles.contains(&role) {
continue;
}
if let Some(role) = self.roles.get(&role) {
data.push((role.position, overwrite.deny, overwrite.allow));
}
}
}
data.sort_by(|a, b| a.0.cmp(&b.0));
for overwrite in data {
permissions = (permissions & !overwrite.1) | overwrite.2;
}
} else {
// Apply @everyone overwrites even if member's role list is unavailable
let everyone_overwrite = channel
.permission_overwrites
.iter()
.find(|overwrite| match &overwrite.kind {
PermissionOverwriteType::Role(role) => {
role.0 == self.id.0
}
_ => false
});
if let Some(overwrite) = everyone_overwrite {
permissions = (permissions & !overwrite.deny) | overwrite.allow;
}
}
// Member
for overwrite in &channel.permission_overwrites {
if PermissionOverwriteType::Member(user_id) != overwrite.kind {
continue;
}
permissions = (permissions & !overwrite.deny) | overwrite.allow;
}
} else {
warn!(
"(╯°□°)╯︵ ┻━┻ Guild {} does not contain channel {}",
self.id,
channel_id
);
}
// The default channel is always readable.
if channel_id.0 == self.id.0 {
permissions |= Permissions::READ_MESSAGES;
}
self.remove_unusable_permissions(&mut permissions);
permissions
}
/// Calculate a [`Role`]'s permissions in a given channel in the guild.
/// Returns `None` if given `role_id` cannot be found.
#[inline]
pub fn role_permissions_in(&self, channel_id: impl Into<ChannelId>, role_id: impl Into<RoleId>) -> Option<Permissions> {
self._role_permissions_in(channel_id.into(), role_id.into())
}
fn _role_permissions_in(
&self,
channel_id: ChannelId,
role_id: RoleId,
) -> Option<Permissions> {
let mut permissions = match self.roles.get(&role_id) {
Some(role) => role.permissions,
None => return None,
};
if permissions.contains(Permissions::ADMINISTRATOR) {
return Some(Permissions::all());
}
if let Some(channel) = self.channels.get(&channel_id) {
for overwrite in &channel.permission_overwrites {
if let PermissionOverwriteType::Role(permissions_role_id) = overwrite.kind {
if permissions_role_id == role_id {
permissions = (permissions & !overwrite.deny) | overwrite.allow;
break;
}
}
}
} else {
warn!(
"(╯°□°)╯︵ ┻━┻ Guild {} does not contain channel {}",
self.id,
channel_id
);
return None;
}
self.remove_unusable_permissions(&mut permissions);
Some(permissions)
}
/// Retrieves the count of the number of [`Member`]s that would be pruned
/// with the number of given days.
///
/// See the documentation on [`GuildPrune`] for more information.
///
/// **Note**: Requires the [Kick Members] permission.
///
/// # Errors
///
/// If the `cache` is enabled, returns a [`ModelError::InvalidPermissions`]
/// if the current user does not have permission to perform bans.
///
/// [Kick Members]: Permissions::KICK_MEMBERS
pub async fn prune_count(&self, cache_http: impl CacheHttp, days: u16) -> Result<GuildPrune> {
#[cfg(feature = "cache")]
{
if cache_http.cache().is_some() {
let req = Permissions::KICK_MEMBERS;
if !self.has_perms(&cache_http, req).await {
return Err(Error::Model(ModelError::InvalidPermissions(req)));
}
}
}
self.id.prune_count(cache_http.http(), days).await
}
pub(crate) fn remove_unusable_permissions(&self, permissions: &mut Permissions) {
// No SEND_MESSAGES => no message-sending-related actions
// If the member does not have the `SEND_MESSAGES` permission, then
// throw out message-able permissions.
if !permissions.contains(Permissions::SEND_MESSAGES) {
*permissions &= !(Permissions::SEND_TTS_MESSAGES
| Permissions::MENTION_EVERYONE
| Permissions::EMBED_LINKS
| Permissions::ATTACH_FILES);
}
        // If the member does not have the `READ_MESSAGES` permission, then
        // throw out actionable permissions.
if !permissions.contains(Permissions::READ_MESSAGES) {
*permissions &= !(Permissions::KICK_MEMBERS
| Permissions::BAN_MEMBERS
| Permissions::ADMINISTRATOR
| Permissions::MANAGE_GUILD
| Permissions::CHANGE_NICKNAME
| Permissions::MANAGE_NICKNAMES);
}
}
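    // Illustrative effect of the pruning above (the flags are real
    // Permissions values, the scenario is hypothetical): a member granted
    // EMBED_LINKS through an overwrite but lacking SEND_MESSAGES ends up
    // without EMBED_LINKS as well, since embedding requires sending
    // messages.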
/// Re-orders the channels of the guild.
///
    /// Although not required, you should specify all channels' positions,
    /// regardless of whether they were updated. Otherwise, positioning can
    /// become inconsistent.
#[inline]
pub async fn reorder_channels<It>(&self, http: impl AsRef<Http>, channels: It) -> Result<()>
where It: IntoIterator<Item = (ChannelId, u64)>
{
self.id.reorder_channels(&http, channels).await
}
/// Returns the Id of the shard associated with the guild.
///
/// When the cache is enabled this will automatically retrieve the total
/// number of shards.
///
/// **Note**: When the cache is enabled, this function unlocks the cache to
/// retrieve the total number of shards in use. If you already have the
/// total, consider using [`utils::shard_id`].
///
/// [`utils::shard_id`]: crate::utils::shard_id
#[cfg(all(feature = "cache", feature = "utils"))]
#[inline]
pub async fn shard_id(&self, cache: impl AsRef<Cache>) -> u64 {
self.id.shard_id(&cache).await
}
/// Returns the Id of the shard associated with the guild.
///
/// When the cache is enabled this will automatically retrieve the total
/// number of shards.
///
/// When the cache is not enabled, the total number of shards being used
/// will need to be passed.
///
/// # Examples
///
/// Retrieve the Id of the shard for a guild with Id `81384788765712384`,
/// using 17 shards:
///
/// ```rust,ignore
/// use serenity::utils;
///
/// // assumes a `guild` has already been bound
///
/// assert_eq!(guild.shard_id(17), 7);
/// ```
#[cfg(all(feature = "utils", not(feature = "cache")))]
#[inline]
pub async fn shard_id(&self, shard_count: u64) -> u64 { self.id.shard_id(shard_count).await }
/// Returns the formatted URL of the guild's splash image, if one exists.
    pub fn splash_url(&self) -> Option<String> {
        self.splash
            .as_ref()
            .map(|splash| format!(cdn!("/splashes/{}/{}.webp"), self.id, splash))
    }
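    // Illustrative result, assuming the cdn! macro expands to Discord's CDN
    // base URL:
    //   Some("https://cdn.discordapp.com/splashes/<guild_id>/<splash>.webp")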
/// Starts an integration sync for the given integration Id.
///
/// Requires the [Manage Guild] permission.
///
/// [Manage Guild]: Permissions::MANAGE_GUILD
#[inline]
pub async fn start_integration_sync(&self, http: impl AsRef<Http>, integration_id: impl Into<IntegrationId>) -> Result<()> {
self.id.start_integration_sync(&http, integration_id).await
}
/// Starts a prune of [`Member`]s.
///
/// See the documentation on [`GuildPrune`] for more information.
///
/// **Note**: Requires the [Kick Members] permission.
///
/// # Errors
///
/// If the `cache` is enabled, returns a [`ModelError::InvalidPermissions`]
/// if the current user does not have permission to perform bans.
///
/// [Kick Members]: Permissions::KICK_MEMBERS
pub async fn start_prune(&self, cache_http: impl CacheHttp, days: u16) -> Result<GuildPrune> {
#[cfg(feature = "cache")]
{
if cache_http.cache().is_some() {
let req = Permissions::KICK_MEMBERS;
if !self.has_perms(&cache_http, req).await {
return Err(Error::Model(ModelError::InvalidPermissions(req)));
}
}
}
self.id.start_prune(cache_http.http(), days).await
}
/// Unbans the given [`User`] from the guild.
///
/// **Note**: Requires the [Ban Members] permission.
///
/// # Errors
///
/// If the `cache` is enabled, returns a [`ModelError::InvalidPermissions`]
/// if the current user does not have permission to perform bans.
///
/// [Ban Members]: Permissions::BAN_MEMBERS
pub async fn unban(&self, cache_http: impl CacheHttp, user_id: impl Into<UserId>) -> Result<()> {
#[cfg(feature = "cache")]
{
if cache_http.cache().is_some() {
let req = Permissions::BAN_MEMBERS;
if !self.has_perms(&cache_http, req).await {
return Err(Error::Model(ModelError::InvalidPermissions(req)));
}
}
}
self.id.unban(&cache_http.http(), user_id).await
}
    /// Retrieves the guild's vanity URL.
///
/// **Note**: Requires the [Manage Guild] permission.
///
/// [Manage Guild]: Permissions::MANAGE_GUILD
#[inline]
pub async fn vanity_url(&self, http: impl AsRef<Http>) -> Result<String> {
self.id.vanity_url(&http).await
}
/// Retrieves the guild's webhooks.
///
/// **Note**: Requires the [Manage Webhooks] permission.
///
/// [Manage Webhooks]: Permissions::MANAGE_WEBHOOKS
#[inline]
pub async fn webhooks(&self, http: impl AsRef<Http>) -> Result<Vec<Webhook>> {
self.id.webhooks(&http).await
}
/// Obtain a reference to a role by its name.
///
    /// **Note**: If two or more roles have the same name, the obtained
    /// reference will be to one of them.
///
/// # Examples
///
/// Obtain a reference to a [`Role`] by its name.
///
/// ```rust,no_run
/// # #[cfg(all(feature = "cache", feature = "client"))]
/// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
/// use serenity::model::prelude::*;
/// use serenity::prelude::*;
///
/// struct Handler;
///
/// #[serenity::async_trait]
/// impl EventHandler for Handler {
/// async fn message(&self, ctx: Context, msg: Message) {
/// if let Some(guild_id) = msg.guild_id {
/// if let Some(guild) = guild_id.to_guild_cached(&ctx).await {
/// if let Some(role) = guild.role_by_name("role_name") {
/// println!("{:?}", role);
/// }
/// }
/// }
/// }
/// }
///
/// let mut client = Client::builder("token").event_handler(Handler).await?;
///
/// client.start().await?;
/// # Ok(())
/// # }
/// ```
pub fn role_by_name(&self, role_name: &str) -> Option<&Role> {
self.roles.values().find(|role| role_name == role.name)
}
}
impl<'de> Deserialize<'de> for Guild {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> StdResult<Self, D::Error> {
let mut map = JsonMap::deserialize(deserializer)?;
let id = map.get("id")
.and_then(|x| x.as_str())
.and_then(|x| x.parse::<u64>().ok());
if let Some(guild_id) = id {
if let Some(array) = map.get_mut("channels").and_then(|x| x.as_array_mut()) {
for value in array {
if let Some(channel) = value.as_object_mut() {
channel
.insert("guild_id".to_string(), Value::Number(Number::from(guild_id)));
}
}
}
if let Some(array) = map.get_mut("members").and_then(|x| x.as_array_mut()) {
for value in array {
if let Some(member) = value.as_object_mut() {
member
.insert("guild_id".to_string(), Value::Number(Number::from(guild_id)));
}
}
}
if let Some(array) = map.get_mut("roles").and_then(|x| x.as_array_mut()) {
for value in array {
if let Some(role) = value.as_object_mut() {
role
.insert("guild_id".to_string(), Value::Number(Number::from(guild_id)));
}
}
}
}
let afk_channel_id = match map.remove("afk_channel_id") {
Some(v) => serde_json::from_value::<Option<ChannelId>>(v)
.map_err(DeError::custom)?,
None => None,
};
let afk_timeout = map.remove("afk_timeout")
.ok_or_else(|| DeError::custom("expected guild afk_timeout"))
.and_then(u64::deserialize)
.map_err(DeError::custom)?;
let application_id = match map.remove("application_id") {
Some(v) => serde_json::from_value::<Option<ApplicationId>>(v)
.map_err(DeError::custom)?,
None => None,
};
let channels = map.remove("channels")
.ok_or_else(|| DeError::custom("expected guild channels"))
.and_then(deserialize_guild_channels)
.map_err(DeError::custom)?;
let default_message_notifications = map.remove("default_message_notifications")
.ok_or_else(|| {
DeError::custom("expected guild default_message_notifications")
})
.and_then(DefaultMessageNotificationLevel::deserialize)
.map_err(DeError::custom)?;
let emojis = map.remove("emojis")
.ok_or_else(|| DeError::custom("expected guild emojis"))
.and_then(deserialize_emojis)
.map_err(DeError::custom)?;
let explicit_content_filter = map.remove("explicit_content_filter")
.ok_or_else(|| DeError::custom(
"expected guild explicit_content_filter"
))
.and_then(ExplicitContentFilter::deserialize)
.map_err(DeError::custom)?;
let features = map.remove("features")
.ok_or_else(|| DeError::custom("expected guild features"))
.and_then(serde_json::from_value::<Vec<String>>)
.map_err(DeError::custom)?;
let icon = match map.remove("icon") {
Some(v) => Option::<String>::deserialize(v).map_err(DeError::custom)?,
None => None,
};
let id = map.remove("id")
.ok_or_else(|| DeError::custom("expected guild id"))
.and_then(GuildId::deserialize)
.map_err(DeError::custom)?;
let joined_at = map.remove("joined_at")
.ok_or_else(|| DeError::custom("expected guild joined_at"))
.and_then(DateTime::deserialize)
.map_err(DeError::custom)?;
let large = map.remove("large")
.ok_or_else(|| DeError::custom("expected guild large"))
.and_then(bool::deserialize)
.map_err(DeError::custom)?;
let member_count = map.remove("member_count")
.ok_or_else(|| DeError::custom("expected guild member_count"))
.and_then(u64::deserialize)
.map_err(DeError::custom)?;
let members = map.remove("members")
.ok_or_else(|| DeError::custom("expected guild members"))
.and_then(deserialize_members)
.map_err(DeError::custom)?;
let mfa_level = map.remove("mfa_level")
.ok_or_else(|| DeError::custom("expected guild mfa_level"))
.and_then(MfaLevel::deserialize)
.map_err(DeError::custom)?;
let name = map.remove("name")
.ok_or_else(|| DeError::custom("expected guild name"))
.and_then(String::deserialize)
.map_err(DeError::custom)?;
let owner_id = map.remove("owner_id")
.ok_or_else(|| DeError::custom("expected guild owner_id"))
.and_then(UserId::deserialize)
.map_err(DeError::custom)?;
let presences = map.remove("presences")
.ok_or_else(|| DeError::custom("expected guild presences"))
.and_then(deserialize_presences)
.map_err(DeError::custom)?;
let region = map.remove("region")
.ok_or_else(|| DeError::custom("expected guild region"))
.and_then(String::deserialize)
.map_err(DeError::custom)?;
let roles = map.remove("roles")
.ok_or_else(|| DeError::custom("expected guild roles"))
.and_then(deserialize_roles)
.map_err(DeError::custom)?;
let splash = match map.remove("splash") {
Some(v) => Option::<String>::deserialize(v).map_err(DeError::custom)?,
None => None,
};
let system_channel_id = match map.remove("system_channel_id") {
Some(v) => Option::<ChannelId>::deserialize(v).map_err(DeError::custom)?,
None => None,
};
let verification_level = map.remove("verification_level")
.ok_or_else(|| DeError::custom("expected guild verification_level"))
.and_then(VerificationLevel::deserialize)
.map_err(DeError::custom)?;
let voice_states = map.remove("voice_states")
.ok_or_else(|| DeError::custom("expected guild voice_states"))
.and_then(deserialize_voice_states)
.map_err(DeError::custom)?;
let description = match map.remove("description") {
Some(v) => Option::<String>::deserialize(v).map_err(DeError::custom)?,
None => None,
};
let premium_tier = match map.remove("premium_tier") {
Some(v) => PremiumTier::deserialize(v).map_err(DeError::custom)?,
None => PremiumTier::default(),
};
let premium_subscription_count = match map.remove("premium_subscription_count") {
Some(Value::Null) | None => 0,
Some(v) => u64::deserialize(v).map_err(DeError::custom)?,
};
let banner = match map.remove("banner") {
Some(v) => Option::<String>::deserialize(v).map_err(DeError::custom)?,
None => None,
};
let vanity_url_code = match map.remove("vanity_url_code") {
Some(v) => Option::<String>::deserialize(v).map_err(DeError::custom)?,
None => None,
};
let preferred_locale = map.remove("preferred_locale")
.ok_or_else(|| DeError::custom("expected preferred locale"))
.and_then(String::deserialize)
.map_err(DeError::custom)?;
Ok(Self {
afk_channel_id,
application_id,
afk_timeout,
channels,
default_message_notifications,
emojis,
explicit_content_filter,
features,
icon,
id,
joined_at,
large,
member_count,
members,
mfa_level,
name,
owner_id,
presences,
region,
roles,
splash,
system_channel_id,
verification_level,
voice_states,
description,
premium_tier,
premium_subscription_count,
banner,
vanity_url_code,
preferred_locale,
})
}
}
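// Illustrative input shape for the deserializer above: Discord's guild
// payloads omit `guild_id` on nested objects, e.g.
//
//   {"id": "1", "channels": [{"id": "2", ...}], "members": [...], ...}
//
// so the parent guild's id is injected into each nested channel, member,
// and role before delegating to their own `Deserialize` impls.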
/// Checks if a `&str` contains another `&str`, ignoring case.
#[cfg(feature = "model")]
fn contains_case_insensitive(to_look_at: &str, to_find: &str) -> bool {
to_look_at.to_lowercase().contains(&to_find.to_lowercase())
}
/// Checks if a `&str` starts with another `&str`, ignoring case.
#[cfg(feature = "model")]
fn starts_with_case_insensitive(to_look_at: &str, to_find: &str) -> bool {
to_look_at.to_lowercase().starts_with(&to_find.to_lowercase())
}
/// Takes a `&str` as `origin` and tests whether `word_a` or `word_b` is
/// closer to it.
///
/// **Note**: Normally `word_a` and `word_b` are expected to contain
/// `origin` as a substring. If one does not, `closest_to_origin` sorts it
/// to the end.
#[cfg(feature = "model")]
fn closest_to_origin(origin: &str, word_a: &str, word_b: &str) -> std::cmp::Ordering {
let value_a = match word_a.find(origin) {
Some(value) => value + word_a.len(),
None => return std::cmp::Ordering::Greater,
};
let value_b = match word_b.find(origin) {
Some(value) => value + word_b.len(),
None => return std::cmp::Ordering::Less,
};
value_a.cmp(&value_b)
}
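// Illustrative ordering (hypothetical words): with origin = "cat",
// closest_to_origin("cat", "cats", "category") is Ordering::Less, since
// "cats" matches at index 0 and is shorter (0 + 4 < 0 + 8), so it sorts
// before "category".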
/// A container for guilds.
///
/// This is used to differentiate whether a guild itself can be used or whether
/// a guild needs to be retrieved from the cache.
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Debug)]
#[non_exhaustive]
pub enum GuildContainer {
/// A guild which can have its contents directly searched.
Guild(PartialGuild),
/// A guild's id, which can be used to search the cache for a guild.
Id(GuildId),
}
/// Information relating to a guild's widget embed.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub struct GuildEmbed {
/// The Id of the channel to show the embed for.
pub channel_id: ChannelId,
/// Whether the widget embed is enabled.
pub enabled: bool,
}
/// Representation of the number of members that would be pruned by a guild
/// prune operation.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub struct GuildPrune {
/// The number of members that would be pruned by the operation.
pub pruned: u64,
}
/// Basic information about a guild.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct GuildInfo {
/// The unique Id of the guild.
///
/// Can be used to calculate creation date.
pub id: GuildId,
/// The hash of the icon of the guild.
///
/// This can be used to generate a URL to the guild's icon image.
pub icon: Option<String>,
/// The name of the guild.
pub name: String,
/// Indicator of whether the current user is the owner.
pub owner: bool,
/// The permissions that the current user has.
pub permissions: Permissions,
}
#[cfg(any(feature = "model", feature = "utils"))]
impl GuildInfo {
/// Returns the formatted URL of the guild's icon, if the guild has an icon.
///
/// This will produce a WEBP image URL, or GIF if the guild has a GIF icon.
pub fn icon_url(&self) -> Option<String> {
self.icon
.as_ref()
.map(|icon| {
let ext = if icon.starts_with("a_") {
"gif"
} else {
"webp"
};
format!(cdn!("/icons/{}/{}.{}"), self.id, icon, ext)
})
}
}
impl From<PartialGuild> for GuildContainer {
fn from(guild: PartialGuild) -> GuildContainer { GuildContainer::Guild(guild) }
}
impl From<GuildId> for GuildContainer {
fn from(guild_id: GuildId) -> GuildContainer { GuildContainer::Id(guild_id) }
}
impl From<u64> for GuildContainer {
fn from(id: u64) -> GuildContainer { GuildContainer::Id(GuildId(id)) }
}
#[cfg(feature = "model")]
impl InviteGuild {
/// Returns the formatted URL of the guild's splash image, if one exists.
pub fn splash_url(&self) -> Option<String> {
self.icon
.as_ref()
.map(|icon| format!(cdn!("/splashes/{}/{}.webp"), self.id, icon))
}
}
/// Data for an unavailable guild.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub struct GuildUnavailable {
/// The Id of the [`Guild`] that may be unavailable.
pub id: GuildId,
/// Indicator of whether the guild is unavailable.
#[serde(default)]
pub unavailable: bool,
}
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Debug, Deserialize, Serialize)]
#[non_exhaustive]
#[serde(untagged)]
pub enum GuildStatus {
OnlinePartialGuild(PartialGuild),
OnlineGuild(Guild),
Offline(GuildUnavailable),
}
#[cfg(feature = "model")]
impl GuildStatus {
/// Retrieves the Id of the inner [`Guild`].
pub fn id(&self) -> GuildId {
match *self {
GuildStatus::Offline(offline) => offline.id,
GuildStatus::OnlineGuild(ref guild) => guild.id,
GuildStatus::OnlinePartialGuild(ref partial_guild) => partial_guild.id,
}
}
}
/// Default message notification level for a guild.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[non_exhaustive]
pub enum DefaultMessageNotificationLevel {
/// Receive notifications for everything.
All = 0,
/// Receive only mentions.
Mentions = 1,
}
enum_number!(
DefaultMessageNotificationLevel {
All,
Mentions,
}
);
impl DefaultMessageNotificationLevel {
pub fn num(self) -> u64 {
match self {
DefaultMessageNotificationLevel::All => 0,
DefaultMessageNotificationLevel::Mentions => 1,
}
}
}
/// Setting used to filter explicit messages from members.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[non_exhaustive]
pub enum ExplicitContentFilter {
/// Don't scan any messages.
None = 0,
/// Scan messages from members without a role.
WithoutRole = 1,
/// Scan messages sent by all members.
All = 2,
}
enum_number!(
ExplicitContentFilter {
None,
WithoutRole,
All,
}
);
impl ExplicitContentFilter {
pub fn num(self) -> u64 {
match self {
ExplicitContentFilter::None => 0,
ExplicitContentFilter::WithoutRole => 1,
ExplicitContentFilter::All => 2,
}
}
}
/// Multi-Factor Authentication level for guild moderators.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[non_exhaustive]
pub enum MfaLevel {
/// MFA is disabled.
None = 0,
/// MFA is enabled.
Elevated = 1,
}
enum_number!(
MfaLevel {
None,
Elevated,
}
);
impl MfaLevel {
pub fn num(self) -> u64 {
match self {
MfaLevel::None => 0,
MfaLevel::Elevated => 1,
}
}
}
/// The name of a region that a voice server can be located in.
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, PartialOrd, Ord, Serialize)]
#[non_exhaustive]
pub enum Region {
#[serde(rename = "amsterdam")] Amsterdam,
#[serde(rename = "brazil")] Brazil,
#[serde(rename = "eu-central")] EuCentral,
#[serde(rename = "eu-west")] EuWest,
#[serde(rename = "frankfurt")] Frankfurt,
#[serde(rename = "hongkong")] HongKong,
#[serde(rename = "japan")] Japan,
#[serde(rename = "london")] London,
#[serde(rename = "russia")] Russia,
#[serde(rename = "singapore")] Singapore,
#[serde(rename = "sydney")] Sydney,
#[serde(rename = "us-central")] UsCentral,
#[serde(rename = "us-east")] UsEast,
#[serde(rename = "us-south")] UsSouth,
#[serde(rename = "us-west")] UsWest,
#[serde(rename = "vip-amsterdam")] VipAmsterdam,
#[serde(rename = "vip-us-east")] VipUsEast,
#[serde(rename = "vip-us-west")] VipUsWest,
}
impl Region {
pub fn name(&self) -> &str {
match *self {
Region::Amsterdam => "amsterdam",
Region::Brazil => "brazil",
Region::EuCentral => "eu-central",
Region::EuWest => "eu-west",
Region::Frankfurt => "frankfurt",
Region::HongKong => "hongkong",
Region::Japan => "japan",
Region::London => "london",
Region::Russia => "russia",
Region::Singapore => "singapore",
Region::Sydney => "sydney",
Region::UsCentral => "us-central",
Region::UsEast => "us-east",
Region::UsSouth => "us-south",
Region::UsWest => "us-west",
Region::VipAmsterdam => "vip-amsterdam",
Region::VipUsEast => "vip-us-east",
Region::VipUsWest => "vip-us-west",
}
}
}
/// The level to set as criteria prior to a user being able to send
/// messages in a [`Guild`].
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[non_exhaustive]
pub enum VerificationLevel {
/// Does not require any verification.
None = 0,
/// Must have a verified email on the user's Discord account.
Low = 1,
/// Must also be a registered user on Discord for longer than 5 minutes.
Medium = 2,
/// Must also be a member of the guild for longer than 10 minutes.
High = 3,
/// Must have a verified phone on the user's Discord account.
Higher = 4,
}
enum_number!(
VerificationLevel {
None,
Low,
Medium,
High,
Higher,
}
);
impl VerificationLevel {
pub fn num(self) -> u64 {
match self {
VerificationLevel::None => 0,
VerificationLevel::Low => 1,
VerificationLevel::Medium => 2,
VerificationLevel::High => 3,
VerificationLevel::Higher => 4,
}
}
}
#[cfg(test)]
mod test {
#[cfg(feature = "model")]
mod model {
use chrono::prelude::*;
use crate::model::prelude::*;
use std::collections::*;
fn gen_user() -> User {
User {
id: UserId(210),
avatar: Some("abc".to_string()),
bot: true,
discriminator: 1432,
name: "test".to_string(),
}
}
fn gen_member() -> Member {
let dt: DateTime<Utc> = FixedOffset::east(5 * 3600)
.ymd(2016, 11, 08)
.and_hms(0, 0, 0)
.with_timezone(&Utc);
let vec1 = Vec::new();
let u = gen_user();
Member {
deaf: false,
guild_id: GuildId(1),
joined_at: Some(dt),
mute: false,
nick: Some("aaaa".to_string()),
roles: vec1,
user: u,
}
}
fn gen() -> Guild {
let u = gen_user();
let m = gen_member();
let hm1 = HashMap::new();
let hm2 = HashMap::new();
let vec1 = Vec::new();
let dt: DateTime<Utc> = FixedOffset::east(5 * 3600)
.ymd(2016, 11, 08)
.and_hms(0, 0, 0)
.with_timezone(&Utc);
let mut hm3 = HashMap::new();
let hm4 = HashMap::new();
let hm5 = HashMap::new();
let hm6 = HashMap::new();
hm3.insert(u.id, m);
let notifications = DefaultMessageNotificationLevel::All;
Guild {
afk_channel_id: Some(ChannelId(0)),
afk_timeout: 0,
channels: hm1,
default_message_notifications: notifications,
emojis: hm2,
features: vec1,
icon: Some("/avatars/210/a_aaa.webp?size=1024".to_string()),
id: GuildId(1),
joined_at: dt,
large: false,
member_count: 1,
members: hm3,
mfa_level: MfaLevel::Elevated,
name: "Spaghetti".to_string(),
owner_id: UserId(210),
presences: hm4,
region: "NA".to_string(),
roles: hm5,
splash: Some("asdf".to_string()),
verification_level: VerificationLevel::None,
voice_states: hm6,
description: None,
premium_tier: PremiumTier::Tier1,
application_id: Some(ApplicationId(0)),
explicit_content_filter: ExplicitContentFilter::None,
system_channel_id: Some(ChannelId(0)),
premium_subscription_count: 12,
banner: None,
vanity_url_code: Some("bruhmoment".to_string()),
preferred_locale: "en-US".to_string(),
}
}
#[tokio::test]
async fn member_named_username() {
let guild = gen();
let lhs = guild
.member_named("test#1432")
.unwrap()
.display_name();
assert_eq!(lhs, gen_member().display_name());
}
#[tokio::test]
async fn member_named_nickname() {
let guild = gen();
let lhs = guild.member_named("aaaa").unwrap().display_name();
assert_eq!(lhs, gen_member().display_name());
}
}
}
| 34.935772 | 150 | 0.555884 |
03ab24cc535fbff13c4b59219d740111ea3b19ad
| 33,727 |
//! The `pubsub` module implements a threaded subscription service on client RPC request
use crate::rpc_subscriptions::{RpcSubscriptions, RpcVote, SlotInfo};
use jsonrpc_core::{Error, ErrorCode, Result};
use jsonrpc_derive::rpc;
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
use solana_account_decoder::UiAccount;
use solana_client::rpc_response::{Response as RpcResponse, RpcKeyedAccount, RpcSignatureResult};
#[cfg(test)]
use solana_runtime::bank_forks::BankForks;
use solana_sdk::{
clock::Slot, commitment_config::CommitmentConfig, pubkey::Pubkey, signature::Signature,
};
#[cfg(test)]
use std::sync::RwLock;
use std::{
str::FromStr,
sync::{atomic, Arc},
};
// Suppress needless_return due to
// https://github.com/paritytech/jsonrpc/blob/2d38e6424d8461cdf72e78425ce67d51af9c6586/derive/src/lib.rs#L204
// Once https://github.com/paritytech/jsonrpc/issues/418 is resolved, try to remove this clippy allow
#[allow(clippy::needless_return)]
#[rpc]
pub trait RpcSolPubSub {
type Metadata;
// Get notification every time account data is changed
// Accepts pubkey parameter as base-58 encoded string
#[pubsub(
subscription = "accountNotification",
subscribe,
name = "accountSubscribe"
)]
fn account_subscribe(
&self,
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<UiAccount>>,
pubkey_str: String,
commitment: Option<CommitmentConfig>,
);
// Unsubscribe from account notification subscription.
#[pubsub(
subscription = "accountNotification",
unsubscribe,
name = "accountUnsubscribe"
)]
fn account_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId)
-> Result<bool>;
// Get notification every time account data owned by a particular program is changed
// Accepts pubkey parameter as base-58 encoded string
#[pubsub(
subscription = "programNotification",
subscribe,
name = "programSubscribe"
)]
fn program_subscribe(
&self,
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
pubkey_str: String,
commitment: Option<CommitmentConfig>,
);
    // Unsubscribe from program notification subscription.
#[pubsub(
subscription = "programNotification",
unsubscribe,
name = "programUnsubscribe"
)]
fn program_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId)
-> Result<bool>;
// Get notification when signature is verified
// Accepts signature parameter as base-58 encoded string
#[pubsub(
subscription = "signatureNotification",
subscribe,
name = "signatureSubscribe"
)]
fn signature_subscribe(
&self,
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
commitment: Option<CommitmentConfig>,
);
// Unsubscribe from signature notification subscription.
#[pubsub(
subscription = "signatureNotification",
unsubscribe,
name = "signatureUnsubscribe"
)]
fn signature_unsubscribe(
&self,
meta: Option<Self::Metadata>,
id: SubscriptionId,
) -> Result<bool>;
// Get notification when slot is encountered
#[pubsub(subscription = "slotNotification", subscribe, name = "slotSubscribe")]
fn slot_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<SlotInfo>);
// Unsubscribe from slot notification subscription.
#[pubsub(
subscription = "slotNotification",
unsubscribe,
name = "slotUnsubscribe"
)]
fn slot_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
// Get notification when vote is encountered
#[pubsub(subscription = "voteNotification", subscribe, name = "voteSubscribe")]
fn vote_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<RpcVote>);
// Unsubscribe from vote notification subscription.
#[pubsub(
subscription = "voteNotification",
unsubscribe,
name = "voteUnsubscribe"
)]
fn vote_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
// Get notification when a new root is set
#[pubsub(subscription = "rootNotification", subscribe, name = "rootSubscribe")]
fn root_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<Slot>);
    // Unsubscribe from root notification subscription.
#[pubsub(
subscription = "rootNotification",
unsubscribe,
name = "rootUnsubscribe"
)]
fn root_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
}
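// Illustrative wire request for the account subscription above; the id and
// commitment object are example values:
//
//   {"jsonrpc":"2.0","id":1,"method":"accountSubscribe",
//    "params":["<base-58 pubkey>", {"commitment":"recent"}]}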
pub struct RpcSolPubSubImpl {
uid: Arc<atomic::AtomicUsize>,
subscriptions: Arc<RpcSubscriptions>,
}
impl RpcSolPubSubImpl {
pub fn new(subscriptions: Arc<RpcSubscriptions>) -> Self {
let uid = Arc::new(atomic::AtomicUsize::default());
Self { uid, subscriptions }
}
#[cfg(test)]
fn default_with_bank_forks(bank_forks: Arc<RwLock<BankForks>>) -> Self {
let uid = Arc::new(atomic::AtomicUsize::default());
let subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks(bank_forks));
Self { uid, subscriptions }
}
}
fn param<T: FromStr>(param_str: &str, thing: &str) -> Result<T> {
param_str.parse::<T>().map_err(|_e| Error {
code: ErrorCode::InvalidParams,
message: format!("Invalid Request: Invalid {} provided", thing),
data: None,
})
}
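// Illustrative failure (hypothetical input): param::<Pubkey>("not-base58",
// "pubkey") returns an InvalidParams error whose message reads
// "Invalid Request: Invalid pubkey provided".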
impl RpcSolPubSub for RpcSolPubSubImpl {
type Metadata = Arc<Session>;
fn account_subscribe(
&self,
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<UiAccount>>,
pubkey_str: String,
commitment: Option<CommitmentConfig>,
) {
match param::<Pubkey>(&pubkey_str, "pubkey") {
Ok(pubkey) => {
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("account_subscribe: account={:?} id={:?}", pubkey, sub_id);
self.subscriptions
.add_account_subscription(pubkey, commitment, sub_id, subscriber)
}
Err(e) => subscriber.reject(e).unwrap(),
}
}
fn account_unsubscribe(
&self,
_meta: Option<Self::Metadata>,
id: SubscriptionId,
) -> Result<bool> {
info!("account_unsubscribe: id={:?}", id);
if self.subscriptions.remove_account_subscription(&id) {
Ok(true)
} else {
Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid Request: Subscription id does not exist".into(),
data: None,
})
}
}
fn program_subscribe(
&self,
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
pubkey_str: String,
commitment: Option<CommitmentConfig>,
) {
match param::<Pubkey>(&pubkey_str, "pubkey") {
Ok(pubkey) => {
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("program_subscribe: account={:?} id={:?}", pubkey, sub_id);
self.subscriptions
.add_program_subscription(pubkey, commitment, sub_id, subscriber)
}
Err(e) => subscriber.reject(e).unwrap(),
}
}
fn program_unsubscribe(
&self,
_meta: Option<Self::Metadata>,
id: SubscriptionId,
) -> Result<bool> {
info!("program_unsubscribe: id={:?}", id);
if self.subscriptions.remove_program_subscription(&id) {
Ok(true)
} else {
Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid Request: Subscription id does not exist".into(),
data: None,
})
}
}
fn signature_subscribe(
&self,
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
commitment: Option<CommitmentConfig>,
) {
info!("signature_subscribe");
match param::<Signature>(&signature_str, "signature") {
Ok(signature) => {
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!(
"signature_subscribe: signature={:?} id={:?}",
signature, sub_id
);
self.subscriptions
.add_signature_subscription(signature, commitment, sub_id, subscriber);
}
Err(e) => subscriber.reject(e).unwrap(),
}
}
fn signature_unsubscribe(
&self,
_meta: Option<Self::Metadata>,
id: SubscriptionId,
) -> Result<bool> {
info!("signature_unsubscribe");
if self.subscriptions.remove_signature_subscription(&id) {
Ok(true)
} else {
Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid Request: Subscription id does not exist".into(),
data: None,
})
}
}
fn slot_subscribe(&self, _meta: Self::Metadata, subscriber: Subscriber<SlotInfo>) {
info!("slot_subscribe");
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("slot_subscribe: id={:?}", sub_id);
self.subscriptions.add_slot_subscription(sub_id, subscriber);
}
fn slot_unsubscribe(&self, _meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> {
info!("slot_unsubscribe");
if self.subscriptions.remove_slot_subscription(&id) {
Ok(true)
} else {
Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid Request: Subscription id does not exist".into(),
data: None,
})
}
}
fn vote_subscribe(&self, _meta: Self::Metadata, subscriber: Subscriber<RpcVote>) {
info!("vote_subscribe");
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("vote_subscribe: id={:?}", sub_id);
self.subscriptions.add_vote_subscription(sub_id, subscriber);
}
fn vote_unsubscribe(&self, _meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> {
info!("vote_unsubscribe");
if self.subscriptions.remove_vote_subscription(&id) {
Ok(true)
} else {
Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid Request: Subscription id does not exist".into(),
data: None,
})
}
}
fn root_subscribe(&self, _meta: Self::Metadata, subscriber: Subscriber<Slot>) {
info!("root_subscribe");
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("root_subscribe: id={:?}", sub_id);
self.subscriptions.add_root_subscription(sub_id, subscriber);
}
fn root_unsubscribe(&self, _meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> {
info!("root_unsubscribe");
if self.subscriptions.remove_root_subscription(&id) {
Ok(true)
} else {
Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid Request: Subscription id does not exist".into(),
data: None,
})
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
cluster_info_vote_listener::{ClusterInfoVoteListener, VoteTracker},
rpc_subscriptions::{tests::robust_poll_or_panic, CacheSlotInfo},
};
use crossbeam_channel::unbounded;
use jsonrpc_core::{futures::sync::mpsc, Response};
use jsonrpc_pubsub::{PubSubHandler, Session};
use serial_test_derive::serial;
use solana_budget_program::{self, budget_instruction};
use solana_runtime::{
bank::Bank,
bank_forks::BankForks,
commitment::BlockCommitmentCache,
genesis_utils::{
create_genesis_config, create_genesis_config_with_vote_accounts, GenesisConfigInfo,
ValidatorVoteKeypairs,
},
};
use solana_sdk::{
hash::Hash,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_program, system_transaction,
transaction::{self, Transaction},
};
use solana_vote_program::vote_transaction;
use std::{
sync::{atomic::AtomicBool, RwLock},
thread::sleep,
time::Duration,
};
fn process_transaction_and_notify(
bank_forks: &RwLock<BankForks>,
tx: &Transaction,
subscriptions: &RpcSubscriptions,
current_slot: Slot,
) -> transaction::Result<()> {
bank_forks
.write()
.unwrap()
.get(current_slot)
.unwrap()
.process_transaction(tx)?;
let mut cache_slot_info = CacheSlotInfo::default();
cache_slot_info.current_slot = current_slot;
subscriptions.notify_subscribers(cache_slot_info);
Ok(())
}
fn create_session() -> Arc<Session> {
Arc::new(Session::new(mpsc::channel(1).0))
}
#[test]
#[serial]
fn test_signature_subscribe() {
let GenesisConfigInfo {
genesis_config,
mint_keypair: alice,
..
} = create_genesis_config(10_000);
let bob = Keypair::new();
let bob_pubkey = bob.pubkey();
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
)),
uid: Arc::new(atomic::AtomicUsize::default()),
};
// Test signature subscriptions
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("signatureNotification");
rpc.signature_subscribe(session, subscriber, tx.signatures[0].to_string(), None);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 0).unwrap();
// Test signature confirmation notification
let (response, _) = robust_poll_or_panic(receiver);
let expected_res = RpcSignatureResult { err: None };
let expected = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": 0 },
"value": expected_res,
},
"subscription": 0,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
}
#[test]
#[serial]
fn test_signature_unsubscribe() {
let GenesisConfigInfo {
genesis_config,
mint_keypair: alice,
..
} = create_genesis_config(10_000);
let bob_pubkey = Pubkey::new_rand();
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let session = create_session();
let mut io = PubSubHandler::default();
let rpc = RpcSolPubSubImpl::default_with_bank_forks(bank_forks);
io.extend_with(rpc.to_delegate());
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"signatureSubscribe","params":["{}"]}}"#,
tx.signatures[0].to_string()
);
let _res = io.handle_request_sync(&req, session.clone());
let req = r#"{"jsonrpc":"2.0","id":1,"method":"signatureUnsubscribe","params":[0]}"#;
let res = io.handle_request_sync(&req, session.clone());
let expected = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
let expected: Response = serde_json::from_str(&expected).unwrap();
let result: Response = serde_json::from_str(&res.unwrap()).unwrap();
assert_eq!(expected, result);
// Test bad parameter
let req = r#"{"jsonrpc":"2.0","id":1,"method":"signatureUnsubscribe","params":[1]}"#;
let res = io.handle_request_sync(&req, session);
let expected = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid Request: Subscription id does not exist"},"id":1}"#;
let expected: Response = serde_json::from_str(&expected).unwrap();
let result: Response = serde_json::from_str(&res.unwrap()).unwrap();
assert_eq!(expected, result);
}
#[test]
#[serial]
fn test_account_subscribe() {
let GenesisConfigInfo {
mut genesis_config,
mint_keypair: alice,
..
} = create_genesis_config(10_000);
// This test depends on the budget program
genesis_config
.native_instruction_processors
.push(solana_budget_program!());
let bob_pubkey = Pubkey::new_rand();
let witness = Keypair::new();
let contract_funds = Keypair::new();
let contract_state = Keypair::new();
let budget_program_id = solana_budget_program::id();
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests_with_bank(
bank_forks.read().unwrap().get(1).unwrap().clone(),
1,
))),
)),
uid: Arc::new(atomic::AtomicUsize::default()),
};
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(
session,
subscriber,
contract_state.pubkey().to_string(),
Some(CommitmentConfig::recent()),
);
let tx = system_transaction::transfer(&alice, &contract_funds.pubkey(), 51, blockhash);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
let ixs = budget_instruction::when_signed(
&contract_funds.pubkey(),
&bob_pubkey,
&contract_state.pubkey(),
&witness.pubkey(),
None,
51,
);
let message = Message::new(&ixs, Some(&contract_funds.pubkey()));
let tx = Transaction::new(&[&contract_funds, &contract_state], message, blockhash);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
sleep(Duration::from_millis(200));
// Test signature confirmation notification #1
let expected_data = bank_forks
.read()
.unwrap()
.get(1)
.unwrap()
.get_account(&contract_state.pubkey())
.unwrap()
.data;
let expected = json!({
"jsonrpc": "2.0",
"method": "accountNotification",
"params": {
"result": {
"context": { "slot": 1 },
"value": {
"owner": budget_program_id.to_string(),
"lamports": 51,
"data": bs58::encode(expected_data).into_string(),
"executable": false,
"rentEpoch": 1,
},
},
"subscription": 0,
}
});
let (response, _) = robust_poll_or_panic(receiver);
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
let tx = system_transaction::transfer(&alice, &witness.pubkey(), 1, blockhash);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
sleep(Duration::from_millis(200));
let ix = budget_instruction::apply_signature(
&witness.pubkey(),
&contract_state.pubkey(),
&bob_pubkey,
);
let message = Message::new(&[ix], Some(&witness.pubkey()));
let tx = Transaction::new(&[&witness], message, blockhash);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
sleep(Duration::from_millis(200));
assert_eq!(
bank_forks
.read()
.unwrap()
.get(1)
.unwrap()
.get_account(&contract_state.pubkey()),
None
);
}
#[test]
#[serial]
fn test_account_unsubscribe() {
let bob_pubkey = Pubkey::new_rand();
let session = create_session();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::new(&genesis_config))));
let mut io = PubSubHandler::default();
let rpc = RpcSolPubSubImpl::default_with_bank_forks(bank_forks);
io.extend_with(rpc.to_delegate());
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"accountSubscribe","params":["{}"]}}"#,
bob_pubkey.to_string()
);
let _res = io.handle_request_sync(&req, session.clone());
let req = r#"{"jsonrpc":"2.0","id":1,"method":"accountUnsubscribe","params":[0]}"#;
let res = io.handle_request_sync(&req, session.clone());
let expected = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
let expected: Response = serde_json::from_str(&expected).unwrap();
let result: Response = serde_json::from_str(&res.unwrap()).unwrap();
assert_eq!(expected, result);
// Test bad parameter
let req = r#"{"jsonrpc":"2.0","id":1,"method":"accountUnsubscribe","params":[1]}"#;
let res = io.handle_request_sync(&req, session);
let expected = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid Request: Subscription id does not exist"},"id":1}"#;
let expected: Response = serde_json::from_str(&expected).unwrap();
let result: Response = serde_json::from_str(&res.unwrap()).unwrap();
assert_eq!(expected, result);
}
#[test]
#[should_panic]
fn test_account_commitment_not_fulfilled() {
let GenesisConfigInfo {
genesis_config,
mint_keypair: alice,
..
} = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let bob = Keypair::new();
let mut rpc = RpcSolPubSubImpl::default_with_bank_forks(bank_forks.clone());
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(
&exit,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
);
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(
session,
subscriber,
bob.pubkey().to_string(),
Some(CommitmentConfig::root()),
);
let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash);
bank_forks
.write()
.unwrap()
.get(1)
.unwrap()
.process_transaction(&tx)
.unwrap();
rpc.subscriptions
.notify_subscribers(CacheSlotInfo::default());
// allow 200ms for notification thread to wake
std::thread::sleep(Duration::from_millis(200));
let _panic = robust_poll_or_panic(receiver);
}
#[test]
fn test_account_commitment() {
let GenesisConfigInfo {
genesis_config,
mint_keypair: alice,
..
} = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bob = Keypair::new();
let mut rpc = RpcSolPubSubImpl::default_with_bank_forks(bank_forks.clone());
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests()));
let subscriptions =
RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache);
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(
session,
subscriber,
bob.pubkey().to_string(),
Some(CommitmentConfig::root()),
);
let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash);
bank_forks
.write()
.unwrap()
.get(1)
.unwrap()
.process_transaction(&tx)
.unwrap();
let mut cache_slot_info = CacheSlotInfo::default();
cache_slot_info.current_slot = 1;
rpc.subscriptions.notify_subscribers(cache_slot_info);
let cache_slot_info = CacheSlotInfo {
current_slot: 2,
node_root: 1,
largest_confirmed_root: 1,
highest_confirmed_slot: 1,
};
rpc.subscriptions.notify_subscribers(cache_slot_info);
let expected = json!({
"jsonrpc": "2.0",
"method": "accountNotification",
"params": {
"result": {
"context": { "slot": 1 },
"value": {
"owner": system_program::id().to_string(),
"lamports": 100,
"data": "",
"executable": false,
"rentEpoch": 1,
},
},
"subscription": 0,
}
});
let (response, _) = robust_poll_or_panic(receiver);
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
}
#[test]
#[serial]
fn test_slot_subscribe() {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let rpc = RpcSolPubSubImpl::default_with_bank_forks(bank_forks);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
rpc.slot_subscribe(session, subscriber);
rpc.subscriptions.notify_slot(0, 0, 0);
// Test slot confirmation notification
let (response, _) = robust_poll_or_panic(receiver);
let expected_res = SlotInfo {
parent: 0,
slot: 0,
root: 0,
};
let expected_res_str =
serde_json::to_string(&serde_json::to_value(expected_res).unwrap()).unwrap();
let expected = format!(
r#"{{"jsonrpc":"2.0","method":"slotNotification","params":{{"result":{},"subscription":0}}}}"#,
expected_res_str
);
assert_eq!(expected, response);
}
#[test]
#[serial]
fn test_slot_unsubscribe() {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let rpc = RpcSolPubSubImpl::default_with_bank_forks(bank_forks);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
rpc.slot_subscribe(session, subscriber);
rpc.subscriptions.notify_slot(0, 0, 0);
let (response, _) = robust_poll_or_panic(receiver);
let expected_res = SlotInfo {
parent: 0,
slot: 0,
root: 0,
};
let expected_res_str =
serde_json::to_string(&serde_json::to_value(expected_res).unwrap()).unwrap();
let expected = format!(
r#"{{"jsonrpc":"2.0","method":"slotNotification","params":{{"result":{},"subscription":0}}}}"#,
expected_res_str
);
assert_eq!(expected, response);
let session = create_session();
assert!(rpc
.slot_unsubscribe(Some(session), SubscriptionId::Number(42))
.is_err());
let session = create_session();
assert!(rpc
.slot_unsubscribe(Some(session), SubscriptionId::Number(0))
.is_ok());
}
#[test]
#[serial]
fn test_vote_subscribe() {
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests()));
let validator_voting_keypairs: Vec<_> = (0..10)
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
.collect();
let GenesisConfigInfo { genesis_config, .. } =
create_genesis_config_with_vote_accounts(10_000, &validator_voting_keypairs, 100);
let exit = Arc::new(AtomicBool::new(false));
let bank = Bank::new(&genesis_config);
let bank_forks = BankForks::new(bank);
let bank = bank_forks.get(0).unwrap().clone();
let bank_forks = Arc::new(RwLock::new(bank_forks));
// Setup RPC
let mut rpc = RpcSolPubSubImpl::default_with_bank_forks(bank_forks.clone());
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("voteNotification");
// Setup Subscriptions
let subscriptions = RpcSubscriptions::new(&exit, bank_forks, block_commitment_cache);
rpc.subscriptions = Arc::new(subscriptions);
rpc.vote_subscribe(session, subscriber);
// Create some voters at genesis
let vote_tracker = VoteTracker::new(&bank);
let (votes_sender, votes_receiver) = unbounded();
let (vote_tracker, validator_voting_keypairs) =
(Arc::new(vote_tracker), validator_voting_keypairs);
let vote_slots = vec![1, 2];
validator_voting_keypairs.iter().for_each(|keypairs| {
let node_keypair = &keypairs.node_keypair;
let vote_keypair = &keypairs.vote_keypair;
let vote_tx = vote_transaction::new_vote_transaction(
vote_slots.clone(),
Hash::default(),
Hash::default(),
node_keypair,
vote_keypair,
vote_keypair,
);
votes_sender.send(vec![vote_tx]).unwrap();
});
// Process votes and check they were notified.
ClusterInfoVoteListener::get_and_process_votes_for_tests(
&votes_receiver,
&vote_tracker,
0,
&rpc.subscriptions,
)
.unwrap();
let (response, _) = robust_poll_or_panic(receiver);
assert_eq!(
response,
r#"{"jsonrpc":"2.0","method":"voteNotification","params":{"result":{"hash":"11111111111111111111111111111111","slots":[1,2],"timestamp":null},"subscription":0}}"#
);
}
#[test]
#[serial]
fn test_vote_unsubscribe() {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let rpc = RpcSolPubSubImpl::default_with_bank_forks(bank_forks);
let session = create_session();
let (subscriber, _id_receiver, _) = Subscriber::new_test("voteNotification");
rpc.vote_subscribe(session, subscriber);
let session = create_session();
assert!(rpc
.vote_unsubscribe(Some(session), SubscriptionId::Number(42))
.is_err());
let session = create_session();
assert!(rpc
.vote_unsubscribe(Some(session), SubscriptionId::Number(0))
.is_ok());
}
}
| 36.779716 | 174 | 0.586859 |
08af27ca33d8cbef7f5a85916f567b756455de59
| 684 |
//! Utilities and tests for multi-scalar multiplication.
pub mod stream_pippenger;
pub mod variable_base;
pub use stream_pippenger::*;
fn ln_without_floats(a: usize) -> usize {
    // log2(a) * ln(2), using 69/100 as a fixed-point approximation of ln(2)
(ark_std::log2(a) * 69 / 100) as usize
}
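// Worked example: for a = 1024 points, log2(a) = 10, so this returns
// 10 * 69 / 100 = 6 (integer division), approximating 10 * ln(2) ≈ 6.93.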
fn bounded_ln_without_floats(a: usize, max_msm_buffer_log: usize) -> usize {
if a < 32 {
3
} else {
        // In theory we cannot store more buckets than memory allows, so cap
        // the window size at `max_msm_buffer_log` to avoid flooding memory.
        ln_without_floats(a).min(max_msm_buffer_log)
}
}
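// Worked example: bounded_ln_without_floats(1024, 4) clamps the optimal
// 6-bit window down to 4, while any input below 32 points yields 3.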
| 26.307692 | 76 | 0.624269 |
160a64ddc9cab1875044018f8e741c0797ce7ec3
| 32 |
aimax.osm.routing.OsmMoveAction
| 16 | 31 | 0.875 |