hexsha (stringlengths 40–40) | size (int64 4–1.05M) | content (stringlengths 4–1.05M) | avg_line_length (float64 1.33–100) | max_line_length (int64 1–1k) | alphanum_fraction (float64 0.25–1)
---|---|---|---|---|---
8a1909e29e1615bec68ef730245ae55d78260ae5 | 198,624 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Crate-internal pairing of the underlying smithy client with the service
/// configuration.
///
/// Shared behind an `Arc` by [`Client`] and by every fluent builder, so all
/// request builders created from one client reuse the same client machinery
/// and config.
#[derive(Debug)]
pub(crate) struct Handle<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Underlying smithy service client (connector C, middleware M, retry policy R).
    client: aws_smithy_client::Client<C, M, R>,
    // Service configuration consumed when each operation is built (`make_operation`).
    conf: crate::Config,
}
/// An ergonomic service client for `AmazonEC2ContainerRegistry_V20150921`.
///
/// This client allows ergonomic access to a `AmazonEC2ContainerRegistry_V20150921`-shaped service.
/// Each method corresponds to an endpoint defined in the service's Smithy model,
/// and the request and response shapes are auto-generated from that same model.
///
/// # Using a Client
///
/// Once you have a client set up, you can access the service's endpoints
/// by calling the appropriate method on [`Client`]. Each such method
/// returns a request builder for that endpoint, with methods for setting
/// the various fields of the request. Once your request is complete, use
/// the `send` method to send the request. `send` returns a future, which
/// you then have to `.await` to get the service's response.
///
/// [builder pattern]: https://rust-lang.github.io/api-guidelines/type-safety.html#c-builder
/// [SigV4-signed requests]: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
#[derive(std::fmt::Debug)]
pub struct Client<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared state; cloning a `Client` only bumps this `Arc`'s refcount.
    handle: std::sync::Arc<Handle<C, M, R>>,
}
impl<C, M, R> std::clone::Clone for Client<C, M, R> {
    /// Produces a cheap copy of the client: only the shared `Arc` handle is
    /// duplicated, so the clone uses the same underlying client and config.
    fn clone(&self) -> Self {
        let handle = std::sync::Arc::clone(&self.handle);
        Self { handle }
    }
}
#[doc(inline)]
pub use aws_smithy_client::Builder;
impl<C, M, R> From<aws_smithy_client::Client<C, M, R>> for Client<C, M, R> {
    /// Wraps a raw smithy client together with a default-built service config.
    fn from(client: aws_smithy_client::Client<C, M, R>) -> Self {
        Self::with_config(client, crate::Config::builder().build())
    }
}
impl<C, M, R> Client<C, M, R> {
    /// Creates a client with the given service configuration.
    ///
    /// The client and config are moved into a shared [`Handle`], so every
    /// fluent builder created from this client observes the same config.
    pub fn with_config(client: aws_smithy_client::Client<C, M, R>, conf: crate::Config) -> Self {
        Self {
            handle: std::sync::Arc::new(Handle { client, conf }),
        }
    }
    /// Returns the client's configuration.
    pub fn conf(&self) -> &crate::Config {
        &self.handle.conf
    }
}
// Operation methods: one per service endpoint. Each returns a fluent builder
// that holds a clone of the shared `Arc<Handle>`, so builders do not borrow
// from `self` and may outlive the `Client` value that created them.
impl<C, M, R> Client<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fluent builder for the `BatchCheckLayerAvailability` operation.
    ///
    /// See [`BatchCheckLayerAvailability`](crate::client::fluent_builders::BatchCheckLayerAvailability) for more information about the
    /// operation and its arguments.
    pub fn batch_check_layer_availability(
        &self,
    ) -> fluent_builders::BatchCheckLayerAvailability<C, M, R> {
        fluent_builders::BatchCheckLayerAvailability::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `BatchDeleteImage` operation.
    ///
    /// See [`BatchDeleteImage`](crate::client::fluent_builders::BatchDeleteImage) for more information about the
    /// operation and its arguments.
    pub fn batch_delete_image(&self) -> fluent_builders::BatchDeleteImage<C, M, R> {
        fluent_builders::BatchDeleteImage::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `BatchGetImage` operation.
    ///
    /// See [`BatchGetImage`](crate::client::fluent_builders::BatchGetImage) for more information about the
    /// operation and its arguments.
    pub fn batch_get_image(&self) -> fluent_builders::BatchGetImage<C, M, R> {
        fluent_builders::BatchGetImage::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `CompleteLayerUpload` operation.
    ///
    /// See [`CompleteLayerUpload`](crate::client::fluent_builders::CompleteLayerUpload) for more information about the
    /// operation and its arguments.
    pub fn complete_layer_upload(&self) -> fluent_builders::CompleteLayerUpload<C, M, R> {
        fluent_builders::CompleteLayerUpload::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `CreateRepository` operation.
    ///
    /// See [`CreateRepository`](crate::client::fluent_builders::CreateRepository) for more information about the
    /// operation and its arguments.
    pub fn create_repository(&self) -> fluent_builders::CreateRepository<C, M, R> {
        fluent_builders::CreateRepository::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteLifecyclePolicy` operation.
    ///
    /// See [`DeleteLifecyclePolicy`](crate::client::fluent_builders::DeleteLifecyclePolicy) for more information about the
    /// operation and its arguments.
    pub fn delete_lifecycle_policy(&self) -> fluent_builders::DeleteLifecyclePolicy<C, M, R> {
        fluent_builders::DeleteLifecyclePolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteRegistryPolicy` operation.
    ///
    /// See [`DeleteRegistryPolicy`](crate::client::fluent_builders::DeleteRegistryPolicy) for more information about the
    /// operation and its arguments.
    pub fn delete_registry_policy(&self) -> fluent_builders::DeleteRegistryPolicy<C, M, R> {
        fluent_builders::DeleteRegistryPolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteRepository` operation.
    ///
    /// See [`DeleteRepository`](crate::client::fluent_builders::DeleteRepository) for more information about the
    /// operation and its arguments.
    pub fn delete_repository(&self) -> fluent_builders::DeleteRepository<C, M, R> {
        fluent_builders::DeleteRepository::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DeleteRepositoryPolicy` operation.
    ///
    /// See [`DeleteRepositoryPolicy`](crate::client::fluent_builders::DeleteRepositoryPolicy) for more information about the
    /// operation and its arguments.
    pub fn delete_repository_policy(&self) -> fluent_builders::DeleteRepositoryPolicy<C, M, R> {
        fluent_builders::DeleteRepositoryPolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeImageReplicationStatus` operation.
    ///
    /// See [`DescribeImageReplicationStatus`](crate::client::fluent_builders::DescribeImageReplicationStatus) for more information about the
    /// operation and its arguments.
    pub fn describe_image_replication_status(
        &self,
    ) -> fluent_builders::DescribeImageReplicationStatus<C, M, R> {
        fluent_builders::DescribeImageReplicationStatus::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeImages` operation.
    ///
    /// See [`DescribeImages`](crate::client::fluent_builders::DescribeImages) for more information about the
    /// operation and its arguments.
    pub fn describe_images(&self) -> fluent_builders::DescribeImages<C, M, R> {
        fluent_builders::DescribeImages::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeImageScanFindings` operation.
    ///
    /// See [`DescribeImageScanFindings`](crate::client::fluent_builders::DescribeImageScanFindings) for more information about the
    /// operation and its arguments.
    pub fn describe_image_scan_findings(
        &self,
    ) -> fluent_builders::DescribeImageScanFindings<C, M, R> {
        fluent_builders::DescribeImageScanFindings::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeRegistry` operation.
    ///
    /// See [`DescribeRegistry`](crate::client::fluent_builders::DescribeRegistry) for more information about the
    /// operation and its arguments.
    pub fn describe_registry(&self) -> fluent_builders::DescribeRegistry<C, M, R> {
        fluent_builders::DescribeRegistry::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `DescribeRepositories` operation.
    ///
    /// See [`DescribeRepositories`](crate::client::fluent_builders::DescribeRepositories) for more information about the
    /// operation and its arguments.
    pub fn describe_repositories(&self) -> fluent_builders::DescribeRepositories<C, M, R> {
        fluent_builders::DescribeRepositories::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `GetAuthorizationToken` operation.
    ///
    /// See [`GetAuthorizationToken`](crate::client::fluent_builders::GetAuthorizationToken) for more information about the
    /// operation and its arguments.
    pub fn get_authorization_token(&self) -> fluent_builders::GetAuthorizationToken<C, M, R> {
        fluent_builders::GetAuthorizationToken::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `GetDownloadUrlForLayer` operation.
    ///
    /// See [`GetDownloadUrlForLayer`](crate::client::fluent_builders::GetDownloadUrlForLayer) for more information about the
    /// operation and its arguments.
    pub fn get_download_url_for_layer(&self) -> fluent_builders::GetDownloadUrlForLayer<C, M, R> {
        fluent_builders::GetDownloadUrlForLayer::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `GetLifecyclePolicy` operation.
    ///
    /// See [`GetLifecyclePolicy`](crate::client::fluent_builders::GetLifecyclePolicy) for more information about the
    /// operation and its arguments.
    pub fn get_lifecycle_policy(&self) -> fluent_builders::GetLifecyclePolicy<C, M, R> {
        fluent_builders::GetLifecyclePolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `GetLifecyclePolicyPreview` operation.
    ///
    /// See [`GetLifecyclePolicyPreview`](crate::client::fluent_builders::GetLifecyclePolicyPreview) for more information about the
    /// operation and its arguments.
    pub fn get_lifecycle_policy_preview(
        &self,
    ) -> fluent_builders::GetLifecyclePolicyPreview<C, M, R> {
        fluent_builders::GetLifecyclePolicyPreview::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `GetRegistryPolicy` operation.
    ///
    /// See [`GetRegistryPolicy`](crate::client::fluent_builders::GetRegistryPolicy) for more information about the
    /// operation and its arguments.
    pub fn get_registry_policy(&self) -> fluent_builders::GetRegistryPolicy<C, M, R> {
        fluent_builders::GetRegistryPolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `GetRepositoryPolicy` operation.
    ///
    /// See [`GetRepositoryPolicy`](crate::client::fluent_builders::GetRepositoryPolicy) for more information about the
    /// operation and its arguments.
    pub fn get_repository_policy(&self) -> fluent_builders::GetRepositoryPolicy<C, M, R> {
        fluent_builders::GetRepositoryPolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `InitiateLayerUpload` operation.
    ///
    /// See [`InitiateLayerUpload`](crate::client::fluent_builders::InitiateLayerUpload) for more information about the
    /// operation and its arguments.
    pub fn initiate_layer_upload(&self) -> fluent_builders::InitiateLayerUpload<C, M, R> {
        fluent_builders::InitiateLayerUpload::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ListImages` operation.
    ///
    /// See [`ListImages`](crate::client::fluent_builders::ListImages) for more information about the
    /// operation and its arguments.
    pub fn list_images(&self) -> fluent_builders::ListImages<C, M, R> {
        fluent_builders::ListImages::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `ListTagsForResource` operation.
    ///
    /// See [`ListTagsForResource`](crate::client::fluent_builders::ListTagsForResource) for more information about the
    /// operation and its arguments.
    pub fn list_tags_for_resource(&self) -> fluent_builders::ListTagsForResource<C, M, R> {
        fluent_builders::ListTagsForResource::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutImage` operation.
    ///
    /// See [`PutImage`](crate::client::fluent_builders::PutImage) for more information about the
    /// operation and its arguments.
    pub fn put_image(&self) -> fluent_builders::PutImage<C, M, R> {
        fluent_builders::PutImage::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutImageScanningConfiguration` operation.
    ///
    /// See [`PutImageScanningConfiguration`](crate::client::fluent_builders::PutImageScanningConfiguration) for more information about the
    /// operation and its arguments.
    pub fn put_image_scanning_configuration(
        &self,
    ) -> fluent_builders::PutImageScanningConfiguration<C, M, R> {
        fluent_builders::PutImageScanningConfiguration::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutImageTagMutability` operation.
    ///
    /// See [`PutImageTagMutability`](crate::client::fluent_builders::PutImageTagMutability) for more information about the
    /// operation and its arguments.
    pub fn put_image_tag_mutability(&self) -> fluent_builders::PutImageTagMutability<C, M, R> {
        fluent_builders::PutImageTagMutability::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutLifecyclePolicy` operation.
    ///
    /// See [`PutLifecyclePolicy`](crate::client::fluent_builders::PutLifecyclePolicy) for more information about the
    /// operation and its arguments.
    pub fn put_lifecycle_policy(&self) -> fluent_builders::PutLifecyclePolicy<C, M, R> {
        fluent_builders::PutLifecyclePolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutRegistryPolicy` operation.
    ///
    /// See [`PutRegistryPolicy`](crate::client::fluent_builders::PutRegistryPolicy) for more information about the
    /// operation and its arguments.
    pub fn put_registry_policy(&self) -> fluent_builders::PutRegistryPolicy<C, M, R> {
        fluent_builders::PutRegistryPolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `PutReplicationConfiguration` operation.
    ///
    /// See [`PutReplicationConfiguration`](crate::client::fluent_builders::PutReplicationConfiguration) for more information about the
    /// operation and its arguments.
    pub fn put_replication_configuration(
        &self,
    ) -> fluent_builders::PutReplicationConfiguration<C, M, R> {
        fluent_builders::PutReplicationConfiguration::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `SetRepositoryPolicy` operation.
    ///
    /// See [`SetRepositoryPolicy`](crate::client::fluent_builders::SetRepositoryPolicy) for more information about the
    /// operation and its arguments.
    pub fn set_repository_policy(&self) -> fluent_builders::SetRepositoryPolicy<C, M, R> {
        fluent_builders::SetRepositoryPolicy::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `StartImageScan` operation.
    ///
    /// See [`StartImageScan`](crate::client::fluent_builders::StartImageScan) for more information about the
    /// operation and its arguments.
    pub fn start_image_scan(&self) -> fluent_builders::StartImageScan<C, M, R> {
        fluent_builders::StartImageScan::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `StartLifecyclePolicyPreview` operation.
    ///
    /// See [`StartLifecyclePolicyPreview`](crate::client::fluent_builders::StartLifecyclePolicyPreview) for more information about the
    /// operation and its arguments.
    pub fn start_lifecycle_policy_preview(
        &self,
    ) -> fluent_builders::StartLifecyclePolicyPreview<C, M, R> {
        fluent_builders::StartLifecyclePolicyPreview::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `TagResource` operation.
    ///
    /// See [`TagResource`](crate::client::fluent_builders::TagResource) for more information about the
    /// operation and its arguments.
    pub fn tag_resource(&self) -> fluent_builders::TagResource<C, M, R> {
        fluent_builders::TagResource::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UntagResource` operation.
    ///
    /// See [`UntagResource`](crate::client::fluent_builders::UntagResource) for more information about the
    /// operation and its arguments.
    pub fn untag_resource(&self) -> fluent_builders::UntagResource<C, M, R> {
        fluent_builders::UntagResource::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the `UploadLayerPart` operation.
    ///
    /// See [`UploadLayerPart`](crate::client::fluent_builders::UploadLayerPart) for more information about the
    /// operation and its arguments.
    pub fn upload_layer_part(&self) -> fluent_builders::UploadLayerPart<C, M, R> {
        fluent_builders::UploadLayerPart::new(self.handle.clone())
    }
}
pub mod fluent_builders {
//!
//! Utilities to ergonomically construct a request to the service.
//!
//! Fluent builders are created through the [`Client`](crate::client::Client) by calling
    //! one of its operation methods. After parameters are set using the builder methods,
//! the `send` method can be called to initiate the request.
//!
    /// Fluent builder constructing a request to `BatchCheckLayerAvailability`.
    ///
    /// <p>Checks the availability of one or more image layers in a repository.</p>
    /// <p>When an image is pushed to a repository, each image layer is checked to verify if it
    /// has been uploaded before. If it has been uploaded, then the image layer is
    /// skipped.</p>
    /// <note>
    /// <p>This operation is used by the Amazon ECR proxy and is not generally used by
    /// customers for pulling and pushing images. In most cases, you should use the <code>docker</code> CLI to pull, tag, and push images.</p>
    /// </note>
    #[derive(std::fmt::Debug)]
    pub struct BatchCheckLayerAvailability<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client/config pair cloned from the `Client` that created this builder.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Accumulates request parameters until `send` builds the input.
        inner: crate::input::batch_check_layer_availability_input::Builder,
    }
    impl<C, M, R> BatchCheckLayerAvailability<C, M, R>
    where
        C: aws_smithy_client::bounds::SmithyConnector,
        M: aws_smithy_client::bounds::SmithyMiddleware<C>,
        R: aws_smithy_client::retry::NewRequestPolicy,
    {
        /// Creates a new `BatchCheckLayerAvailability`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
            Self {
                handle,
                // Start from an empty input builder; fields are filled in by
                // the fluent setter methods below.
                inner: Default::default(),
            }
        }
        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::BatchCheckLayerAvailabilityOutput,
            aws_smithy_http::result::SdkError<crate::error::BatchCheckLayerAvailabilityError>,
        >
        where
            R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
                crate::input::BatchCheckLayerAvailabilityInputOperationOutputAlias,
                crate::output::BatchCheckLayerAvailabilityOutput,
                crate::error::BatchCheckLayerAvailabilityError,
                crate::input::BatchCheckLayerAvailabilityInputOperationRetryAlias,
            >,
        {
            // Validate and assemble the input from the accumulated fields;
            // builder errors surface as construction failures.
            let input = self.inner.build().map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
            // Turn the input into a dispatchable operation using the client
            // configuration, then dispatch it through the shared client.
            let op = input
                .make_operation(&self.handle.conf)
                .await
                .map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                })?;
            self.handle.client.call(op).await
        }
        /// <p>The Amazon Web Services account ID associated with the registry that contains the image layers to
        /// check. If you do not specify a registry, the default registry is assumed.</p>
        pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.registry_id(inp);
            self
        }
        /// <p>The Amazon Web Services account ID associated with the registry that contains the image layers to
        /// check. If you do not specify a registry, the default registry is assumed.</p>
        pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_registry_id(input);
            self
        }
        /// <p>The name of the repository that is associated with the image layers to check.</p>
        pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.repository_name(inp);
            self
        }
        /// <p>The name of the repository that is associated with the image layers to check.</p>
        pub fn set_repository_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_repository_name(input);
            self
        }
        /// Appends an item to `layerDigests`.
        ///
        /// To override the contents of this collection use [`set_layer_digests`](Self::set_layer_digests).
        ///
        /// <p>The digests of the image layers to check.</p>
        pub fn layer_digests(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.layer_digests(inp);
            self
        }
        /// <p>The digests of the image layers to check.</p>
        pub fn set_layer_digests(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.inner = self.inner.set_layer_digests(input);
            self
        }
    }
    /// Fluent builder constructing a request to `BatchDeleteImage`.
    ///
    /// <p>Deletes a list of specified images within a repository. Images are specified with
    /// either an <code>imageTag</code> or <code>imageDigest</code>.</p>
    /// <p>You can remove a tag from an image by specifying the image's tag in your request. When
    /// you remove the last tag from an image, the image is deleted from your repository.</p>
    /// <p>You can completely delete an image (and all of its tags) by specifying the image's
    /// digest in your request.</p>
    #[derive(std::fmt::Debug)]
    pub struct BatchDeleteImage<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client/config pair cloned from the `Client` that created this builder.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Accumulates request parameters until `send` builds the input.
        inner: crate::input::batch_delete_image_input::Builder,
    }
    impl<C, M, R> BatchDeleteImage<C, M, R>
    where
        C: aws_smithy_client::bounds::SmithyConnector,
        M: aws_smithy_client::bounds::SmithyMiddleware<C>,
        R: aws_smithy_client::retry::NewRequestPolicy,
    {
        /// Creates a new `BatchDeleteImage`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
            Self {
                handle,
                // Start from an empty input builder; fields are filled in by
                // the fluent setter methods below.
                inner: Default::default(),
            }
        }
        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::BatchDeleteImageOutput,
            aws_smithy_http::result::SdkError<crate::error::BatchDeleteImageError>,
        >
        where
            R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
                crate::input::BatchDeleteImageInputOperationOutputAlias,
                crate::output::BatchDeleteImageOutput,
                crate::error::BatchDeleteImageError,
                crate::input::BatchDeleteImageInputOperationRetryAlias,
            >,
        {
            // Validate and assemble the input from the accumulated fields;
            // builder errors surface as construction failures.
            let input = self.inner.build().map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
            // Turn the input into a dispatchable operation using the client
            // configuration, then dispatch it through the shared client.
            let op = input
                .make_operation(&self.handle.conf)
                .await
                .map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                })?;
            self.handle.client.call(op).await
        }
        /// <p>The Amazon Web Services account ID associated with the registry that contains the image to delete.
        /// If you do not specify a registry, the default registry is assumed.</p>
        pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.registry_id(inp);
            self
        }
        /// <p>The Amazon Web Services account ID associated with the registry that contains the image to delete.
        /// If you do not specify a registry, the default registry is assumed.</p>
        pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_registry_id(input);
            self
        }
        /// <p>The repository that contains the image to delete.</p>
        pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.repository_name(inp);
            self
        }
        /// <p>The repository that contains the image to delete.</p>
        pub fn set_repository_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_repository_name(input);
            self
        }
        /// Appends an item to `imageIds`.
        ///
        /// To override the contents of this collection use [`set_image_ids`](Self::set_image_ids).
        ///
        /// <p>A list of image ID references that correspond to images to delete. The format of the
        /// <code>imageIds</code> reference is <code>imageTag=tag</code> or
        /// <code>imageDigest=digest</code>.</p>
        pub fn image_ids(mut self, inp: impl Into<crate::model::ImageIdentifier>) -> Self {
            self.inner = self.inner.image_ids(inp);
            self
        }
        /// <p>A list of image ID references that correspond to images to delete. The format of the
        /// <code>imageIds</code> reference is <code>imageTag=tag</code> or
        /// <code>imageDigest=digest</code>.</p>
        pub fn set_image_ids(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::ImageIdentifier>>,
        ) -> Self {
            self.inner = self.inner.set_image_ids(input);
            self
        }
    }
    /// Fluent builder constructing a request to `BatchGetImage`.
    ///
    /// <p>Gets detailed information for an image. Images are specified with either an
    /// <code>imageTag</code> or <code>imageDigest</code>.</p>
    /// <p>When an image is pulled, the BatchGetImage API is called once to retrieve the image
    /// manifest.</p>
    #[derive(std::fmt::Debug)]
    pub struct BatchGetImage<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client/config pair cloned from the `Client` that created this builder.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Accumulates request parameters until `send` builds the input.
        inner: crate::input::batch_get_image_input::Builder,
    }
    impl<C, M, R> BatchGetImage<C, M, R>
    where
        C: aws_smithy_client::bounds::SmithyConnector,
        M: aws_smithy_client::bounds::SmithyMiddleware<C>,
        R: aws_smithy_client::retry::NewRequestPolicy,
    {
        /// Creates a new `BatchGetImage`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
            Self {
                handle,
                // Start from an empty input builder; fields are filled in by
                // the fluent setter methods below.
                inner: Default::default(),
            }
        }
        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::BatchGetImageOutput,
            aws_smithy_http::result::SdkError<crate::error::BatchGetImageError>,
        >
        where
            R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
                crate::input::BatchGetImageInputOperationOutputAlias,
                crate::output::BatchGetImageOutput,
                crate::error::BatchGetImageError,
                crate::input::BatchGetImageInputOperationRetryAlias,
            >,
        {
            // Validate and assemble the input from the accumulated fields;
            // builder errors surface as construction failures.
            let input = self.inner.build().map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
            // Turn the input into a dispatchable operation using the client
            // configuration, then dispatch it through the shared client.
            let op = input
                .make_operation(&self.handle.conf)
                .await
                .map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                })?;
            self.handle.client.call(op).await
        }
        /// <p>The Amazon Web Services account ID associated with the registry that contains the images to
        /// describe. If you do not specify a registry, the default registry is assumed.</p>
        pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.registry_id(inp);
            self
        }
        /// <p>The Amazon Web Services account ID associated with the registry that contains the images to
        /// describe. If you do not specify a registry, the default registry is assumed.</p>
        pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_registry_id(input);
            self
        }
        /// <p>The repository that contains the images to describe.</p>
        pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.repository_name(inp);
            self
        }
        /// <p>The repository that contains the images to describe.</p>
        pub fn set_repository_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_repository_name(input);
            self
        }
        /// Appends an item to `imageIds`.
        ///
        /// To override the contents of this collection use [`set_image_ids`](Self::set_image_ids).
        ///
        /// <p>A list of image ID references that correspond to images to describe. The format of the
        /// <code>imageIds</code> reference is <code>imageTag=tag</code> or
        /// <code>imageDigest=digest</code>.</p>
        pub fn image_ids(mut self, inp: impl Into<crate::model::ImageIdentifier>) -> Self {
            self.inner = self.inner.image_ids(inp);
            self
        }
        /// <p>A list of image ID references that correspond to images to describe. The format of the
        /// <code>imageIds</code> reference is <code>imageTag=tag</code> or
        /// <code>imageDigest=digest</code>.</p>
        pub fn set_image_ids(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::ImageIdentifier>>,
        ) -> Self {
            self.inner = self.inner.set_image_ids(input);
            self
        }
        /// Appends an item to `acceptedMediaTypes`.
        ///
        /// To override the contents of this collection use [`set_accepted_media_types`](Self::set_accepted_media_types).
        ///
        /// <p>The accepted media types for the request.</p>
        /// <p>Valid values: <code>application/vnd.docker.distribution.manifest.v1+json</code> |
        /// <code>application/vnd.docker.distribution.manifest.v2+json</code> |
        /// <code>application/vnd.oci.image.manifest.v1+json</code>
        /// </p>
        pub fn accepted_media_types(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.accepted_media_types(inp);
            self
        }
        /// <p>The accepted media types for the request.</p>
        /// <p>Valid values: <code>application/vnd.docker.distribution.manifest.v1+json</code> |
        /// <code>application/vnd.docker.distribution.manifest.v2+json</code> |
        /// <code>application/vnd.oci.image.manifest.v1+json</code>
        /// </p>
        pub fn set_accepted_media_types(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.inner = self.inner.set_accepted_media_types(input);
            self
        }
    }
    /// Fluent builder constructing a request to `CompleteLayerUpload`.
    ///
    /// <p>Informs Amazon ECR that the image layer upload has completed for a specified registry,
    /// repository name, and upload ID. You can optionally provide a <code>sha256</code> digest
    /// of the image layer for data validation purposes.</p>
    /// <p>When an image is pushed, the CompleteLayerUpload API is called once per each new image
    /// layer to verify that the upload has completed.</p>
    /// <note>
    /// <p>This operation is used by the Amazon ECR proxy and is not generally used by
    /// customers for pulling and pushing images. In most cases, you should use the <code>docker</code> CLI to pull, tag, and push images.</p>
    /// </note>
    #[derive(std::fmt::Debug)]
    pub struct CompleteLayerUpload<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client/config pair cloned from the `Client` that created this builder.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Accumulates request parameters until `send` builds the input.
        inner: crate::input::complete_layer_upload_input::Builder,
    }
impl<C, M, R> CompleteLayerUpload<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CompleteLayerUpload` fluent builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CompleteLayerUploadOutput,
        aws_smithy_http::result::SdkError<crate::error::CompleteLayerUploadError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CompleteLayerUploadInputOperationOutputAlias,
            crate::output::CompleteLayerUploadOutput,
            crate::error::CompleteLayerUploadError,
            crate::input::CompleteLayerUploadInputOperationRetryAlias,
        >,
    {
        // Build the input, then the operation; a failure in either step is a
        // construction error, not a service error.
        let request = match self.inner.build() {
            Ok(built) => built,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let operation = match request.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(operation).await
    }
    /// <p>The Amazon Web Services account ID associated with the registry to which to upload layers.
    /// If you do not specify a registry, the default registry is assumed.</p>
    pub fn registry_id(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.registry_id(value);
        self
    }
    /// <p>The Amazon Web Services account ID associated with the registry to which to upload layers.
    /// If you do not specify a registry, the default registry is assumed.</p>
    pub fn set_registry_id(mut self, value: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_registry_id(value);
        self
    }
    /// <p>The name of the repository to associate with the image layer.</p>
    pub fn repository_name(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.repository_name(value);
        self
    }
    /// <p>The name of the repository to associate with the image layer.</p>
    pub fn set_repository_name(
        mut self,
        value: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_repository_name(value);
        self
    }
    /// <p>The upload ID from a previous <a>InitiateLayerUpload</a> operation to
    /// associate with the image layer.</p>
    pub fn upload_id(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.upload_id(value);
        self
    }
    /// <p>The upload ID from a previous <a>InitiateLayerUpload</a> operation to
    /// associate with the image layer.</p>
    pub fn set_upload_id(mut self, value: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_upload_id(value);
        self
    }
    /// Appends an item to `layerDigests`.
    ///
    /// To override the contents of this collection use [`set_layer_digests`](Self::set_layer_digests).
    ///
    /// <p>The <code>sha256</code> digest of the image layer.</p>
    pub fn layer_digests(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.layer_digests(value);
        self
    }
    /// <p>The <code>sha256</code> digest of the image layer.</p>
    pub fn set_layer_digests(
        mut self,
        value: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_layer_digests(value);
        self
    }
}
/// Fluent builder constructing a request to `CreateRepository`.
///
/// <p>Creates a repository. For more information, see <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html">Amazon ECR repositories</a> in the
/// <i>Amazon Elastic Container Registry User Guide</i>.</p>
// Generic over the connector `C`, middleware `M`, and retry policy `R`;
// defaults are the standard AWS stack supplied by the owning `Client`.
#[derive(std::fmt::Debug)]
pub struct CreateRepository<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + config, cloned (`Arc`) from the `Client` that created this builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set through the fluent setters on the impl below.
    inner: crate::input::create_repository_input::Builder,
}
impl<C, M, R> CreateRepository<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateRepository` fluent builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateRepositoryOutput,
        aws_smithy_http::result::SdkError<crate::error::CreateRepositoryError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CreateRepositoryInputOperationOutputAlias,
            crate::output::CreateRepositoryOutput,
            crate::error::CreateRepositoryError,
            crate::input::CreateRepositoryInputOperationRetryAlias,
        >,
    {
        // Build the input, then the operation; a failure in either step is a
        // construction error, not a service error.
        let request = match self.inner.build() {
            Ok(built) => built,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let operation = match request.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(operation).await
    }
    // NOTE: wording normalized from "The AWS account ID" to "The Amazon Web Services
    // account ID" for consistency with every other operation's docs in this module.
    /// <p>The Amazon Web Services account ID associated with the registry to create the repository.
    /// If you do not specify a registry, the default registry is assumed.</p>
    pub fn registry_id(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.registry_id(value);
        self
    }
    /// <p>The Amazon Web Services account ID associated with the registry to create the repository.
    /// If you do not specify a registry, the default registry is assumed.</p>
    pub fn set_registry_id(mut self, value: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_registry_id(value);
        self
    }
    /// <p>The name to use for the repository. The repository name may be specified on its own
    /// (such as <code>nginx-web-app</code>) or it can be prepended with a namespace to group
    /// the repository into a category (such as <code>project-a/nginx-web-app</code>).</p>
    pub fn repository_name(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.repository_name(value);
        self
    }
    /// <p>The name to use for the repository. The repository name may be specified on its own
    /// (such as <code>nginx-web-app</code>) or it can be prepended with a namespace to group
    /// the repository into a category (such as <code>project-a/nginx-web-app</code>).</p>
    pub fn set_repository_name(
        mut self,
        value: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_repository_name(value);
        self
    }
    /// Appends an item to `tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>The metadata that you apply to the repository to help you categorize and organize
    /// them. Each tag consists of a key and an optional value, both of which you define.
    /// Tag keys can have a maximum character length of 128 characters, and tag values can have
    /// a maximum length of 256 characters.</p>
    pub fn tags(mut self, value: impl Into<crate::model::Tag>) -> Self {
        self.inner = self.inner.tags(value);
        self
    }
    /// <p>The metadata that you apply to the repository to help you categorize and organize
    /// them. Each tag consists of a key and an optional value, both of which you define.
    /// Tag keys can have a maximum character length of 128 characters, and tag values can have
    /// a maximum length of 256 characters.</p>
    pub fn set_tags(
        mut self,
        value: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    ) -> Self {
        self.inner = self.inner.set_tags(value);
        self
    }
    /// <p>The tag mutability setting for the repository. If this parameter is omitted, the
    /// default setting of <code>MUTABLE</code> will be used which will allow image tags to be
    /// overwritten. If <code>IMMUTABLE</code> is specified, all image tags within the
    /// repository will be immutable which will prevent them from being overwritten.</p>
    pub fn image_tag_mutability(mut self, value: crate::model::ImageTagMutability) -> Self {
        self.inner = self.inner.image_tag_mutability(value);
        self
    }
    /// <p>The tag mutability setting for the repository. If this parameter is omitted, the
    /// default setting of <code>MUTABLE</code> will be used which will allow image tags to be
    /// overwritten. If <code>IMMUTABLE</code> is specified, all image tags within the
    /// repository will be immutable which will prevent them from being overwritten.</p>
    pub fn set_image_tag_mutability(
        mut self,
        value: std::option::Option<crate::model::ImageTagMutability>,
    ) -> Self {
        self.inner = self.inner.set_image_tag_mutability(value);
        self
    }
    /// <p>The image scanning configuration for the repository. This determines whether images
    /// are scanned for known vulnerabilities after being pushed to the repository.</p>
    pub fn image_scanning_configuration(
        mut self,
        value: crate::model::ImageScanningConfiguration,
    ) -> Self {
        self.inner = self.inner.image_scanning_configuration(value);
        self
    }
    /// <p>The image scanning configuration for the repository. This determines whether images
    /// are scanned for known vulnerabilities after being pushed to the repository.</p>
    pub fn set_image_scanning_configuration(
        mut self,
        value: std::option::Option<crate::model::ImageScanningConfiguration>,
    ) -> Self {
        self.inner = self.inner.set_image_scanning_configuration(value);
        self
    }
    /// <p>The encryption configuration for the repository. This determines how the contents of
    /// your repository are encrypted at rest.</p>
    pub fn encryption_configuration(
        mut self,
        value: crate::model::EncryptionConfiguration,
    ) -> Self {
        self.inner = self.inner.encryption_configuration(value);
        self
    }
    /// <p>The encryption configuration for the repository. This determines how the contents of
    /// your repository are encrypted at rest.</p>
    pub fn set_encryption_configuration(
        mut self,
        value: std::option::Option<crate::model::EncryptionConfiguration>,
    ) -> Self {
        self.inner = self.inner.set_encryption_configuration(value);
        self
    }
}
/// Fluent builder constructing a request to `DeleteLifecyclePolicy`.
///
/// <p>Deletes the lifecycle policy associated with the specified repository.</p>
// Generic over the connector `C`, middleware `M`, and retry policy `R`;
// defaults are the standard AWS stack supplied by the owning `Client`.
#[derive(std::fmt::Debug)]
pub struct DeleteLifecyclePolicy<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + config, cloned (`Arc`) from the `Client` that created this builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set through the fluent setters on the impl below.
    inner: crate::input::delete_lifecycle_policy_input::Builder,
}
impl<C, M, R> DeleteLifecyclePolicy<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DeleteLifecyclePolicy` fluent builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteLifecyclePolicyOutput,
        aws_smithy_http::result::SdkError<crate::error::DeleteLifecyclePolicyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DeleteLifecyclePolicyInputOperationOutputAlias,
            crate::output::DeleteLifecyclePolicyOutput,
            crate::error::DeleteLifecyclePolicyError,
            crate::input::DeleteLifecyclePolicyInputOperationRetryAlias,
        >,
    {
        // Build the input, then the operation; a failure in either step is a
        // construction error, not a service error.
        let request = match self.inner.build() {
            Ok(built) => built,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let operation = match request.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(operation).await
    }
    /// <p>The Amazon Web Services account ID associated with the registry that contains the repository.
    /// If you do not specify a registry, the default registry is assumed.</p>
    pub fn registry_id(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.registry_id(value);
        self
    }
    /// <p>The Amazon Web Services account ID associated with the registry that contains the repository.
    /// If you do not specify a registry, the default registry is assumed.</p>
    pub fn set_registry_id(mut self, value: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_registry_id(value);
        self
    }
    /// <p>The name of the repository.</p>
    pub fn repository_name(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.repository_name(value);
        self
    }
    /// <p>The name of the repository.</p>
    pub fn set_repository_name(
        mut self,
        value: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_repository_name(value);
        self
    }
}
/// Fluent builder constructing a request to `DeleteRegistryPolicy`.
///
/// <p>Deletes the registry permissions policy.</p>
// Generic over the connector `C`, middleware `M`, and retry policy `R`;
// defaults are the standard AWS stack supplied by the owning `Client`.
#[derive(std::fmt::Debug)]
pub struct DeleteRegistryPolicy<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + config, cloned (`Arc`) from the `Client` that created this builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // This operation takes no request parameters, so no fluent setters exist;
    // the builder is still kept for uniformity with the other operations.
    inner: crate::input::delete_registry_policy_input::Builder,
}
impl<C, M, R> DeleteRegistryPolicy<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DeleteRegistryPolicy` fluent builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteRegistryPolicyOutput,
        aws_smithy_http::result::SdkError<crate::error::DeleteRegistryPolicyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DeleteRegistryPolicyInputOperationOutputAlias,
            crate::output::DeleteRegistryPolicyOutput,
            crate::error::DeleteRegistryPolicyError,
            crate::input::DeleteRegistryPolicyInputOperationRetryAlias,
        >,
    {
        // Build the input, then the operation; a failure in either step is a
        // construction error, not a service error.
        let request = match self.inner.build() {
            Ok(built) => built,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let operation = match request.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(operation).await
    }
}
/// Fluent builder constructing a request to `DeleteRepository`.
///
/// <p>Deletes a repository. If the repository contains images, you must either delete all
/// images in the repository or use the <code>force</code> option to delete the
/// repository.</p>
// Generic over the connector `C`, middleware `M`, and retry policy `R`;
// defaults are the standard AWS stack supplied by the owning `Client`.
#[derive(std::fmt::Debug)]
pub struct DeleteRepository<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + config, cloned (`Arc`) from the `Client` that created this builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set through the fluent setters on the impl below.
    inner: crate::input::delete_repository_input::Builder,
}
impl<C, M, R> DeleteRepository<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DeleteRepository` fluent builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteRepositoryOutput,
        aws_smithy_http::result::SdkError<crate::error::DeleteRepositoryError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DeleteRepositoryInputOperationOutputAlias,
            crate::output::DeleteRepositoryOutput,
            crate::error::DeleteRepositoryError,
            crate::input::DeleteRepositoryInputOperationRetryAlias,
        >,
    {
        // Build the input, then the operation; a failure in either step is a
        // construction error, not a service error.
        let request = match self.inner.build() {
            Ok(built) => built,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let operation = match request.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(operation).await
    }
    /// <p>The Amazon Web Services account ID associated with the registry that contains the repository to
    /// delete. If you do not specify a registry, the default registry is assumed.</p>
    pub fn registry_id(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.registry_id(value);
        self
    }
    /// <p>The Amazon Web Services account ID associated with the registry that contains the repository to
    /// delete. If you do not specify a registry, the default registry is assumed.</p>
    pub fn set_registry_id(mut self, value: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_registry_id(value);
        self
    }
    /// <p>The name of the repository to delete.</p>
    pub fn repository_name(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.repository_name(value);
        self
    }
    /// <p>The name of the repository to delete.</p>
    pub fn set_repository_name(
        mut self,
        value: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_repository_name(value);
        self
    }
    /// <p> If a repository contains images, forces the deletion.</p>
    pub fn force(mut self, value: bool) -> Self {
        self.inner = self.inner.force(value);
        self
    }
    /// <p> If a repository contains images, forces the deletion.</p>
    pub fn set_force(mut self, value: std::option::Option<bool>) -> Self {
        self.inner = self.inner.set_force(value);
        self
    }
}
/// Fluent builder constructing a request to `DeleteRepositoryPolicy`.
///
/// <p>Deletes the repository policy associated with the specified repository.</p>
// Generic over the connector `C`, middleware `M`, and retry policy `R`;
// defaults are the standard AWS stack supplied by the owning `Client`.
#[derive(std::fmt::Debug)]
pub struct DeleteRepositoryPolicy<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + config, cloned (`Arc`) from the `Client` that created this builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set through the fluent setters on the impl below.
    inner: crate::input::delete_repository_policy_input::Builder,
}
impl<C, M, R> DeleteRepositoryPolicy<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DeleteRepositoryPolicy` fluent builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteRepositoryPolicyOutput,
        aws_smithy_http::result::SdkError<crate::error::DeleteRepositoryPolicyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DeleteRepositoryPolicyInputOperationOutputAlias,
            crate::output::DeleteRepositoryPolicyOutput,
            crate::error::DeleteRepositoryPolicyError,
            crate::input::DeleteRepositoryPolicyInputOperationRetryAlias,
        >,
    {
        // Build the input, then the operation; a failure in either step is a
        // construction error, not a service error.
        let request = match self.inner.build() {
            Ok(built) => built,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let operation = match request.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(operation).await
    }
    /// <p>The Amazon Web Services account ID associated with the registry that contains the repository policy
    /// to delete. If you do not specify a registry, the default registry is assumed.</p>
    pub fn registry_id(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.registry_id(value);
        self
    }
    /// <p>The Amazon Web Services account ID associated with the registry that contains the repository policy
    /// to delete. If you do not specify a registry, the default registry is assumed.</p>
    pub fn set_registry_id(mut self, value: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_registry_id(value);
        self
    }
    /// <p>The name of the repository that is associated with the repository policy to
    /// delete.</p>
    pub fn repository_name(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.repository_name(value);
        self
    }
    /// <p>The name of the repository that is associated with the repository policy to
    /// delete.</p>
    pub fn set_repository_name(
        mut self,
        value: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_repository_name(value);
        self
    }
}
/// Fluent builder constructing a request to `DescribeImageReplicationStatus`.
///
/// <p>Returns the replication status for a specified image.</p>
// Generic over the connector `C`, middleware `M`, and retry policy `R`;
// defaults are the standard AWS stack supplied by the owning `Client`.
#[derive(std::fmt::Debug)]
pub struct DescribeImageReplicationStatus<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + config, cloned (`Arc`) from the `Client` that created this builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set through the fluent setters on the impl below.
    inner: crate::input::describe_image_replication_status_input::Builder,
}
impl<C, M, R> DescribeImageReplicationStatus<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DescribeImageReplicationStatus` fluent builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeImageReplicationStatusOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeImageReplicationStatusError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeImageReplicationStatusInputOperationOutputAlias,
            crate::output::DescribeImageReplicationStatusOutput,
            crate::error::DescribeImageReplicationStatusError,
            crate::input::DescribeImageReplicationStatusInputOperationRetryAlias,
        >,
    {
        // Build the input, then the operation; a failure in either step is a
        // construction error, not a service error.
        let request = match self.inner.build() {
            Ok(built) => built,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let operation = match request.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(operation).await
    }
    /// <p>The name of the repository that the image is in.</p>
    pub fn repository_name(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.repository_name(value);
        self
    }
    /// <p>The name of the repository that the image is in.</p>
    pub fn set_repository_name(
        mut self,
        value: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_repository_name(value);
        self
    }
    /// <p>An object with identifying information for an image in an Amazon ECR repository.</p>
    pub fn image_id(mut self, value: crate::model::ImageIdentifier) -> Self {
        self.inner = self.inner.image_id(value);
        self
    }
    /// <p>An object with identifying information for an image in an Amazon ECR repository.</p>
    pub fn set_image_id(
        mut self,
        value: std::option::Option<crate::model::ImageIdentifier>,
    ) -> Self {
        self.inner = self.inner.set_image_id(value);
        self
    }
    /// <p>The Amazon Web Services account ID associated with the registry. If you do not specify a registry, the default registry is assumed.</p>
    pub fn registry_id(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.registry_id(value);
        self
    }
    /// <p>The Amazon Web Services account ID associated with the registry. If you do not specify a registry, the default registry is assumed.</p>
    pub fn set_registry_id(mut self, value: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_registry_id(value);
        self
    }
}
/// Fluent builder constructing a request to `DescribeImages`.
///
/// <p>Returns metadata about the images in a repository.</p>
/// <note>
/// <p>Beginning with Docker version 1.9, the Docker client compresses image layers
/// before pushing them to a V2 Docker registry. The output of the <code>docker
/// images</code> command shows the uncompressed image size, so it may return a
/// larger image size than the image sizes returned by <a>DescribeImages</a>.</p>
/// </note>
// Generic over the connector `C`, middleware `M`, and retry policy `R`;
// defaults are the standard AWS stack supplied by the owning `Client`.
#[derive(std::fmt::Debug)]
pub struct DescribeImages<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + config, cloned (`Arc`) from the `Client` that created this builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set through the fluent setters on the impl below.
    inner: crate::input::describe_images_input::Builder,
}
impl<C, M, R> DescribeImages<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `DescribeImages` fluent builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = Default::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeImagesOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeImagesError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeImagesInputOperationOutputAlias,
            crate::output::DescribeImagesOutput,
            crate::error::DescribeImagesError,
            crate::input::DescribeImagesInputOperationRetryAlias,
        >,
    {
        // Build the input, then the operation; a failure in either step is a
        // construction error, not a service error.
        let request = match self.inner.build() {
            Ok(built) => built,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        let operation = match request.make_operation(&self.handle.conf).await {
            Ok(op) => op,
            Err(err) => {
                return Err(aws_smithy_http::result::SdkError::ConstructionFailure(
                    err.into(),
                ))
            }
        };
        self.handle.client.call(operation).await
    }
    /// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
    /// which to describe images. If you do not specify a registry, the default registry is assumed.</p>
    pub fn registry_id(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.registry_id(value);
        self
    }
    /// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
    /// which to describe images. If you do not specify a registry, the default registry is assumed.</p>
    pub fn set_registry_id(mut self, value: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_registry_id(value);
        self
    }
    /// <p>The repository that contains the images to describe.</p>
    pub fn repository_name(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.repository_name(value);
        self
    }
    /// <p>The repository that contains the images to describe.</p>
    pub fn set_repository_name(
        mut self,
        value: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_repository_name(value);
        self
    }
    /// Appends an item to `imageIds`.
    ///
    /// To override the contents of this collection use [`set_image_ids`](Self::set_image_ids).
    ///
    /// <p>The list of image IDs for the requested repository.</p>
    pub fn image_ids(mut self, value: impl Into<crate::model::ImageIdentifier>) -> Self {
        self.inner = self.inner.image_ids(value);
        self
    }
    /// <p>The list of image IDs for the requested repository.</p>
    pub fn set_image_ids(
        mut self,
        value: std::option::Option<std::vec::Vec<crate::model::ImageIdentifier>>,
    ) -> Self {
        self.inner = self.inner.set_image_ids(value);
        self
    }
    /// <p>The <code>nextToken</code> value returned from a previous paginated
    /// <code>DescribeImages</code> request where <code>maxResults</code> was used and the
    /// results exceeded the value of that parameter. Pagination continues from the end of the
    /// previous results that returned the <code>nextToken</code> value. This value is
    /// <code>null</code> when there are no more results to return. This option cannot be
    /// used when you specify images with <code>imageIds</code>.</p>
    pub fn next_token(mut self, value: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.next_token(value);
        self
    }
    /// <p>The <code>nextToken</code> value returned from a previous paginated
    /// <code>DescribeImages</code> request where <code>maxResults</code> was used and the
    /// results exceeded the value of that parameter. Pagination continues from the end of the
    /// previous results that returned the <code>nextToken</code> value. This value is
    /// <code>null</code> when there are no more results to return. This option cannot be
    /// used when you specify images with <code>imageIds</code>.</p>
    pub fn set_next_token(mut self, value: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_next_token(value);
        self
    }
    /// <p>The maximum number of repository results returned by <code>DescribeImages</code> in
    /// paginated output. When this parameter is used, <code>DescribeImages</code> only returns
    /// <code>maxResults</code> results in a single page along with a <code>nextToken</code>
    /// response element. The remaining results of the initial request can be seen by sending
    /// another <code>DescribeImages</code> request with the returned <code>nextToken</code>
    /// value. This value can be between 1 and 1000. If this
    /// parameter is not used, then <code>DescribeImages</code> returns up to
    /// 100 results and a <code>nextToken</code> value, if applicable. This
    /// option cannot be used when you specify images with <code>imageIds</code>.</p>
    pub fn max_results(mut self, value: i32) -> Self {
        self.inner = self.inner.max_results(value);
        self
    }
    /// <p>The maximum number of repository results returned by <code>DescribeImages</code> in
    /// paginated output. When this parameter is used, <code>DescribeImages</code> only returns
    /// <code>maxResults</code> results in a single page along with a <code>nextToken</code>
    /// response element. The remaining results of the initial request can be seen by sending
    /// another <code>DescribeImages</code> request with the returned <code>nextToken</code>
    /// value. This value can be between 1 and 1000. If this
    /// parameter is not used, then <code>DescribeImages</code> returns up to
    /// 100 results and a <code>nextToken</code> value, if applicable. This
    /// option cannot be used when you specify images with <code>imageIds</code>.</p>
    pub fn set_max_results(mut self, value: std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_max_results(value);
        self
    }
    /// <p>The filter key and value with which to filter your <code>DescribeImages</code>
    /// results.</p>
    pub fn filter(mut self, value: crate::model::DescribeImagesFilter) -> Self {
        self.inner = self.inner.filter(value);
        self
    }
    /// <p>The filter key and value with which to filter your <code>DescribeImages</code>
    /// results.</p>
    pub fn set_filter(
        mut self,
        value: std::option::Option<crate::model::DescribeImagesFilter>,
    ) -> Self {
        self.inner = self.inner.set_filter(value);
        self
    }
}
/// Fluent builder constructing a request to `DescribeImageScanFindings`.
///
/// <p>Returns the scan findings for the specified image.</p>
// Generic over the connector `C`, middleware `M`, and retry policy `R`;
// defaults are the standard AWS stack supplied by the owning `Client`.
#[derive(std::fmt::Debug)]
pub struct DescribeImageScanFindings<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + config, cloned (`Arc`) from the `Client` that created this builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set through the fluent setters on the impl below.
    inner: crate::input::describe_image_scan_findings_input::Builder,
}
impl<C, M, R> DescribeImageScanFindings<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a new `DescribeImageScanFindings` builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self { inner: Default::default(), handle }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// On failure an `SdkError` is returned, carrying details that can be
    /// matched against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via the
    /// [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when the
    /// client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeImageScanFindingsOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeImageScanFindingsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeImageScanFindingsInputOperationOutputAlias,
            crate::output::DescribeImageScanFindingsOutput,
            crate::error::DescribeImageScanFindingsError,
            crate::input::DescribeImageScanFindingsInputOperationRetryAlias,
        >,
    {
        // Input validation and operation construction both surface as
        // `ConstructionFailure` before anything touches the wire.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// The Amazon Web Services account ID of the registry containing the repository whose
    /// image scan findings are described. When unset, the default registry is assumed.
    pub fn registry_id(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.registry_id(inp), handle: self.handle }
    }
    /// Overrides the registry ID; see [`Self::registry_id`]. `None` clears the field.
    pub fn set_registry_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_registry_id(input), handle: self.handle }
    }
    /// The repository holding the image whose scan findings are described.
    pub fn repository_name(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.repository_name(inp), handle: self.handle }
    }
    /// Overrides the repository name; see [`Self::repository_name`]. `None` clears the field.
    pub fn set_repository_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_repository_name(input), handle: self.handle }
    }
    /// Identifying information (digest and/or tag) for the image in the Amazon ECR repository.
    pub fn image_id(self, inp: crate::model::ImageIdentifier) -> Self {
        Self { inner: self.inner.image_id(inp), handle: self.handle }
    }
    /// Overrides the image identifier; see [`Self::image_id`]. `None` clears the field.
    pub fn set_image_id(
        self,
        input: std::option::Option<crate::model::ImageIdentifier>,
    ) -> Self {
        Self { inner: self.inner.set_image_id(input), handle: self.handle }
    }
    /// The `nextToken` value from a previous paginated `DescribeImageScanFindings`
    /// call whose results exceeded `maxResults`. Pagination resumes from the end of
    /// the previous page; the token is null once no results remain.
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.next_token(inp), handle: self.handle }
    }
    /// Overrides the pagination token; see [`Self::next_token`]. `None` clears the field.
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_next_token(input), handle: self.handle }
    }
    /// The maximum number of scan results per page (1-1000). When set, the response
    /// carries a `nextToken` for fetching the remainder; when unset, up to 100
    /// results are returned.
    pub fn max_results(self, inp: i32) -> Self {
        Self { inner: self.inner.max_results(inp), handle: self.handle }
    }
    /// Overrides the page size; see [`Self::max_results`]. `None` clears the field.
    pub fn set_max_results(self, input: std::option::Option<i32>) -> Self {
        Self { inner: self.inner.set_max_results(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to `DescribeRegistry`.
///
/// <p>Describes the settings for a registry. The replication configuration for a repository
/// can be created or updated with the <a>PutReplicationConfiguration</a> API
/// action.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeRegistry<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client + configuration used to dispatch the built request.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Request builder; `DescribeRegistry` takes no parameters, so this stays at its default.
    inner: crate::input::describe_registry_input::Builder,
}
impl<C, M, R> DescribeRegistry<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a new `DescribeRegistry` builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self { inner: Default::default(), handle }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// On failure an `SdkError` is returned, carrying details that can be
    /// matched against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via the
    /// [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when the
    /// client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeRegistryOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeRegistryError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeRegistryInputOperationOutputAlias,
            crate::output::DescribeRegistryOutput,
            crate::error::DescribeRegistryError,
            crate::input::DescribeRegistryInputOperationRetryAlias,
        >,
    {
        // Input validation and operation construction both surface as
        // `ConstructionFailure` before anything touches the wire.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
}
/// Fluent builder constructing a request to `DescribeRepositories`.
///
/// <p>Describes image repositories in a registry.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeRepositories<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client + configuration used to dispatch the built request.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request fields set through the fluent methods below.
    inner: crate::input::describe_repositories_input::Builder,
}
impl<C, M, R> DescribeRepositories<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a new `DescribeRepositories` builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self { inner: Default::default(), handle }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// On failure an `SdkError` is returned, carrying details that can be
    /// matched against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via the
    /// [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when the
    /// client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeRepositoriesOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeRepositoriesError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeRepositoriesInputOperationOutputAlias,
            crate::output::DescribeRepositoriesOutput,
            crate::error::DescribeRepositoriesError,
            crate::input::DescribeRepositoriesInputOperationRetryAlias,
        >,
    {
        // Input validation and operation construction both surface as
        // `ConstructionFailure` before anything touches the wire.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// The Amazon Web Services account ID of the registry containing the repositories to
    /// describe. When unset, the default registry is assumed.
    pub fn registry_id(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.registry_id(inp), handle: self.handle }
    }
    /// Overrides the registry ID; see [`Self::registry_id`]. `None` clears the field.
    pub fn set_registry_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_registry_id(input), handle: self.handle }
    }
    /// Appends one repository name to `repositoryNames`.
    ///
    /// To replace the whole collection use
    /// [`set_repository_names`](Self::set_repository_names).
    ///
    /// If the list is left empty, all repositories in the registry are described.
    pub fn repository_names(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.repository_names(inp), handle: self.handle }
    }
    /// Replaces the entire `repositoryNames` collection; see
    /// [`Self::repository_names`]. `None` clears the field.
    pub fn set_repository_names(
        self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        Self { inner: self.inner.set_repository_names(input), handle: self.handle }
    }
    /// The `nextToken` value from a previous paginated `DescribeRepositories` call
    /// whose results exceeded `maxResults`. Pagination resumes from the end of the
    /// previous page; the token is `null` once no results remain. Cannot be combined
    /// with `repositoryNames`.
    ///
    /// Treat the token as an opaque identifier used only to fetch the next page.
    pub fn next_token(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.next_token(inp), handle: self.handle }
    }
    /// Overrides the pagination token; see [`Self::next_token`]. `None` clears the field.
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_next_token(input), handle: self.handle }
    }
    /// The maximum number of repository results per page (1-1000). When set, the
    /// response carries a `nextToken` for fetching the remainder; when unset, up to
    /// 100 results are returned. Cannot be combined with `repositoryNames`.
    pub fn max_results(self, inp: i32) -> Self {
        Self { inner: self.inner.max_results(inp), handle: self.handle }
    }
    /// Overrides the page size; see [`Self::max_results`]. `None` clears the field.
    pub fn set_max_results(self, input: std::option::Option<i32>) -> Self {
        Self { inner: self.inner.set_max_results(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to `GetAuthorizationToken`.
///
/// <p>Retrieves an authorization token. An authorization token represents your IAM
/// authentication credentials and can be used to access any Amazon ECR registry that your IAM
/// principal has access to. The authorization token is valid for 12 hours.</p>
/// <p>The <code>authorizationToken</code> returned is a base64 encoded string that can be
/// decoded and used in a <code>docker login</code> command to authenticate to a registry.
/// The CLI offers an <code>get-login-password</code> command that simplifies the login
/// process. For more information, see <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth">Registry
/// authentication</a> in the <i>Amazon Elastic Container Registry User Guide</i>.</p>
#[derive(std::fmt::Debug)]
pub struct GetAuthorizationToken<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client + configuration used to dispatch the built request.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request fields set through the fluent methods below.
    inner: crate::input::get_authorization_token_input::Builder,
}
impl<C, M, R> GetAuthorizationToken<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a new `GetAuthorizationToken` builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self { inner: Default::default(), handle }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// On failure an `SdkError` is returned, carrying details that can be
    /// matched against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via the
    /// [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when the
    /// client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetAuthorizationTokenOutput,
        aws_smithy_http::result::SdkError<crate::error::GetAuthorizationTokenError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetAuthorizationTokenInputOperationOutputAlias,
            crate::output::GetAuthorizationTokenOutput,
            crate::error::GetAuthorizationTokenError,
            crate::input::GetAuthorizationTokenInputOperationRetryAlias,
        >,
    {
        // Input validation and operation construction both surface as
        // `ConstructionFailure` before anything touches the wire.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// Appends one Amazon Web Services account ID to `registryIds` — the registries for
    /// which AuthorizationData objects are fetched. When the list is left empty, the
    /// default registry is assumed.
    ///
    /// To replace the whole collection use [`set_registry_ids`](Self::set_registry_ids).
    pub fn registry_ids(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.registry_ids(inp), handle: self.handle }
    }
    /// Replaces the entire `registryIds` collection; see [`Self::registry_ids`].
    /// `None` clears the field.
    pub fn set_registry_ids(
        self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        Self { inner: self.inner.set_registry_ids(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to `GetDownloadUrlForLayer`.
///
/// <p>Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You can
/// only get URLs for image layers that are referenced in an image.</p>
/// <p>When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer
/// that is not already cached.</p>
/// <note>
/// <p>This operation is used by the Amazon ECR proxy and is not generally used by
/// customers for pulling and pushing images. In most cases, you should use the <code>docker</code> CLI to pull, tag, and push images.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct GetDownloadUrlForLayer<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client + configuration used to dispatch the built request.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request fields set through the fluent methods below.
    inner: crate::input::get_download_url_for_layer_input::Builder,
}
impl<C, M, R> GetDownloadUrlForLayer<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a new `GetDownloadUrlForLayer` builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self { inner: Default::default(), handle }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// On failure an `SdkError` is returned, carrying details that can be
    /// matched against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via the
    /// [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when the
    /// client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetDownloadUrlForLayerOutput,
        aws_smithy_http::result::SdkError<crate::error::GetDownloadUrlForLayerError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetDownloadUrlForLayerInputOperationOutputAlias,
            crate::output::GetDownloadUrlForLayerOutput,
            crate::error::GetDownloadUrlForLayerError,
            crate::input::GetDownloadUrlForLayerInputOperationRetryAlias,
        >,
    {
        // Input validation and operation construction both surface as
        // `ConstructionFailure` before anything touches the wire.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// The Amazon Web Services account ID of the registry containing the image layer to
    /// download. When unset, the default registry is assumed.
    pub fn registry_id(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.registry_id(inp), handle: self.handle }
    }
    /// Overrides the registry ID; see [`Self::registry_id`]. `None` clears the field.
    pub fn set_registry_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_registry_id(input), handle: self.handle }
    }
    /// The name of the repository associated with the image layer to download.
    pub fn repository_name(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.repository_name(inp), handle: self.handle }
    }
    /// Overrides the repository name; see [`Self::repository_name`]. `None` clears the field.
    pub fn set_repository_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_repository_name(input), handle: self.handle }
    }
    /// The digest of the image layer to download.
    pub fn layer_digest(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.layer_digest(inp), handle: self.handle }
    }
    /// Overrides the layer digest; see [`Self::layer_digest`]. `None` clears the field.
    pub fn set_layer_digest(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_layer_digest(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to `GetLifecyclePolicy`.
///
/// <p>Retrieves the lifecycle policy for the specified repository.</p>
#[derive(std::fmt::Debug)]
pub struct GetLifecyclePolicy<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client + configuration used to dispatch the built request.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request fields set through the fluent methods below.
    inner: crate::input::get_lifecycle_policy_input::Builder,
}
impl<C, M, R> GetLifecyclePolicy<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a new `GetLifecyclePolicy` builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self { inner: Default::default(), handle }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// On failure an `SdkError` is returned, carrying details that can be
    /// matched against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via the
    /// [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when the
    /// client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetLifecyclePolicyOutput,
        aws_smithy_http::result::SdkError<crate::error::GetLifecyclePolicyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetLifecyclePolicyInputOperationOutputAlias,
            crate::output::GetLifecyclePolicyOutput,
            crate::error::GetLifecyclePolicyError,
            crate::input::GetLifecyclePolicyInputOperationRetryAlias,
        >,
    {
        // Input validation and operation construction both surface as
        // `ConstructionFailure` before anything touches the wire.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// The Amazon Web Services account ID of the registry containing the repository.
    /// When unset, the default registry is assumed.
    pub fn registry_id(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.registry_id(inp), handle: self.handle }
    }
    /// Overrides the registry ID; see [`Self::registry_id`]. `None` clears the field.
    pub fn set_registry_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_registry_id(input), handle: self.handle }
    }
    /// The name of the repository.
    pub fn repository_name(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.repository_name(inp), handle: self.handle }
    }
    /// Overrides the repository name; see [`Self::repository_name`]. `None` clears the field.
    pub fn set_repository_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_repository_name(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to `GetLifecyclePolicyPreview`.
///
/// <p>Retrieves the results of the lifecycle policy preview request for the specified
/// repository.</p>
#[derive(std::fmt::Debug)]
pub struct GetLifecyclePolicyPreview<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client + configuration used to dispatch the built request.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Accumulates the request fields set through the fluent methods below.
    inner: crate::input::get_lifecycle_policy_preview_input::Builder,
}
impl<C, M, R> GetLifecyclePolicyPreview<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetLifecyclePolicyPreview`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetLifecyclePolicyPreviewOutput,
        aws_smithy_http::result::SdkError<crate::error::GetLifecyclePolicyPreviewError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetLifecyclePolicyPreviewInputOperationOutputAlias,
            crate::output::GetLifecyclePolicyPreviewOutput,
            crate::error::GetLifecyclePolicyPreviewError,
            crate::input::GetLifecyclePolicyPreviewInputOperationRetryAlias,
        >,
    {
        // Failures to validate the input or to construct the operation are both
        // reported as `ConstructionFailure` before any network activity occurs.
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The Amazon Web Services account ID associated with the registry that contains the repository.
    /// If you do not specify a registry, the default registry is assumed.</p>
    pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.registry_id(inp);
        self
    }
    /// <p>The Amazon Web Services account ID associated with the registry that contains the repository.
    /// If you do not specify a registry, the default registry is assumed.</p>
    pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_registry_id(input);
        self
    }
    /// <p>The name of the repository.</p>
    pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.repository_name(inp);
        self
    }
    /// <p>The name of the repository.</p>
    pub fn set_repository_name(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_repository_name(input);
        self
    }
    /// Appends an item to `imageIds`.
    ///
    /// To override the contents of this collection use [`set_image_ids`](Self::set_image_ids).
    ///
    /// <p>The list of imageIDs to be included.</p>
    pub fn image_ids(mut self, inp: impl Into<crate::model::ImageIdentifier>) -> Self {
        self.inner = self.inner.image_ids(inp);
        self
    }
    /// <p>The list of imageIDs to be included.</p>
    pub fn set_image_ids(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::ImageIdentifier>>,
    ) -> Self {
        self.inner = self.inner.set_image_ids(input);
        self
    }
    /// <p>The <code>nextToken</code> value returned from a previous paginated
    /// <code>GetLifecyclePolicyPreviewRequest</code> request where <code>maxResults</code>
    /// was used and the results exceeded the value of that parameter. Pagination continues
    /// from the end of the previous results that returned the <code>nextToken</code> value.
    /// This value is <code>null</code> when there are no more results to return. This option
    /// cannot be used when you specify images with <code>imageIds</code>.</p>
    pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.next_token(inp);
        self
    }
    /// <p>The <code>nextToken</code> value returned from a previous paginated
    /// <code>GetLifecyclePolicyPreviewRequest</code> request where <code>maxResults</code>
    /// was used and the results exceeded the value of that parameter. Pagination continues
    /// from the end of the previous results that returned the <code>nextToken</code> value.
    /// This value is <code>null</code> when there are no more results to return. This option
    /// cannot be used when you specify images with <code>imageIds</code>.</p>
    pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_next_token(input);
        self
    }
    /// <p>The maximum number of repository results returned by
    /// <code>GetLifecyclePolicyPreviewRequest</code> in paginated output. When this
    /// parameter is used, <code>GetLifecyclePolicyPreviewRequest</code> only returns
    /// <code>maxResults</code> results in a single page along with a
    /// <code>nextToken</code> response element. The remaining results of the initial request
    /// can be seen by sending another <code>GetLifecyclePolicyPreviewRequest</code> request
    /// with the returned <code>nextToken</code> value. This value can be between
    /// 1 and 1000. If this parameter is not used, then
    /// <code>GetLifecyclePolicyPreviewRequest</code> returns up to 100
    /// results and a <code>nextToken</code> value, if applicable. This option cannot be used
    /// when you specify images with <code>imageIds</code>.</p>
    pub fn max_results(mut self, inp: i32) -> Self {
        self.inner = self.inner.max_results(inp);
        self
    }
    /// <p>The maximum number of repository results returned by
    /// <code>GetLifecyclePolicyPreviewRequest</code> in paginated output. When this
    /// parameter is used, <code>GetLifecyclePolicyPreviewRequest</code> only returns
    /// <code>maxResults</code> results in a single page along with a
    /// <code>nextToken</code> response element. The remaining results of the initial request
    /// can be seen by sending another <code>GetLifecyclePolicyPreviewRequest</code> request
    /// with the returned <code>nextToken</code> value. This value can be between
    /// 1 and 1000. If this parameter is not used, then
    /// <code>GetLifecyclePolicyPreviewRequest</code> returns up to 100
    /// results and a <code>nextToken</code> value, if applicable. This option cannot be used
    /// when you specify images with <code>imageIds</code>.</p>
    pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_max_results(input);
        self
    }
    /// <p>An optional parameter that filters results based on image tag status and all tags, if
    /// tagged.</p>
    pub fn filter(mut self, inp: crate::model::LifecyclePolicyPreviewFilter) -> Self {
        self.inner = self.inner.filter(inp);
        self
    }
    /// <p>An optional parameter that filters results based on image tag status and all tags, if
    /// tagged.</p>
    pub fn set_filter(
        mut self,
        input: std::option::Option<crate::model::LifecyclePolicyPreviewFilter>,
    ) -> Self {
        self.inner = self.inner.set_filter(input);
        self
    }
}
/// Fluent builder constructing a request to `GetRegistryPolicy`.
///
/// <p>Retrieves the permissions policy for a registry.</p>
#[derive(std::fmt::Debug)]
pub struct GetRegistryPolicy<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    /// Shared client + configuration used to dispatch the built request.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    /// Request builder; `GetRegistryPolicy` takes no parameters, so this stays at its default.
    inner: crate::input::get_registry_policy_input::Builder,
}
impl<C, M, R> GetRegistryPolicy<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a new `GetRegistryPolicy` builder backed by `handle`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self { inner: Default::default(), handle }
    }
    /// Dispatches the request and awaits the service response.
    ///
    /// On failure an `SdkError` is returned, carrying details that can be
    /// matched against.
    ///
    /// Retryable failures are retried twice by default; this is tunable via the
    /// [RetryConfig](aws_smithy_types::retry::RetryConfig) supplied when the
    /// client is configured.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetRegistryPolicyOutput,
        aws_smithy_http::result::SdkError<crate::error::GetRegistryPolicyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetRegistryPolicyInputOperationOutputAlias,
            crate::output::GetRegistryPolicyOutput,
            crate::error::GetRegistryPolicyError,
            crate::input::GetRegistryPolicyInputOperationRetryAlias,
        >,
    {
        // Input validation and operation construction both surface as
        // `ConstructionFailure` before anything touches the wire.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
}
    /// Fluent builder constructing a request to `GetRepositoryPolicy`.
    ///
    /// <p>Retrieves the repository policy for the specified repository.</p>
    #[derive(std::fmt::Debug)]
    pub struct GetRepositoryPolicy<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client + configuration handle, reference-counted with the parent `Client`.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Operation-input builder populated by this fluent builder's setter methods.
        inner: crate::input::get_repository_policy_input::Builder,
    }
impl<C, M, R> GetRepositoryPolicy<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetRepositoryPolicy`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetRepositoryPolicyOutput,
aws_smithy_http::result::SdkError<crate::error::GetRepositoryPolicyError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetRepositoryPolicyInputOperationOutputAlias,
crate::output::GetRepositoryPolicyOutput,
crate::error::GetRepositoryPolicyError,
crate::input::GetRepositoryPolicyInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository.
/// If you do not specify a registry, the default registry is assumed.</p>
pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.registry_id(inp);
self
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository.
/// If you do not specify a registry, the default registry is assumed.</p>
pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_registry_id(input);
self
}
/// <p>The name of the repository with the policy to retrieve.</p>
pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.repository_name(inp);
self
}
/// <p>The name of the repository with the policy to retrieve.</p>
pub fn set_repository_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_repository_name(input);
self
}
}
    /// Fluent builder constructing a request to `InitiateLayerUpload`.
    ///
    /// <p>Notifies Amazon ECR that you intend to upload an image layer.</p>
    /// <p>When an image is pushed, the InitiateLayerUpload API is called once per image layer
    /// that has not already been uploaded. Whether or not an image layer has been uploaded is
    /// determined by the BatchCheckLayerAvailability API action.</p>
    /// <note>
    /// <p>This operation is used by the Amazon ECR proxy and is not generally used by
    /// customers for pulling and pushing images. In most cases, you should use the <code>docker</code> CLI to pull, tag, and push images.</p>
    /// </note>
    #[derive(std::fmt::Debug)]
    pub struct InitiateLayerUpload<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client + configuration handle, reference-counted with the parent `Client`.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Operation-input builder populated by this fluent builder's setter methods.
        inner: crate::input::initiate_layer_upload_input::Builder,
    }
impl<C, M, R> InitiateLayerUpload<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `InitiateLayerUpload`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::InitiateLayerUploadOutput,
aws_smithy_http::result::SdkError<crate::error::InitiateLayerUploadError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::InitiateLayerUploadInputOperationOutputAlias,
crate::output::InitiateLayerUploadOutput,
crate::error::InitiateLayerUploadError,
crate::input::InitiateLayerUploadInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Web Services account ID associated with the registry to which you intend to upload
/// layers. If you do not specify a registry, the default registry is assumed.</p>
pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.registry_id(inp);
self
}
/// <p>The Amazon Web Services account ID associated with the registry to which you intend to upload
/// layers. If you do not specify a registry, the default registry is assumed.</p>
pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_registry_id(input);
self
}
/// <p>The name of the repository to which you intend to upload layers.</p>
pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.repository_name(inp);
self
}
/// <p>The name of the repository to which you intend to upload layers.</p>
pub fn set_repository_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_repository_name(input);
self
}
}
    /// Fluent builder constructing a request to `ListImages`.
    ///
    /// <p>Lists all the image IDs for the specified repository.</p>
    /// <p>You can filter images based on whether or not they are tagged by using the
    /// <code>tagStatus</code> filter and specifying either <code>TAGGED</code>,
    /// <code>UNTAGGED</code> or <code>ANY</code>. For example, you can filter your results
    /// to return only <code>UNTAGGED</code> images and then pipe that result to a <a>BatchDeleteImage</a> operation to delete them. Or, you can filter your
    /// results to return only <code>TAGGED</code> images to list all of the tags in your
    /// repository.</p>
    #[derive(std::fmt::Debug)]
    pub struct ListImages<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client + configuration handle, reference-counted with the parent `Client`.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Operation-input builder populated by this fluent builder's setter methods.
        inner: crate::input::list_images_input::Builder,
    }
impl<C, M, R> ListImages<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListImages`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListImagesOutput,
aws_smithy_http::result::SdkError<crate::error::ListImagesError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListImagesInputOperationOutputAlias,
crate::output::ListImagesOutput,
crate::error::ListImagesError,
crate::input::ListImagesInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
/// which to list images. If you do not specify a registry, the default registry is assumed.</p>
pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.registry_id(inp);
self
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
/// which to list images. If you do not specify a registry, the default registry is assumed.</p>
pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_registry_id(input);
self
}
/// <p>The repository with image IDs to be listed.</p>
pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.repository_name(inp);
self
}
/// <p>The repository with image IDs to be listed.</p>
pub fn set_repository_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_repository_name(input);
self
}
/// <p>The <code>nextToken</code> value returned from a previous paginated
/// <code>ListImages</code> request where <code>maxResults</code> was used and the
/// results exceeded the value of that parameter. Pagination continues from the end of the
/// previous results that returned the <code>nextToken</code> value. This value is
/// <code>null</code> when there are no more results to return.</p>
/// <note>
/// <p>This token should be treated as an opaque identifier that is only used to
/// retrieve the next items in a list and not for other programmatic purposes.</p>
/// </note>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>The <code>nextToken</code> value returned from a previous paginated
/// <code>ListImages</code> request where <code>maxResults</code> was used and the
/// results exceeded the value of that parameter. Pagination continues from the end of the
/// previous results that returned the <code>nextToken</code> value. This value is
/// <code>null</code> when there are no more results to return.</p>
/// <note>
/// <p>This token should be treated as an opaque identifier that is only used to
/// retrieve the next items in a list and not for other programmatic purposes.</p>
/// </note>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
/// <p>The maximum number of image results returned by <code>ListImages</code> in paginated
/// output. When this parameter is used, <code>ListImages</code> only returns
/// <code>maxResults</code> results in a single page along with a <code>nextToken</code>
/// response element. The remaining results of the initial request can be seen by sending
/// another <code>ListImages</code> request with the returned <code>nextToken</code> value.
/// This value can be between 1 and 1000. If this parameter is
/// not used, then <code>ListImages</code> returns up to 100 results and a
/// <code>nextToken</code> value, if applicable.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
/// <p>The maximum number of image results returned by <code>ListImages</code> in paginated
/// output. When this parameter is used, <code>ListImages</code> only returns
/// <code>maxResults</code> results in a single page along with a <code>nextToken</code>
/// response element. The remaining results of the initial request can be seen by sending
/// another <code>ListImages</code> request with the returned <code>nextToken</code> value.
/// This value can be between 1 and 1000. If this parameter is
/// not used, then <code>ListImages</code> returns up to 100 results and a
/// <code>nextToken</code> value, if applicable.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>The filter key and value with which to filter your <code>ListImages</code>
/// results.</p>
pub fn filter(mut self, inp: crate::model::ListImagesFilter) -> Self {
self.inner = self.inner.filter(inp);
self
}
/// <p>The filter key and value with which to filter your <code>ListImages</code>
/// results.</p>
pub fn set_filter(
mut self,
input: std::option::Option<crate::model::ListImagesFilter>,
) -> Self {
self.inner = self.inner.set_filter(input);
self
}
}
    /// Fluent builder constructing a request to `ListTagsForResource`.
    ///
    /// <p>List the tags for an Amazon ECR resource.</p>
    #[derive(std::fmt::Debug)]
    pub struct ListTagsForResource<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client + configuration handle, reference-counted with the parent `Client`.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Operation-input builder populated by this fluent builder's setter methods.
        inner: crate::input::list_tags_for_resource_input::Builder,
    }
impl<C, M, R> ListTagsForResource<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListTagsForResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListTagsForResourceOutput,
aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListTagsForResourceInputOperationOutputAlias,
crate::output::ListTagsForResourceOutput,
crate::error::ListTagsForResourceError,
crate::input::ListTagsForResourceInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the
/// only supported resource is an Amazon ECR repository.</p>
pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(inp);
self
}
/// <p>The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the
/// only supported resource is an Amazon ECR repository.</p>
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
}
    /// Fluent builder constructing a request to `PutImage`.
    ///
    /// <p>Creates or updates the image manifest and tags associated with an image.</p>
    /// <p>When an image is pushed and all new image layers have been uploaded, the PutImage API
    /// is called once to create or update the image manifest and the tags associated with the
    /// image.</p>
    ///
    /// <note>
    /// <p>This operation is used by the Amazon ECR proxy and is not generally used by
    /// customers for pulling and pushing images. In most cases, you should use the <code>docker</code> CLI to pull, tag, and push images.</p>
    /// </note>
    #[derive(std::fmt::Debug)]
    pub struct PutImage<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client + configuration handle, reference-counted with the parent `Client`.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Operation-input builder populated by this fluent builder's setter methods.
        inner: crate::input::put_image_input::Builder,
    }
impl<C, M, R> PutImage<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `PutImage`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutImageOutput,
aws_smithy_http::result::SdkError<crate::error::PutImageError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::PutImageInputOperationOutputAlias,
crate::output::PutImageOutput,
crate::error::PutImageError,
crate::input::PutImageInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
/// which to put the image. If you do not specify a registry, the default registry is assumed.</p>
pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.registry_id(inp);
self
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
/// which to put the image. If you do not specify a registry, the default registry is assumed.</p>
pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_registry_id(input);
self
}
/// <p>The name of the repository in which to put the image.</p>
pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.repository_name(inp);
self
}
/// <p>The name of the repository in which to put the image.</p>
pub fn set_repository_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_repository_name(input);
self
}
/// <p>The image manifest corresponding to the image to be uploaded.</p>
pub fn image_manifest(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.image_manifest(inp);
self
}
/// <p>The image manifest corresponding to the image to be uploaded.</p>
pub fn set_image_manifest(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_image_manifest(input);
self
}
/// <p>The media type of the image manifest. If you push an image manifest that does not
/// contain the <code>mediaType</code> field, you must specify the
/// <code>imageManifestMediaType</code> in the request.</p>
pub fn image_manifest_media_type(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.image_manifest_media_type(inp);
self
}
/// <p>The media type of the image manifest. If you push an image manifest that does not
/// contain the <code>mediaType</code> field, you must specify the
/// <code>imageManifestMediaType</code> in the request.</p>
pub fn set_image_manifest_media_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_image_manifest_media_type(input);
self
}
/// <p>The tag to associate with the image. This parameter is required for images that use
/// the Docker Image Manifest V2 Schema 2 or Open Container Initiative (OCI) formats.</p>
pub fn image_tag(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.image_tag(inp);
self
}
/// <p>The tag to associate with the image. This parameter is required for images that use
/// the Docker Image Manifest V2 Schema 2 or Open Container Initiative (OCI) formats.</p>
pub fn set_image_tag(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_image_tag(input);
self
}
/// <p>The image digest of the image manifest corresponding to the image.</p>
pub fn image_digest(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.image_digest(inp);
self
}
/// <p>The image digest of the image manifest corresponding to the image.</p>
pub fn set_image_digest(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_image_digest(input);
self
}
}
    /// Fluent builder constructing a request to `PutImageScanningConfiguration`.
    ///
    /// <p>Updates the image scanning configuration for the specified repository.</p>
    #[derive(std::fmt::Debug)]
    pub struct PutImageScanningConfiguration<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client + configuration handle, reference-counted with the parent `Client`.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Operation-input builder populated by this fluent builder's setter methods.
        inner: crate::input::put_image_scanning_configuration_input::Builder,
    }
impl<C, M, R> PutImageScanningConfiguration<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `PutImageScanningConfiguration`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutImageScanningConfigurationOutput,
aws_smithy_http::result::SdkError<crate::error::PutImageScanningConfigurationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::PutImageScanningConfigurationInputOperationOutputAlias,
crate::output::PutImageScanningConfigurationOutput,
crate::error::PutImageScanningConfigurationError,
crate::input::PutImageScanningConfigurationInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
/// which to update the image scanning configuration setting.
/// If you do not specify a registry, the default registry is assumed.</p>
pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.registry_id(inp);
self
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
/// which to update the image scanning configuration setting.
/// If you do not specify a registry, the default registry is assumed.</p>
pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_registry_id(input);
self
}
/// <p>The name of the repository in which to update the image scanning configuration
/// setting.</p>
pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.repository_name(inp);
self
}
/// <p>The name of the repository in which to update the image scanning configuration
/// setting.</p>
pub fn set_repository_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_repository_name(input);
self
}
/// <p>The image scanning configuration for the repository. This setting determines whether
/// images are scanned for known vulnerabilities after being pushed to the
/// repository.</p>
pub fn image_scanning_configuration(
mut self,
inp: crate::model::ImageScanningConfiguration,
) -> Self {
self.inner = self.inner.image_scanning_configuration(inp);
self
}
/// <p>The image scanning configuration for the repository. This setting determines whether
/// images are scanned for known vulnerabilities after being pushed to the
/// repository.</p>
pub fn set_image_scanning_configuration(
mut self,
input: std::option::Option<crate::model::ImageScanningConfiguration>,
) -> Self {
self.inner = self.inner.set_image_scanning_configuration(input);
self
}
}
    /// Fluent builder constructing a request to `PutImageTagMutability`.
    ///
    /// <p>Updates the image tag mutability settings for the specified repository. For more
    /// information, see <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html">Image tag
    /// mutability</a> in the <i>Amazon Elastic Container Registry User Guide</i>.</p>
    #[derive(std::fmt::Debug)]
    pub struct PutImageTagMutability<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client + configuration handle, reference-counted with the parent `Client`.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Operation-input builder populated by this fluent builder's setter methods.
        inner: crate::input::put_image_tag_mutability_input::Builder,
    }
impl<C, M, R> PutImageTagMutability<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `PutImageTagMutability`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutImageTagMutabilityOutput,
aws_smithy_http::result::SdkError<crate::error::PutImageTagMutabilityError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::PutImageTagMutabilityInputOperationOutputAlias,
crate::output::PutImageTagMutabilityOutput,
crate::error::PutImageTagMutabilityError,
crate::input::PutImageTagMutabilityInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
/// which to update the image tag mutability settings. If you do not specify a registry, the default registry is assumed.</p>
pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.registry_id(inp);
self
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
/// which to update the image tag mutability settings. If you do not specify a registry, the default registry is assumed.</p>
pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_registry_id(input);
self
}
/// <p>The name of the repository in which to update the image tag mutability
/// settings.</p>
pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.repository_name(inp);
self
}
/// <p>The name of the repository in which to update the image tag mutability
/// settings.</p>
pub fn set_repository_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_repository_name(input);
self
}
/// <p>The tag mutability setting for the repository. If <code>MUTABLE</code> is specified,
/// image tags can be overwritten. If <code>IMMUTABLE</code> is specified, all image tags
/// within the repository will be immutable which will prevent them from being
/// overwritten.</p>
pub fn image_tag_mutability(mut self, inp: crate::model::ImageTagMutability) -> Self {
self.inner = self.inner.image_tag_mutability(inp);
self
}
/// <p>The tag mutability setting for the repository. If <code>MUTABLE</code> is specified,
/// image tags can be overwritten. If <code>IMMUTABLE</code> is specified, all image tags
/// within the repository will be immutable which will prevent them from being
/// overwritten.</p>
pub fn set_image_tag_mutability(
mut self,
input: std::option::Option<crate::model::ImageTagMutability>,
) -> Self {
self.inner = self.inner.set_image_tag_mutability(input);
self
}
}
    /// Fluent builder constructing a request to `PutLifecyclePolicy`.
    ///
    /// <p>Creates or updates the lifecycle policy for the specified repository. For more
    /// information, see <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html">Lifecycle policy
    /// template</a>.</p>
    #[derive(std::fmt::Debug)]
    pub struct PutLifecyclePolicy<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client + configuration handle, reference-counted with the parent `Client`.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Operation-input builder populated by this fluent builder's setter methods.
        inner: crate::input::put_lifecycle_policy_input::Builder,
    }
impl<C, M, R> PutLifecyclePolicy<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `PutLifecyclePolicy`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutLifecyclePolicyOutput,
aws_smithy_http::result::SdkError<crate::error::PutLifecyclePolicyError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::PutLifecyclePolicyInputOperationOutputAlias,
crate::output::PutLifecyclePolicyOutput,
crate::error::PutLifecyclePolicyError,
crate::input::PutLifecyclePolicyInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository. If you
/// do
not specify a registry, the default registry is assumed.</p>
pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.registry_id(inp);
self
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository. If you
/// do
not specify a registry, the default registry is assumed.</p>
pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_registry_id(input);
self
}
/// <p>The name of the repository to receive the policy.</p>
pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.repository_name(inp);
self
}
/// <p>The name of the repository to receive the policy.</p>
pub fn set_repository_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_repository_name(input);
self
}
/// <p>The JSON repository policy text to apply to the repository.</p>
pub fn lifecycle_policy_text(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.lifecycle_policy_text(inp);
self
}
/// <p>The JSON repository policy text to apply to the repository.</p>
pub fn set_lifecycle_policy_text(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_lifecycle_policy_text(input);
self
}
}
/// Fluent builder constructing a request to `PutRegistryPolicy`.
///
/// <p>Creates or updates the permissions policy for your registry.</p>
/// <p>A registry policy is used to specify permissions for another Amazon Web Services account and is used
/// when configuring cross-account replication. For more information, see <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html">Registry permissions</a> in the <i>Amazon Elastic Container Registry User Guide</i>.</p>
#[derive(std::fmt::Debug)]
pub struct PutRegistryPolicy<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::put_registry_policy_input::Builder,
}
impl<C, M, R> PutRegistryPolicy<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `PutRegistryPolicy`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutRegistryPolicyOutput,
aws_smithy_http::result::SdkError<crate::error::PutRegistryPolicyError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::PutRegistryPolicyInputOperationOutputAlias,
crate::output::PutRegistryPolicyOutput,
crate::error::PutRegistryPolicyError,
crate::input::PutRegistryPolicyInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The JSON policy text to apply to your registry. The policy text follows the same
/// format as IAM policy text. For more information, see <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html">Registry
/// permissions</a> in the <i>Amazon Elastic Container Registry User Guide</i>.</p>
pub fn policy_text(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.policy_text(inp);
self
}
/// <p>The JSON policy text to apply to your registry. The policy text follows the same
/// format as IAM policy text. For more information, see <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html">Registry
/// permissions</a> in the <i>Amazon Elastic Container Registry User Guide</i>.</p>
pub fn set_policy_text(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_policy_text(input);
self
}
}
/// Fluent builder constructing a request to `PutReplicationConfiguration`.
///
/// <p>Creates or updates the replication configuration for a registry. The existing
/// replication configuration for a repository can be retrieved with the <a>DescribeRegistry</a> API action. The first time the
/// PutReplicationConfiguration API is called, a service-linked IAM role is created in
/// your account for the replication process. For more information, see <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/using-service-linked-roles.html">Using
/// service-linked roles for Amazon ECR</a> in the
/// <i>Amazon Elastic Container Registry User Guide</i>.</p>
/// <note>
/// <p>When configuring cross-account replication, the destination account must grant the
/// source account permission to replicate. This permission is controlled using a
/// registry permissions policy. For more information, see <a>PutRegistryPolicy</a>.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct PutReplicationConfiguration<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::put_replication_configuration_input::Builder,
}
impl<C, M, R> PutReplicationConfiguration<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `PutReplicationConfiguration`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutReplicationConfigurationOutput,
aws_smithy_http::result::SdkError<crate::error::PutReplicationConfigurationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::PutReplicationConfigurationInputOperationOutputAlias,
crate::output::PutReplicationConfigurationOutput,
crate::error::PutReplicationConfigurationError,
crate::input::PutReplicationConfigurationInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>An object representing the replication configuration for a registry.</p>
pub fn replication_configuration(
mut self,
inp: crate::model::ReplicationConfiguration,
) -> Self {
self.inner = self.inner.replication_configuration(inp);
self
}
/// <p>An object representing the replication configuration for a registry.</p>
pub fn set_replication_configuration(
mut self,
input: std::option::Option<crate::model::ReplicationConfiguration>,
) -> Self {
self.inner = self.inner.set_replication_configuration(input);
self
}
}
/// Fluent builder constructing a request to `SetRepositoryPolicy`.
///
/// <p>Applies a repository policy to the specified repository to control access permissions.
/// For more information, see <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html">Amazon ECR Repository
/// policies</a> in the <i>Amazon Elastic Container Registry User Guide</i>.</p>
#[derive(std::fmt::Debug)]
pub struct SetRepositoryPolicy<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::set_repository_policy_input::Builder,
}
impl<C, M, R> SetRepositoryPolicy<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `SetRepositoryPolicy`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::SetRepositoryPolicyOutput,
aws_smithy_http::result::SdkError<crate::error::SetRepositoryPolicyError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::SetRepositoryPolicyInputOperationOutputAlias,
crate::output::SetRepositoryPolicyOutput,
crate::error::SetRepositoryPolicyError,
crate::input::SetRepositoryPolicyInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository.
/// If you do not specify a registry, the default registry is assumed.</p>
pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.registry_id(inp);
self
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository.
/// If you do not specify a registry, the default registry is assumed.</p>
pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_registry_id(input);
self
}
/// <p>The name of the repository to receive the policy.</p>
pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.repository_name(inp);
self
}
/// <p>The name of the repository to receive the policy.</p>
pub fn set_repository_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_repository_name(input);
self
}
/// <p>The JSON repository policy text to apply to the repository. For more information, see
/// <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html">Amazon ECR repository
/// policies</a> in the <i>Amazon Elastic Container Registry User Guide</i>.</p>
pub fn policy_text(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.policy_text(inp);
self
}
/// <p>The JSON repository policy text to apply to the repository. For more information, see
/// <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html">Amazon ECR repository
/// policies</a> in the <i>Amazon Elastic Container Registry User Guide</i>.</p>
pub fn set_policy_text(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_policy_text(input);
self
}
/// <p>If the policy you are attempting to set on a repository policy would prevent you from
/// setting another policy in the future, you must force the <a>SetRepositoryPolicy</a> operation. This is intended to prevent accidental
/// repository lock outs.</p>
pub fn force(mut self, inp: bool) -> Self {
self.inner = self.inner.force(inp);
self
}
/// <p>If the policy you are attempting to set on a repository policy would prevent you from
/// setting another policy in the future, you must force the <a>SetRepositoryPolicy</a> operation. This is intended to prevent accidental
/// repository lock outs.</p>
pub fn set_force(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_force(input);
self
}
}
/// Fluent builder constructing a request to `StartImageScan`.
///
/// <p>Starts an image vulnerability scan. An image scan can only be started once per 24
/// hours on an individual image. This limit includes if an image was scanned on initial
/// push. For more information, see <a href="https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html">Image scanning</a> in the
/// <i>Amazon Elastic Container Registry User Guide</i>.</p>
#[derive(std::fmt::Debug)]
pub struct StartImageScan<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::start_image_scan_input::Builder,
}
impl<C, M, R> StartImageScan<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `StartImageScan`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::StartImageScanOutput,
aws_smithy_http::result::SdkError<crate::error::StartImageScanError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::StartImageScanInputOperationOutputAlias,
crate::output::StartImageScanOutput,
crate::error::StartImageScanError,
crate::input::StartImageScanInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
/// which to start an image scan request. If you do not specify a registry, the default registry is assumed.</p>
pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.registry_id(inp);
self
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository in
/// which to start an image scan request. If you do not specify a registry, the default registry is assumed.</p>
pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_registry_id(input);
self
}
/// <p>The name of the repository that contains the images to scan.</p>
pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.repository_name(inp);
self
}
/// <p>The name of the repository that contains the images to scan.</p>
pub fn set_repository_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_repository_name(input);
self
}
/// <p>An object with identifying information for an image in an Amazon ECR repository.</p>
pub fn image_id(mut self, inp: crate::model::ImageIdentifier) -> Self {
self.inner = self.inner.image_id(inp);
self
}
/// <p>An object with identifying information for an image in an Amazon ECR repository.</p>
pub fn set_image_id(
mut self,
input: std::option::Option<crate::model::ImageIdentifier>,
) -> Self {
self.inner = self.inner.set_image_id(input);
self
}
}
/// Fluent builder constructing a request to `StartLifecyclePolicyPreview`.
///
/// <p>Starts a preview of a lifecycle policy for the specified repository. This allows you
/// to see the results before associating the lifecycle policy with the repository.</p>
#[derive(std::fmt::Debug)]
pub struct StartLifecyclePolicyPreview<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::start_lifecycle_policy_preview_input::Builder,
}
impl<C, M, R> StartLifecyclePolicyPreview<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `StartLifecyclePolicyPreview`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::StartLifecyclePolicyPreviewOutput,
aws_smithy_http::result::SdkError<crate::error::StartLifecyclePolicyPreviewError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::StartLifecyclePolicyPreviewInputOperationOutputAlias,
crate::output::StartLifecyclePolicyPreviewOutput,
crate::error::StartLifecyclePolicyPreviewError,
crate::input::StartLifecyclePolicyPreviewInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository.
/// If you do not specify a registry, the default registry is assumed.</p>
pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.registry_id(inp);
self
}
/// <p>The Amazon Web Services account ID associated with the registry that contains the repository.
/// If you do not specify a registry, the default registry is assumed.</p>
pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_registry_id(input);
self
}
/// <p>The name of the repository to be evaluated.</p>
pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.repository_name(inp);
self
}
/// <p>The name of the repository to be evaluated.</p>
pub fn set_repository_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_repository_name(input);
self
}
/// <p>The policy to be evaluated against. If you do not specify a policy, the current policy
/// for the repository is used.</p>
pub fn lifecycle_policy_text(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.lifecycle_policy_text(inp);
self
}
/// <p>The policy to be evaluated against. If you do not specify a policy, the current policy
/// for the repository is used.</p>
pub fn set_lifecycle_policy_text(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_lifecycle_policy_text(input);
self
}
}
    /// Fluent builder constructing a request to `TagResource`.
    ///
    /// <p>Adds specified tags to a resource with the specified ARN. Existing tags on a resource
    /// are not changed if they are not specified in the request parameters.</p>
    #[derive(std::fmt::Debug)]
    pub struct TagResource<
        C = aws_smithy_client::erase::DynConnector,
        M = aws_hyper::AwsMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        inner: crate::input::tag_resource_input::Builder,
    }
    impl<C, M, R> TagResource<C, M, R>
    where
        C: aws_smithy_client::bounds::SmithyConnector,
        M: aws_smithy_client::bounds::SmithyMiddleware<C>,
        R: aws_smithy_client::retry::NewRequestPolicy,
    {
        /// Creates a new `TagResource`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }
        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::TagResourceOutput,
            aws_smithy_http::result::SdkError<crate::error::TagResourceError>,
        >
        where
            R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
                crate::input::TagResourceInputOperationOutputAlias,
                crate::output::TagResourceOutput,
                crate::error::TagResourceError,
                crate::input::TagResourceInputOperationRetryAlias,
            >,
        {
            // Builder validation failures are surfaced as construction errors.
            let input = self.inner.build().map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
            // Serialize and sign the operation against the client configuration.
            let op = input
                .make_operation(&self.handle.conf)
                .await
                .map_err(|err| {
                    aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
                })?;
            self.handle.client.call(op).await
        }
        /// <p>The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the only supported
        /// resource is an Amazon ECR repository.</p>
        pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.resource_arn(inp);
            self
        }
        /// <p>The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the only supported
        /// resource is an Amazon ECR repository.</p>
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_resource_arn(input);
            self
        }
        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>The tags to add to the resource. A tag is an array of key-value pairs.
        /// Tag keys can have a maximum character length of 128 characters, and tag values can have
        /// a maximum length of 256 characters.</p>
        pub fn tags(mut self, inp: impl Into<crate::model::Tag>) -> Self {
            self.inner = self.inner.tags(inp);
            self
        }
        /// <p>The tags to add to the resource. A tag is an array of key-value pairs.
        /// Tag keys can have a maximum character length of 128 characters, and tag values can have
        /// a maximum length of 256 characters.</p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.inner = self.inner.set_tags(input);
            self
        }
    }
/// Fluent builder constructing a request to `UntagResource`.
///
/// <p>Deletes specified tags from a resource.</p>
#[derive(std::fmt::Debug)]
pub struct UntagResource<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::untag_resource_input::Builder,
}
impl<C, M, R> UntagResource<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UntagResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UntagResourceOutput,
aws_smithy_http::result::SdkError<crate::error::UntagResourceError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UntagResourceInputOperationOutputAlias,
crate::output::UntagResourceOutput,
crate::error::UntagResourceError,
crate::input::UntagResourceInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) of the resource from which to remove tags. Currently, the only supported
/// resource is an Amazon ECR repository.</p>
pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(inp);
self
}
/// <p>The Amazon Resource Name (ARN) of the resource from which to remove tags. Currently, the only supported
/// resource is an Amazon ECR repository.</p>
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
/// Appends an item to `tagKeys`.
///
/// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys).
///
/// <p>The keys of the tags to be removed.</p>
pub fn tag_keys(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.tag_keys(inp);
self
}
/// <p>The keys of the tags to be removed.</p>
pub fn set_tag_keys(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_tag_keys(input);
self
}
}
/// Fluent builder constructing a request to `UploadLayerPart`.
///
/// <p>Uploads an image layer part to Amazon ECR.</p>
/// <p>When an image is pushed, each new image layer is uploaded in parts. The maximum size
/// of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API
/// is called once per each new image layer part.</p>
/// <note>
/// <p>This operation is used by the Amazon ECR proxy and is not generally used by
/// customers for pulling and pushing images. In most cases, you should use the <code>docker</code> CLI to pull, tag, and push images.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct UploadLayerPart<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::upload_layer_part_input::Builder,
}
impl<C, M, R> UploadLayerPart<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UploadLayerPart`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UploadLayerPartOutput,
aws_smithy_http::result::SdkError<crate::error::UploadLayerPartError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UploadLayerPartInputOperationOutputAlias,
crate::output::UploadLayerPartOutput,
crate::error::UploadLayerPartError,
crate::input::UploadLayerPartInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Web Services account ID associated with the registry to which you are uploading layer
/// parts. If you do not specify a registry, the default registry is assumed.</p>
pub fn registry_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.registry_id(inp);
self
}
/// <p>The Amazon Web Services account ID associated with the registry to which you are uploading layer
/// parts. If you do not specify a registry, the default registry is assumed.</p>
pub fn set_registry_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_registry_id(input);
self
}
/// <p>The name of the repository to which you are uploading layer parts.</p>
pub fn repository_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.repository_name(inp);
self
}
/// <p>The name of the repository to which you are uploading layer parts.</p>
pub fn set_repository_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_repository_name(input);
self
}
/// <p>The upload ID from a previous <a>InitiateLayerUpload</a> operation to
/// associate with the layer part upload.</p>
pub fn upload_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.upload_id(inp);
self
}
/// <p>The upload ID from a previous <a>InitiateLayerUpload</a> operation to
/// associate with the layer part upload.</p>
pub fn set_upload_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_upload_id(input);
self
}
/// <p>The position of the first byte of the layer part witin the overall image layer.</p>
pub fn part_first_byte(mut self, inp: i64) -> Self {
self.inner = self.inner.part_first_byte(inp);
self
}
/// <p>The position of the first byte of the layer part witin the overall image layer.</p>
pub fn set_part_first_byte(mut self, input: std::option::Option<i64>) -> Self {
self.inner = self.inner.set_part_first_byte(input);
self
}
/// <p>The position of the last byte of the layer part within the overall image layer.</p>
pub fn part_last_byte(mut self, inp: i64) -> Self {
self.inner = self.inner.part_last_byte(inp);
self
}
        /// <p>The position of the last byte of the layer part within the overall image layer.</p>
        pub fn set_part_last_byte(mut self, input: std::option::Option<i64>) -> Self {
            // Option-taking variant of `part_last_byte`.
            self.inner = self.inner.set_part_last_byte(input);
            self
        }
        /// <p>The base64-encoded layer part payload.</p>
        pub fn layer_part_blob(mut self, inp: aws_smithy_types::Blob) -> Self {
            // Delegate to the generated operation-input builder.
            self.inner = self.inner.layer_part_blob(inp);
            self
        }
        /// <p>The base64-encoded layer part payload.</p>
        pub fn set_layer_part_blob(
            mut self,
            input: std::option::Option<aws_smithy_types::Blob>,
        ) -> Self {
            // Option-taking variant of `layer_part_blob`.
            self.inner = self.inner.set_layer_part_blob(input);
            self
        }
}
}
impl<C> Client<C, aws_hyper::AwsMiddleware, aws_smithy_client::retry::Standard> {
    /// Creates a client with the given service config and connector override.
    ///
    /// Retry/timeout settings fall back to their defaults when not present in
    /// `conf`; the sleep implementation is propagated to the inner client.
    pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
        let mut client = aws_hyper::Client::new(conn)
            .with_retry_config(
                conf.retry_config.as_ref().cloned().unwrap_or_default().into(),
            )
            .with_timeout_config(conf.timeout_config.as_ref().cloned().unwrap_or_default());
        client.set_sleep_impl(conf.sleep_impl.clone());
        let handle = Handle { client, conf };
        Self {
            handle: std::sync::Arc::new(handle),
        }
    }
}
impl
    Client<
        aws_smithy_client::erase::DynConnector,
        aws_hyper::AwsMiddleware,
        aws_smithy_client::retry::Standard,
    >
{
    /// Creates a new client from a shared config.
    #[cfg(any(feature = "rustls", feature = "native-tls"))]
    pub fn new(config: &aws_types::config::Config) -> Self {
        Self::from_conf(config.into())
    }

    /// Creates a new client from the service [`Config`](crate::Config).
    ///
    /// Uses the default HTTPS connector; retry/timeout settings fall back to
    /// their defaults when not present in `conf`.
    #[cfg(any(feature = "rustls", feature = "native-tls"))]
    pub fn from_conf(conf: crate::Config) -> Self {
        let mut client = aws_hyper::Client::https()
            .with_retry_config(
                conf.retry_config.as_ref().cloned().unwrap_or_default().into(),
            )
            .with_timeout_config(conf.timeout_config.as_ref().cloned().unwrap_or_default());
        client.set_sleep_impl(conf.sleep_impl.clone());
        let handle = Handle { client, conf };
        Self {
            handle: std::sync::Arc::new(handle),
        }
    }
}
| 47.930502 | 257 | 0.60968 |
282ff07c829e5a331b32beb589cc80d6942b87dc | 10,946 | use std::rc::Rc;
use std::collections::HashMap;
use std::iter::FromIterator;
use std::path::PathBuf;
use std::string::ToString;
use serde::{Serialize, Deserialize, de::DeserializeOwned};
use crate::convert;
use crate::auth;
use google_datastore1::RunQueryRequest;
pub use crate::auth::Auth;
///////////////////////////////////////////////////////////////////////////////
// HELPERS
///////////////////////////////////////////////////////////////////////////////
/// Contract for types stored in Datastore: each type names its entity *kind*
/// and each value names its unique entity *name*; together they form the
/// Datastore key used by the client below.
pub trait EntityKey {
    /// Kind component of the key, shared by all values of the type.
    fn entity_kind_key() -> String;
    /// Name component of the key, unique per value.
    fn entity_name_key(&self) -> String;
}
/// Errors produced by `DatastoreClient` operations.
#[derive(Debug)]
pub enum Error {
    /// A value could not be converted into Datastore properties.
    Serialization {
        msg: String,
    },
    /// A Datastore entity could not be converted back into the target type.
    Deserialization {
        msg: String,
    },
    /// The Datastore API call itself failed.
    DatabaseResponse(google_datastore1::Error),
    /// The response contained no usable entity payload.
    NoPayload,
}
unsafe impl Send for Error {}
///////////////////////////////////////////////////////////////////////////////
// CLIENT
///////////////////////////////////////////////////////////////////////////////
type Handle = google_datastore1::Datastore<hyper::Client, auth::Auth>;
/// Cheaply-cloneable (single-threaded, via `Rc`) handle to the Google
/// Datastore API for one project.
#[derive(Clone)]
pub struct DatastoreClient {
    // Shared handle to the generated API hub.
    handle: Rc<Handle>,
    // GCP project that all requests are issued against.
    project_id: String,
}
impl DatastoreClient {
/// Automatically finds auth credentials.
/// See `Auth::new()` for auth related details.
pub fn new() -> Result<Self, String> {
let auth = Auth::new()?;
DatastoreClient::new_with_auth(auth)
}
pub fn new_with_auth(auth: Auth) -> Result<Self, String> {
let project_id = auth.project_id.clone();
let client = hyper::Client::with_connector(
hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())
);
let hub = google_datastore1::Datastore::new(client, auth);
Ok(DatastoreClient {
handle: Rc::new(hub),
project_id,
})
}
pub fn insert<T: Serialize + EntityKey>(&self, value: T) -> Result<(), Error> {
let kind_key = T::entity_kind_key();
let name_key = value.entity_name_key();
let properties = convert::to_datastore_value(value)
.and_then(|value| {
value.entity_value
})
.and_then(|x| x.properties)
.ok_or(Error::Serialization {
msg: String::from("expecting struct/map like input")
})?;
let entity = google_datastore1::Entity {
properties: Some(properties),
key: Some(google_datastore1::Key {
path: Some(vec![
google_datastore1::PathElement {
kind: Some(kind_key.to_owned()),
name: Some(name_key.to_owned()),
id: None
}
]),
partition_id: None
})
};
let req = google_datastore1::CommitRequest {
transaction: None,
mutations: Some(vec![
google_datastore1::Mutation {
insert: Some(entity),
delete: None,
update: None,
base_version: None,
upsert: None
}
]),
mode: Some(String::from("NON_TRANSACTIONAL"))
};
let result = self.handle
.projects()
.commit(req, &self.project_id)
.doit();
match result {
Ok(_) => Ok(()),
Err(e) => Err(Error::DatabaseResponse(e))
}
}
pub fn upsert<T: Serialize + EntityKey>(&self, value: T) -> Result<(), Error> {
let kind_key = T::entity_kind_key();
let name_key = value.entity_name_key();
let properties = convert::to_datastore_value(value)
.and_then(|value| {
value.entity_value
})
.and_then(|x| x.properties)
.ok_or(Error::Serialization {
msg: String::from("expecting struct/map like input")
})?;
let entity = google_datastore1::Entity {
properties: Some(properties),
key: Some(google_datastore1::Key {
path: Some(vec![
google_datastore1::PathElement {
kind: Some(kind_key.to_owned()),
name: Some(name_key.to_owned()),
id: None
}
]),
partition_id: None
})
};
let req = google_datastore1::CommitRequest {
transaction: None,
mutations: Some(vec![
google_datastore1::Mutation {
insert: None,
delete: None,
update: None,
base_version: None,
upsert: Some(entity),
}
]),
mode: Some(String::from("NON_TRANSACTIONAL"))
};
let result = self.handle
.projects()
.commit(req, &self.project_id)
.doit();
match result {
Ok(_) => Ok(()),
Err(e) => Err(Error::DatabaseResponse(e))
}
}
pub fn update<T: Serialize + EntityKey>(&self, value: T) -> Result<(), Error> {
let kind_key = T::entity_kind_key();
let name_key = value.entity_name_key();
let properties = convert::to_datastore_value(value)
.and_then(|value| {
value.entity_value
})
.and_then(|x| x.properties)
.ok_or(Error::Serialization {
msg: String::from("expecting struct/map like input")
})?;
let entity = google_datastore1::Entity {
properties: Some(properties),
key: Some(google_datastore1::Key {
path: Some(vec![
google_datastore1::PathElement {
kind: Some(kind_key.to_owned()),
name: Some(name_key.to_owned()),
id: None
}
]),
partition_id: None
})
};
let req = google_datastore1::CommitRequest {
transaction: None,
mutations: Some(vec![
google_datastore1::Mutation {
insert: None,
delete: None,
update: Some(entity),
base_version: None,
upsert: None,
}
]),
mode: Some(String::from("NON_TRANSACTIONAL"))
};
let result = self.handle
.projects()
.commit(req, &self.project_id)
.doit();
match result {
Ok(_) => Ok(()),
Err(e) => Err(Error::DatabaseResponse(e))
}
}
pub fn get<T: DeserializeOwned + EntityKey, K: ToString>(&self, name_key: K) -> Result<T, Error> {
let kind_key = T::entity_kind_key();
let req = google_datastore1::LookupRequest {
keys: Some(vec![
google_datastore1::Key {
path: Some(vec![
google_datastore1::PathElement {
kind: Some(kind_key),
name: Some(name_key.to_string()),
id: None
}
]),
partition_id: None
}]),
read_options: None
};
let result = self.handle
.projects()
.lookup(req, &self.project_id)
.doit();
match result {
Ok((_, lookup_response)) => {
let payload = lookup_response.found
.and_then(|entities| {
entities.first().map(|x| x.clone())
})
.and_then(|x| x.entity)
.ok_or(Error::NoPayload)?;
convert::from_datastore_entity(payload.clone())
.ok_or_else(|| {
Error::Deserialization {
msg: String::from("conversion or parser error")
}
})
}
Err(e) => Err(Error::DatabaseResponse(e)),
}
}
pub fn list<T: DeserializeOwned + EntityKey>(&self) -> Result<Vec<T>, Error> {
let kind_key = T::entity_kind_key();
let mut query = RunQueryRequest{
query: Some(google_datastore1::Query{
start_cursor: None,
kind: Some(vec![ google_datastore1::KindExpression { name: Some(kind_key)} ]),
projection: None,
distinct_on: None,
filter: None,
limit: None,
offset: None,
end_cursor:None,
order: None,
}),
partition_id: None,
gql_query: None,
read_options: None,
};
let result = self.handle
.projects()
//.lookup(req, &self.project_id)
.run_query(query, &self.project_id)
.doit();
match result {
Ok((_, query_response)) => {
let payload = query_response.batch
.and_then(|batch| batch.entity_results )
.and_then(|entities| {
Some(entities.into_iter().filter_map(|x| x.entity)
.filter_map(|x| convert::from_datastore_entity(x.clone()))
.collect::<Vec<T>>())
})
.ok_or(Error::NoPayload)?;
Ok(payload)
}
Err(e) => Err(Error::DatabaseResponse(e)),
}
}
pub fn delete<T: EntityKey, K: ToString>(&self, name_key: K) -> Result<(), Error> {
let kind_key = T::entity_kind_key();
let name_key = name_key.to_string();
let entity_key = google_datastore1::Key {
path: Some(vec![
google_datastore1::PathElement {
kind: Some(kind_key.to_owned()),
name: Some(name_key.to_owned()),
id: None
}
]),
partition_id: None
};
let req = google_datastore1::CommitRequest {
transaction: None,
mutations: Some(vec![
google_datastore1::Mutation {
insert: None,
delete: Some(entity_key),
update: None,
base_version: None,
upsert: None,
}
]),
mode: Some(String::from("NON_TRANSACTIONAL"))
};
let result = self.handle
.projects()
.commit(req, &self.project_id)
.doit();
match result {
Ok(_) => Ok(()),
Err(e) => Err(Error::DatabaseResponse(e))
}
}
}
| 33.993789 | 102 | 0.457793 |
deff7bab00dc0eca19135e7d0810513bd5c0ce54 | 2,010 | // Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Utility features shared by both the decoder and encoder VDA backends.
use super::{error::VideoError, format::Profile};
/// Transparent conversion from libvda error to VideoError backend failure.
impl From<libvda::Error> for VideoError {
    fn from(error: libvda::Error) -> Self {
        VideoError::BackendFailure(Box::new(error))
    }
}
// Generates bidirectional conversion helpers between `libvda::Profile`
// variants ($x) and local `Profile` variants ($y) from a list of pairs.
macro_rules! impl_libvda_conversion {
    ( $( ( $x:ident, $y:ident ) ),* ) => {
        // libvda -> local profile; pairs not listed map to `None`.
        pub fn from_libvda_profile(p: libvda::Profile) -> Option<Self> {
            match p {
                $(libvda::Profile::$x => Some(Self::$y),)*
                _ => None
            }
        }
        // local -> libvda profile; only compiled for the encoder backend.
        #[cfg(feature = "video-encoder")]
        pub fn to_libvda_profile(&self) -> Option<libvda::Profile> {
            match self {
                $(Self::$y => Some(libvda::Profile::$x),)*
                _ => None
            }
        }
    }
}
impl Profile {
    // Pair table consumed by `impl_libvda_conversion`: each entry is
    // (libvda::Profile variant, local Profile variant).
    impl_libvda_conversion!(
        (H264ProfileBaseline, H264Baseline),
        (H264ProfileMain, H264Main),
        (H264ProfileExtended, H264Extended),
        (H264ProfileHigh, H264High),
        (H264ProfileHigh10Profile, H264High10),
        (H264ProfileHigh422Profile, H264High422),
        (
            H264ProfileHigh444PredictiveProfile,
            H264High444PredictiveProfile
        ),
        (H264ProfileScalableBaseline, H264ScalableBaseline),
        (H264ProfileScalableHigh, H264ScalableHigh),
        (H264ProfileStereoHigh, H264StereoHigh),
        (H264ProfileMultiviewHigh, H264MultiviewHigh),
        (HevcProfileMain, HevcMain),
        (HevcProfileMain10, HevcMain10),
        (HevcProfileMainStillPicture, HevcMainStillPicture),
        (VP8, VP8Profile0),
        (VP9Profile0, VP9Profile0),
        (VP9Profile1, VP9Profile1),
        (VP9Profile2, VP9Profile2),
        (VP9Profile3, VP9Profile3)
    );
}
| 33.5 | 75 | 0.61791 |
69984fbb62f1467bbcc8309bcbb34890db1e3995 | 506 | // Test invoked `&self` methods on owned objects where the values
// closed over contain managed values. This implies that the boxes
// will have headers that must be skipped over.
#![feature(box_syntax)]
// Object-safe trait taking `self` by `Box`, so it can be invoked through a
// `Box<dyn FooTrait>` trait object (the scenario this test exercises).
trait FooTrait {
    fn foo(self: Box<Self>) -> usize;
}
// Concrete implementor whose single field is returned by `foo`.
struct BarStruct {
    x: usize
}
impl FooTrait for BarStruct {
    // Consumes the box and reads the field through it.
    fn foo(self: Box<BarStruct>) -> usize {
        self.x
    }
}
pub fn main() {
    // `box` (feature-gated at the top of the file) heap-allocates; the `as`
    // coercion erases the concrete type before the by-box method call.
    let foo = box BarStruct{ x: 22 } as Box<dyn FooTrait>;
    assert_eq!(22, foo.foo());
}
| 20.24 | 66 | 0.650198 |
29bba74f977befd223935b254d94334bd8c9f78c | 961 | mod network {
#[derive(Clone)]
pub struct PipeWrap {
pipe_fd: Vec<usize>,
}
impl PipeWrap {
fn new() -> Self {
let mut fd: Vec<usize> = vec![];
fd.push(usize::MAX);
fd.push(usize::MAX);
Self {
pipe_fd: fd,
}
}
}
pub trait PipeWrapTrait {
fn write(buf: *const (), n: usize) -> usize;
fn read(buf: *mut (), n: usize) -> usize;
fn readfd(&self) -> usize;
fn writefd(&self) -> usize;
fn clearfd(&self);
}
/*
impl PipeWrapTrait for PipeWrap {
fn write(buf: *const (), n: usize) -> usize {
}
fn read(buf: *mut (), n: usize) -> usize {
}
fn readfd(&self) -> usize {
return self.pipe_fd[0].clone();
}
fn writefd(&self) -> usize {
return self.pipe_fd[1].clone();
}
}
*/
}
| 24.025 | 53 | 0.426639 |
f5c676e87dc8ea430a6ff9a4604cfa07a87c12b7 | 3,232 | // Built-in deps
use std::{env, fmt, time::Instant};
// External imports
use async_trait::async_trait;
use deadpool::managed::{Manager, PoolConfig, RecycleResult, Timeouts};
use sqlx::{Connection, Error as SqlxError, PgConnection};
// Local imports
// use self::recoverable_connection::RecoverableConnection;
use crate::StorageProcessor;
use zksync_utils::parse_env;
pub mod holder;
type Pool = deadpool::managed::Pool<PgConnection, SqlxError>;
pub type PooledConnection = deadpool::managed::Object<PgConnection, SqlxError>;
// deadpool `Manager` implementor; holds the connection string used to open
// every new Postgres connection.
#[derive(Clone)]
struct DbPool {
    url: String,
}
impl DbPool {
    // Builds a deadpool-managed pool of at most `max_size` connections to
    // `url`; acquisition waits up to 20 seconds before failing.
    fn create(url: impl Into<String>, max_size: usize) -> Pool {
        let pool_config = PoolConfig {
            max_size,
            timeouts: Timeouts::wait_millis(20_000), // wait 20 seconds before returning error
        };
        Pool::from_config(DbPool { url: url.into() }, pool_config)
    }
}
#[async_trait]
impl Manager<PgConnection, SqlxError> for DbPool {
    // Opens a fresh Postgres connection for the pool.
    async fn create(&self) -> Result<PgConnection, SqlxError> {
        PgConnection::connect(&self.url).await
    }
    // Health-checks a pooled connection before reuse; a failed ping is
    // propagated so deadpool discards the connection.
    async fn recycle(&self, obj: &mut PgConnection) -> RecycleResult<SqlxError> {
        Ok(obj.ping().await?)
    }
}
/// `ConnectionPool` is a wrapper over a `deadpool` managed `Pool`,
/// encapsulating the fixed size pool of connections to the database.
///
/// The size of the pool and the database URL are configured via environment
/// variables `DATABASE_POOL_SIZE` and `DATABASE_URL` respectively.
#[derive(Clone)]
pub struct ConnectionPool {
    pool: Pool,
}
impl fmt::Debug for ConnectionPool {
    /// Manual `Debug` because the inner deadpool `Pool` does not derive it.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The previous output ("Recoverable connection") referred to the
        // removed `RecoverableConnection` type; report the actual type name.
        write!(f, "ConnectionPool")
    }
}
impl ConnectionPool {
    /// Establishes a pool of the connections to the database and
    /// creates a new `ConnectionPool` object.
    /// pool_max_size - number of connections in pool, if not set env variable "DATABASE_POOL_SIZE" is going to be used.
    pub fn new(pool_max_size: Option<u32>) -> Self {
        let database_url = Self::get_database_url();
        let max_size = pool_max_size.unwrap_or_else(|| parse_env("DATABASE_POOL_SIZE"));
        let pool = DbPool::create(database_url, max_size as usize);
        Self { pool }
    }
    /// Creates a `StorageProcessor` entity over a recoverable connection.
    /// Upon a database outage connection will block the thread until
    /// it will be able to recover the connection (or, if connection cannot
    /// be restored after several retries, this will be considered as
    /// irrecoverable database error and result in panic).
    ///
    /// This method is intended to be used in crucial contexts, where the
    /// database access is must-have (e.g. block committer).
    pub async fn access_storage(&self) -> Result<StorageProcessor<'_>, SqlxError> {
        let start = Instant::now();
        // NOTE(review): this `unwrap` panics if the pool cannot hand out a
        // connection within the manager's 20s wait timeout, despite the
        // `Result` return type — consider mapping the deadpool error into
        // `SqlxError` instead.
        let connection = self.pool.get().await.unwrap();
        // Records how long connection acquisition took.
        metrics::histogram!("sql.connection_acquire", start.elapsed());
        Ok(StorageProcessor::from_pool(connection))
    }
    /// Obtains the database URL from the environment variable.
    fn get_database_url() -> String {
        env::var("DATABASE_URL").expect("DATABASE_URL must be set")
    }
}
| 34.752688 | 120 | 0.684406 |
bf7102f61e67f9ed5ce7156934c8a828994d7348 | 13,561 | // Copyright (c) 2018-2021 The MobileCoin Foundation
//! Abstract interface to the fog recovery database
#![no_std]
#![deny(missing_docs)]
extern crate alloc;
mod types;
use alloc::{string::String, vec::Vec};
use core::fmt::{Debug, Display};
use mc_crypto_keys::CompressedRistrettoPublic;
use mc_fog_kex_rng::KexRngPubkey;
use mc_fog_types::view::TxOutSearchResult;
pub use mc_fog_types::{common::BlockRange, ETxOutRecord};
pub use mc_transaction_core::Block;
pub use types::{
AddBlockDataStatus, FogUserEvent, IngestInvocationId, IngestableRange, IngressPublicKeyRecord,
IngressPublicKeyStatus, ReportData,
};
/// Contains fields that are used as filters in queries for ingress keys.
pub struct IngressPublicKeyRecordFilters {
    /// If set to true, the query will also include ingress keys that are
    /// marked lost.
    pub should_include_lost_keys: bool,
    /// If set to true, the query will also include ingress keys that are
    /// marked retired.
    pub should_include_retired_keys: bool,
    /// If set to true, the query will only include keys that are unexpired,
    /// which means that the key's last scanned block is less than the key's
    /// public expiry.
    pub should_only_include_unexpired_keys: bool,
}
/// A generic error type for recovery db operations.
pub trait RecoveryDbError: Debug + Display + Send + Sync {
    /// Policy decision about whether the failed operation should be retried
    /// (e.g. a transient connection issue) rather than surfaced immediately.
    fn should_retry(&self) -> bool;
}
/// The recovery database interface.
pub trait RecoveryDb {
    /// The error type returned by the various calls in this trait.
    type Error: RecoveryDbError;
    /// Get the status of an ingress public key.
    fn get_ingress_key_status(
        &self,
        key: &CompressedRistrettoPublic,
    ) -> Result<Option<IngressPublicKeyStatus>, Self::Error>;
    /// Add a new ingress public key, which does not currently exist in the DB.
    ///
    /// Arguments:
    /// * key: the public key
    /// * start_block_count: the first block count we promise to scan with this
    ///   key
    ///
    /// Returns
    /// * The accepted start block count
    fn new_ingress_key(
        &self,
        key: &CompressedRistrettoPublic,
        start_block_count: u64,
    ) -> Result<u64, Self::Error>;
    /// Mark an ingress public key for retiring.
    ///
    /// Passing set_retired = true will make all servers using it stop
    /// publishing reports, continue scanning to pubkey expiry value, and
    /// then stop. set_retired = false will cause it to not be marked for
    /// retiring anymore, if it was marked for retiring by mistake.
    fn retire_ingress_key(
        &self,
        key: &CompressedRistrettoPublic,
        set_retired: bool,
    ) -> Result<(), Self::Error>;
    /// Get the index of the last block scanned using this ingress key, if any.
    ///
    /// Arguments:
    /// * key: the ingress key
    ///
    /// Returns:
    /// * Some(BlockIndex) if blocks have already been scanned using this key,
    ///   None if no blocks have been scanned using this key.
    fn get_last_scanned_block_index(
        &self,
        key: &CompressedRistrettoPublic,
    ) -> Result<Option<u64>, Self::Error>;
    /// Get all ingress key records in the database.
    ///
    /// The records will be filtered so that records whose start block is less
    /// than the given number won't be returned.
    fn get_ingress_key_records(
        &self,
        start_block_at_least: u64,
        ingress_public_key_record_filters: &IngressPublicKeyRecordFilters,
    ) -> Result<Vec<IngressPublicKeyRecord>, Self::Error>;
    /// Adds a new ingest invocation to the database, optionally decommissioning
    /// an older one.
    ///
    /// This should be done when the ingest enclave is processing block data,
    /// and the ORAM overflows and the KexRngPubkey is rotated by the enclave.
    /// And, when the ingest enclave starts up and creates a KexRngPubkey, just
    /// before it starts consuming transactions.
    ///
    /// This decommissions the old ingest invocation id and creates a new one,
    /// associated to the new public keys.
    /// Arguments:
    /// * prev_ingest_invocation_id: The previous unique ingest invocation id to
    ///   retire
    /// * ingress_public_key: The ingest server ingress public key, as reported
    ///   to the report server.
    /// * egress_public_key: The kex rng pubkey emitted by the ingest enclave
    /// * start_block: The first block index this ingest invocation will start
    ///   ingesting from.
    fn new_ingest_invocation(
        &self,
        prev_ingest_invocation_id: Option<IngestInvocationId>,
        ingress_public_key: &CompressedRistrettoPublic,
        egress_public_key: &KexRngPubkey,
        start_block: u64,
    ) -> Result<IngestInvocationId, Self::Error>;
    /// Get the list of blocks that the fog deployment is able to ingest.
    // TODO: Allow filtering so that we don't always get the entire list.
    fn get_ingestable_ranges(&self) -> Result<Vec<IngestableRange>, Self::Error>;
    /// Decommission a given ingest invocation.
    ///
    /// This should be done when a given ingest enclave goes down or is retired.
    ///
    /// Arguments:
    /// * ingest_invocation_id: The unique ingest invocation id that has been
    ///   retired
    fn decommission_ingest_invocation(
        &self,
        ingest_invocation_id: &IngestInvocationId,
    ) -> Result<(), Self::Error>;
    /// Add records corresponding to a FULLY PROCESSED BLOCK to the database
    ///
    /// Arguments:
    /// * ingest_invocation_id: The unique ingest invocation id this block was
    ///   processed by.
    /// * block: The block that was processed.
    /// * block_signature_timestamp: Seconds since the unix epoch when the block
    ///   was signed
    /// * txs: ETxOutRecords that the ingest enclave emitted when processing
    ///   this block
    fn add_block_data(
        &self,
        ingest_invocation_id: &IngestInvocationId,
        block: &Block,
        block_signature_timestamp: u64,
        txs: &[ETxOutRecord],
    ) -> Result<AddBlockDataStatus, Self::Error>;
    /// Report that an ingress key has been lost irrecoverably.
    ///
    /// This occurs if all the enclaves that have the key are lost.
    /// If we have not scanned all the blocks up to pubkey_expiry for this key,
    /// then the remaining blocks are "missed blocks", and clients will have to
    /// download these blocks and view-key scan them.
    ///
    /// When this call is made,
    /// * the key is marked as lost in the database,
    /// * the half-open range [last-scanned + 1, pubkey_expiry) is registered as
    ///   a missed block range, if that range is not empty.
    ///
    /// When all the enclaves that have the key are lost, but the key is not
    /// reported lost, the view server will be blocked from increasing
    /// "highest_processed_block_count" value, because it is still expecting
    /// more data to be produced against this key. From the client's point
    /// of view, it is as if fog stopped making progress relative to the ledger,
    /// but the balance check process still returns a balance that was correct
    /// at that point in time.
    ///
    /// Once a key is published to the users, producing more blocks scanned with
    /// it, or reporting the key lost, is the only way to allow the view
    /// server to make progress, so that clients do not compute incorrect
    /// balances.
    ///
    /// Arguments:
    /// * lost_ingress_key: The ingress key that is marked lost.
    fn report_lost_ingress_key(
        &self,
        lost_ingress_key: CompressedRistrettoPublic,
    ) -> Result<(), Self::Error>;
    /// Gets all the known missed block ranges.
    ///
    /// Returns:
    /// * A vector of missing block ranges.
    fn get_missed_block_ranges(&self) -> Result<Vec<BlockRange>, Self::Error>;
    /// Get any events which are new after `start_from_user_event_id`.
    ///
    /// Arguments:
    /// * start_from_user_event_id: The last event id the user has received.
    ///
    /// Returns:
    /// * List of found events, and the highest event id in the database (to be
    ///   used as start_from_user_event_id in the next query).
    fn search_user_events(
        &self,
        start_from_user_event_id: i64,
    ) -> Result<(Vec<FogUserEvent>, i64), Self::Error>;
    /// Get any TxOutSearchResults corresponding to given search keys.
    /// Nonzero start_block can be provided as an optimization opportunity.
    ///
    /// Arguments:
    /// * start_block: A lower bound on where we will search. This can often be
    ///   provided by the user in order to limit the scope of the search and
    ///   reduce load on the servers.
    /// * search_keys: A list of fog tx_out search keys to search for.
    ///
    /// Returns:
    /// * Exactly one TxOutSearchResult object for every search key, or an
    ///   internal database error description.
    fn get_tx_outs(
        &self,
        start_block: u64,
        search_keys: &[Vec<u8>],
    ) -> Result<Vec<TxOutSearchResult>, Self::Error>;
    /// Mark a given ingest invocation as still being alive.
    fn update_last_active_at(
        &self,
        ingest_invocation_id: &IngestInvocationId,
    ) -> Result<(), Self::Error>;
    /// Get any ETxOutRecords produced by a given ingress key for a given
    /// block index.
    ///
    /// Arguments:
    /// * ingress_key: The ingress_key we need ETxOutRecords from
    /// * block_index: The block we need ETxOutRecords from
    ///
    /// Returns:
    /// * Ok(None) if this block has not been scanned with this key.
    ///   Ok(Some(data)) with the ETxOutRecord's from when this block was added,
    ///   An error if there is a database error
    fn get_tx_outs_by_block_and_key(
        &self,
        ingress_key: CompressedRistrettoPublic,
        block_index: u64,
    ) -> Result<Option<Vec<ETxOutRecord>>, Self::Error>;
    /// Get the invocation id that published this block with this key.
    ///
    /// Note: This is only used by TESTS right now, but it is important to be
    /// able to test this
    ///
    /// Arguments:
    /// * ingress_key: The ingress key we are interested in
    /// * block_index: the block we are interested in
    ///
    /// Returns:
    /// * Ok(None) if this block has not been scanned with this key
    ///   Ok(Some(iid)) if this block has been scanned with this key, and iid is
    ///   the invocation id that did it An error if there was a database error
    fn get_invocation_id_by_block_and_key(
        &self,
        ingress_key: CompressedRistrettoPublic,
        block_index: u64,
    ) -> Result<Option<IngestInvocationId>, Self::Error>;
    /// Get the cumulative txo count for a given block number.
    ///
    /// Arguments:
    /// * block_index: The block we need cumulative_txo_count for.
    ///
    /// Returns:
    /// * Some(cumulative_txo_count) if the block was found in the database,
    ///   None if it wasn't, or
    ///   an error if the query failed.
    fn get_cumulative_txo_count_for_block(
        &self,
        block_index: u64,
    ) -> Result<Option<u64>, Self::Error>;
    /// Get the block signature timestamp for a given block number.
    /// This is a number of seconds since the unix epoch.
    ///
    /// Arguments:
    /// * block_index: The block we need the signature timestamp for.
    ///
    /// Returns:
    /// * Some(timestamp) if the block was found in the database, None if it
    ///   wasn't, or
    ///   an error if the query failed.
    ///
    /// Note: It is unspecified which invocation id we use when giving the
    /// timestamp
    fn get_block_signature_timestamp_for_block(
        &self,
        block_index: u64,
    ) -> Result<Option<u64>, Self::Error>;
    /// Get the highest block index for which we have any data at all.
    fn get_highest_known_block_index(&self) -> Result<Option<u64>, Self::Error>;
}
/// The report database interface.
pub trait ReportDb {
    /// The error type returned by the various calls in this trait.
    type Error: RecoveryDbError;
    /// Get all available report data.
    /// Note: We always give the user all the report data, because it is a
    /// privacy issue if the user divulges which report they care about.
    /// There are not expected to be very many reports.
    /// If there are many reports, then this should be redesigned to use an
    /// oblivious lookup strategy inside of an sgx enclave.
    ///
    /// Returns:
    /// * Pairs of the form (report-id, report-data)
    fn get_all_reports(&self) -> Result<Vec<(String, ReportData)>, Self::Error>;
    /// Set report data associated with a given report id, unless the public key
    /// is retired.
    ///
    /// Arguments:
    /// * ingress_public_key - the public key signed by this report
    /// * report_id - the report_id associated to the report. This should
    ///   almost always be the empty string.
    /// * data - The IAS verification report and cert chain.
    ///
    /// Returns:
    /// * The status of this ingress public key in the database. If the status
    ///   is retired, then this set operation DID NOT HAPPEN, and no changes
    ///   were made to the database.
    fn set_report(
        &self,
        ingress_public_key: &CompressedRistrettoPublic,
        report_id: &str,
        data: &ReportData,
    ) -> Result<IngressPublicKeyStatus, Self::Error>;
    /// Remove report data associated with a given report id.
    fn remove_report(&self, report_id: &str) -> Result<(), Self::Error>;
}
| 38.416431 | 98 | 0.661529 |
72dc70d93237fd55e7c1c2ec3356906b5e478f81 | 1,017 | // traits2.rs
//
// Your task is to implement the trait
// `AppendBar' for a vector of strings.
//
// To implement this trait, consider for
// a moment what it means to 'append "Bar"'
// to a vector of strings.
//
// No boiler plate code this time,
// you can do this!
/// Appends the literal "Bar" to a value, consuming and returning it.
trait AppendBar {
    fn append_bar(self) -> Self;
}

impl AppendBar for String {
    /// For a `String`, appending "Bar" is plain in-place concatenation.
    fn append_bar(mut self) -> Self {
        self.push_str("Bar");
        self
    }
}

impl AppendBar for Vec<String> {
    /// For a vector of strings, appending "Bar" means pushing a new
    /// `String` element "Bar" onto the end.
    fn append_bar(mut self) -> Self {
        self.push(String::from("Bar"));
        self
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn is_vec_pop_eq_bar() {
        // LIFO pops: "Bar" (appended last) comes out first, then "Foo".
        let mut foo = vec![String::from("Foo")].append_bar();
        assert_eq!(foo.pop().unwrap(), String::from("Bar"));
        assert_eq!(foo.pop().unwrap(), String::from("Foo"));
    }
}
| 21.1875 | 67 | 0.570305 |
1c69a2f57c5eb73d55965b5aafca73a5f584ab9c | 28,355 | use crate::rust::{vec::Vec, string::String, borrow::ToOwned};
use crate::{io, elements};
use super::{
Serialize,
Deserialize,
Error,
VarUint7,
VarUint32,
CountedList,
ImportEntry,
MemoryType,
TableType,
ExportEntry,
GlobalEntry,
Func,
FuncBody,
ElementSegment,
DataSegment,
CountedWriter,
CountedListWriter,
External,
serialize,
};
use super::types::Type;
use super::name_section::NameSection;
use super::reloc_section::RelocSection;
const ENTRIES_BUFFER_LENGTH: usize = 16384;
/// Section in the WebAssembly module.
///
/// The `id=N` notes below match the ids read/written by the
/// `Deserialize`/`Serialize` impls for this type.
#[derive(Debug, Clone, PartialEq)]
pub enum Section {
    /// Section is unparsed.
    Unparsed {
        /// id of the unparsed section.
        id: u8,
        /// raw bytes of the unparsed section.
        payload: Vec<u8>,
    },
    /// Custom section (`id=0`).
    Custom(CustomSection),
    /// Types section (`id=1`).
    Type(TypeSection),
    /// Import section (`id=2`).
    Import(ImportSection),
    /// Function signatures section (`id=3`).
    Function(FunctionSection),
    /// Table definition section (`id=4`).
    Table(TableSection),
    /// Memory definition section (`id=5`).
    Memory(MemorySection),
    /// Global entries section (`id=6`).
    Global(GlobalSection),
    /// Export definitions (`id=7`).
    Export(ExportSection),
    /// Entry reference of the module (`id=8`): index of the start function.
    Start(u32),
    /// Elements section (`id=9`).
    Element(ElementSection),
    /// Number of passive data entries in the data section (`id=12`).
    DataCount(u32),
    /// Function bodies section (`id=10`).
    Code(CodeSection),
    /// Data definition section (`id=11`).
    Data(DataSection),
    /// Name section.
    ///
    /// Note that initially it is not parsed until `parse_names` is called explicitly.
    Name(NameSection),
    /// Relocation section.
    ///
    /// Note that initially it is not parsed until `parse_reloc` is called explicitly.
    /// Also note that currently there are serialization (but not de-serialization)
    /// issues with this section (#198).
    Reloc(RelocSection),
}
impl Deserialize for Section {
    type Error = Error;
    // Reads one section: a one-byte id followed by an id-specific payload.
    fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
        // Failing to read the id byte is treated as "no more sections" and
        // surfaced as `UnexpectedEof`.
        let id = match VarUint7::deserialize(reader) {
            // todo: be more selective detecting no more section
            Err(_) => { return Err(Error::UnexpectedEof); },
            Ok(id) => id,
        };
        Ok(
            // Section ids follow the wasm binary encoding (0 = custom,
            // 1 = type, ..., 12 = data count); see the matching ids in the
            // `Serialize` impl.
            match id.into() {
                0 => {
                    Section::Custom(CustomSection::deserialize(reader)?.into())
                },
                1 => {
                    Section::Type(TypeSection::deserialize(reader)?)
                },
                2 => {
                    Section::Import(ImportSection::deserialize(reader)?)
                },
                3 => {
                    Section::Function(FunctionSection::deserialize(reader)?)
                },
                4 => {
                    Section::Table(TableSection::deserialize(reader)?)
                },
                5 => {
                    Section::Memory(MemorySection::deserialize(reader)?)
                },
                6 => {
                    Section::Global(GlobalSection::deserialize(reader)?)
                },
                7 => {
                    Section::Export(ExportSection::deserialize(reader)?)
                },
                8 => {
                    // The start section's length-prefixed body holds a single
                    // function index.
                    let mut section_reader = SectionReader::new(reader)?;
                    let start_idx = VarUint32::deserialize(&mut section_reader)?;
                    section_reader.close()?;
                    Section::Start(start_idx.into())
                },
                9 => {
                    Section::Element(ElementSection::deserialize(reader)?)
                },
                10 => {
                    Section::Code(CodeSection::deserialize(reader)?)
                },
                11 => {
                    Section::Data(DataSection::deserialize(reader)?)
                },
                12 => {
                    // Data-count section: a single entry count in a
                    // length-prefixed body.
                    let mut section_reader = SectionReader::new(reader)?;
                    let count = VarUint32::deserialize(&mut section_reader)?;
                    section_reader.close()?;
                    Section::DataCount(count.into())
                },
                invalid_id => {
                    return Err(Error::InvalidSectionId(invalid_id))
                },
            }
        )
    }
}
impl Serialize for Section {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		// Each arm writes the one-byte section id followed by the section body.
		// Custom-flavoured sections (Custom, Name, Reloc) all use id 0x00.
		match self {
			Section::Custom(custom_section) => {
				VarUint7::from(0x00).serialize(writer)?;
				custom_section.serialize(writer)?;
			},
			Section::Unparsed { id, payload } => {
				// Raw pass-through: the payload is assumed to already contain
				// its own length prefix.
				VarUint7::from(id).serialize(writer)?;
				writer.write(&payload[..])?;
			},
			Section::Type(type_section) => {
				VarUint7::from(0x01).serialize(writer)?;
				type_section.serialize(writer)?;
			},
			Section::Import(import_section) => {
				VarUint7::from(0x02).serialize(writer)?;
				import_section.serialize(writer)?;
			},
			Section::Function(function_section) => {
				VarUint7::from(0x03).serialize(writer)?;
				function_section.serialize(writer)?;
			},
			Section::Table(table_section) => {
				VarUint7::from(0x04).serialize(writer)?;
				table_section.serialize(writer)?;
			},
			Section::Memory(memory_section) => {
				VarUint7::from(0x05).serialize(writer)?;
				memory_section.serialize(writer)?;
			},
			Section::Global(global_section) => {
				VarUint7::from(0x06).serialize(writer)?;
				global_section.serialize(writer)?;
			},
			Section::Export(export_section) => {
				VarUint7::from(0x07).serialize(writer)?;
				export_section.serialize(writer)?;
			},
			Section::Start(index) => {
				// Start and DataCount have no dedicated section type, so the
				// length-prefixed body is produced here via CountedWriter.
				VarUint7::from(0x08).serialize(writer)?;
				let mut counted_writer = CountedWriter::new(writer);
				VarUint32::from(index).serialize(&mut counted_writer)?;
				counted_writer.done()?;
			},
			Section::DataCount(count) => {
				VarUint7::from(0x0c).serialize(writer)?;
				let mut counted_writer = CountedWriter::new(writer);
				VarUint32::from(count).serialize(&mut counted_writer)?;
				counted_writer.done()?;
			},
			Section::Element(element_section) => {
				VarUint7::from(0x09).serialize(writer)?;
				element_section.serialize(writer)?;
			},
			Section::Code(code_section) => {
				VarUint7::from(0x0a).serialize(writer)?;
				code_section.serialize(writer)?;
			},
			Section::Data(data_section) => {
				VarUint7::from(0x0b).serialize(writer)?;
				data_section.serialize(writer)?;
			},
			Section::Name(name_section) => {
				// The name section is a custom section whose name is "name";
				// its body is re-serialized into the custom-section payload.
				VarUint7::from(0x00).serialize(writer)?;
				let custom = CustomSection {
					name: "name".to_owned(),
					payload: serialize(name_section)?,
				};
				custom.serialize(writer)?;
			},
			Section::Reloc(reloc_section) => {
				// NOTE(review): unlike Name, this is not wrapped into a named
				// CustomSection — presumably related to the serialization issue
				// referenced as #198 on the enum docs; confirm before relying on
				// round-tripping reloc sections.
				VarUint7::from(0x00).serialize(writer)?;
				reloc_section.serialize(writer)?;
			},
		}
		Ok(())
	}
}
impl Section {
	/// Sort key giving the canonical binary ordering of module sections.
	///
	/// All custom-flavoured sections (custom, unparsed, name, reloc) share
	/// the lowest key; the remaining keys place the data-count section
	/// between the element and code sections.
	pub(crate) fn order(&self) -> u8 {
		match *self {
			Section::Custom(_)
				| Section::Unparsed { .. }
				| Section::Name(_)
				| Section::Reloc(_) => 0x00,
			Section::Type(_) => 0x01,
			Section::Import(_) => 0x02,
			Section::Function(_) => 0x03,
			Section::Table(_) => 0x04,
			Section::Memory(_) => 0x05,
			Section::Global(_) => 0x06,
			Section::Export(_) => 0x07,
			Section::Start(_) => 0x08,
			Section::Element(_) => 0x09,
			Section::DataCount(_) => 0x0a,
			Section::Code(_) => 0x0b,
			Section::Data(_) => 0x0c,
		}
	}
}
/// Reader over the body of a single section.
///
/// The whole payload (sized by the section's leading length prefix) is
/// buffered up-front, so reads past the declared boundary fail instead of
/// consuming bytes belonging to the next section.
pub(crate) struct SectionReader {
	// In-memory cursor over the buffered section payload. Note `io` here is
	// the crate-local io module, not `std::io`.
	cursor: io::Cursor<Vec<u8>>,
	// Number of payload bytes actually buffered for this section.
	declared_length: usize,
}
impl SectionReader {
	/// Reads the section's length prefix from `reader` and buffers that many
	/// payload bytes.
	pub fn new<R: io::Read>(reader: &mut R) -> Result<Self, elements::Error> {
		let length = u32::from(VarUint32::deserialize(reader)?) as usize;
		let inner_buffer = buffered_read!(ENTRIES_BUFFER_LENGTH, length, reader);
		let buf_length = inner_buffer.len();
		let cursor = io::Cursor::new(inner_buffer);
		Ok(SectionReader {
			// Field-init shorthand (was the redundant `cursor: cursor`).
			cursor,
			declared_length: buf_length,
		})
	}
	/// Finishes reading the section; errors with `InvalidData` if the payload
	/// was not consumed exactly to its end.
	pub fn close(self) -> Result<(), io::Error> {
		let cursor = self.cursor;
		let buf_length = self.declared_length;
		if cursor.position() != buf_length {
			Err(io::Error::InvalidData)
		} else {
			Ok(())
		}
	}
}
impl io::Read for SectionReader {
	fn read(&mut self, buf: &mut [u8]) -> io::Result<()> {
		// Delegates to the buffered cursor. NOTE(review): `io` is the
		// crate-local io module whose `read` returns `Result<()>` — confirm
		// its exact-fill semantics against that module's contract.
		self.cursor.read(buf)?;
		Ok(())
	}
}
/// Reads a length-prefixed section body as a counted list of `T` entries,
/// verifying that the declared section length is consumed exactly.
fn read_entries<R: io::Read, T: Deserialize<Error=elements::Error>>(reader: &mut R)
	-> Result<Vec<T>, elements::Error>
{
	let mut body = SectionReader::new(reader)?;
	let entries = CountedList::<T>::deserialize(&mut body)?.into_inner();
	body.close()?;
	Ok(entries)
}
/// Custom section.
///
/// Carries an arbitrary name/payload pair; the payload bytes are opaque to
/// the wasm binary format itself.
#[derive(Debug, Default, Clone, PartialEq)]
pub struct CustomSection {
	name: String,
	payload: Vec<u8>,
}
impl CustomSection {
	/// Creates a new custom section with the given name and payload.
	pub fn new(name: String, payload: Vec<u8>) -> CustomSection {
		CustomSection { name, payload }
	}
	/// Name of the custom section.
	pub fn name(&self) -> &str {
		&self.name
	}
	/// Name of the custom section (mutable).
	pub fn name_mut(&mut self) -> &mut String {
		&mut self.name
	}
	/// Payload of the custom section.
	pub fn payload(&self) -> &[u8] {
		&self.payload
	}
	/// Payload of the custom section (mutable).
	pub fn payload_mut(&mut self) -> &mut Vec<u8> {
		&mut self.payload
	}
}
impl Deserialize for CustomSection {
	type Error = Error;
	/// Reads the section length prefix, buffers the body, then splits it into
	/// the leading name string and the remaining opaque payload bytes.
	fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
		let section_length: usize = u32::from(VarUint32::deserialize(reader)?) as usize;
		let buf = buffered_read!(16384, section_length, reader);
		let mut cursor = io::Cursor::new(&buf[..]);
		let name = String::deserialize(&mut cursor)?;
		// Everything after the name is the raw payload.
		let payload = buf[cursor.position() as usize..].to_vec();
		// Field-init shorthand (was `name: name, payload: payload`).
		Ok(CustomSection { name, payload })
	}
}
impl Serialize for CustomSection {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		use io::Write;
		// CountedWriter buffers the body so the section's length prefix can be
		// emitted first when `done` is called.
		let mut counted_writer = CountedWriter::new(writer);
		self.name.serialize(&mut counted_writer)?;
		counted_writer.write(&self.payload[..])?;
		counted_writer.done()?;
		Ok(())
	}
}
/// Section with type declarations.
#[derive(Debug, Default, Clone, PartialEq)]
pub struct TypeSection(Vec<Type>);
impl TypeSection {
	/// New type section with provided types.
	pub fn with_types(types: Vec<Type>) -> Self {
		TypeSection(types)
	}
	/// List of type declarations.
	pub fn types(&self) -> &[Type] {
		&self.0
	}
	/// List of type declarations (mutable).
	pub fn types_mut(&mut self) -> &mut Vec<Type> {
		&mut self.0
	}
}
impl Deserialize for TypeSection {
	type Error = Error;
	fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
		Ok(TypeSection(read_entries(reader)?))
	}
}
impl Serialize for TypeSection {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		// Length-prefixed body holding a counted list of type declarations.
		let mut body = CountedWriter::new(writer);
		let types = self.0;
		CountedListWriter::<Type, _>(
			types.len(),
			types.into_iter().map(Into::into),
		).serialize(&mut body)?;
		body.done()?;
		Ok(())
	}
}
/// Section of the imports definition.
#[derive(Debug, Default, Clone, PartialEq)]
pub struct ImportSection(Vec<ImportEntry>);
impl ImportSection {
	/// New import section with provided types.
	pub fn with_entries(entries: Vec<ImportEntry>) -> Self {
		ImportSection(entries)
	}
	/// List of import entries.
	pub fn entries(&self) -> &[ImportEntry] {
		&self.0
	}
	/// List of import entries (mutable).
	pub fn entries_mut(&mut self) -> &mut Vec<ImportEntry> {
		&mut self.0
	}
	/// Returns number of function imports in this section.
	pub fn functions(&self) -> usize {
		// `matches!` replaces the manual `match … => true, _ => false`.
		self.0.iter()
			.filter(|entry| matches!(entry.external(), External::Function(_)))
			.count()
	}
	/// Returns number of global imports in this section.
	pub fn globals(&self) -> usize {
		self.0.iter()
			.filter(|entry| matches!(entry.external(), External::Global(_)))
			.count()
	}
}
impl Deserialize for ImportSection {
	type Error = Error;
	fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
		Ok(ImportSection(read_entries(reader)?))
	}
}
impl Serialize for ImportSection {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		// Length-prefixed body holding a counted list of import entries.
		let mut counted_writer = CountedWriter::new(writer);
		let data = self.0;
		let counted_list = CountedListWriter::<ImportEntry, _>(
			data.len(),
			data.into_iter().map(Into::into),
		);
		counted_list.serialize(&mut counted_writer)?;
		counted_writer.done()?;
		Ok(())
	}
}
/// Section with function signatures definition.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct FunctionSection(Vec<Func>);
impl FunctionSection {
	/// New function signatures section with provided entries.
	pub fn with_entries(entries: Vec<Func>) -> Self {
		FunctionSection(entries)
	}
	/// List of all functions in the section.
	pub fn entries(&self) -> &[Func] {
		&self.0
	}
	/// List of all functions in the section, mutable.
	pub fn entries_mut(&mut self) -> &mut Vec<Func> {
		&mut self.0
	}
}
impl Deserialize for FunctionSection {
	type Error = Error;
	fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
		Ok(FunctionSection(read_entries(reader)?))
	}
}
impl Serialize for FunctionSection {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		// The binary form stores only each function's type index, so entries
		// are mapped down to their `type_ref` before writing.
		let mut body = CountedWriter::new(writer);
		let funcs = self.0;
		CountedListWriter::<VarUint32, _>(
			funcs.len(),
			funcs.into_iter().map(|func| func.type_ref().into())
		).serialize(&mut body)?;
		body.done()?;
		Ok(())
	}
}
/// Section with table definition (currently only one is allowed).
#[derive(Default, Debug, Clone, PartialEq)]
pub struct TableSection(Vec<TableType>);
impl TableSection {
	/// New table section with provided table entries.
	pub fn with_entries(entries: Vec<TableType>) -> Self {
		TableSection(entries)
	}
	/// Table entries.
	pub fn entries(&self) -> &[TableType] {
		&self.0
	}
	/// Mutable table entries.
	pub fn entries_mut(&mut self) -> &mut Vec<TableType> {
		&mut self.0
	}
}
impl Deserialize for TableSection {
	type Error = Error;
	fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
		Ok(TableSection(read_entries(reader)?))
	}
}
impl Serialize for TableSection {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		// Length-prefixed body holding a counted list of table types.
		let mut body = CountedWriter::new(writer);
		let tables = self.0;
		CountedListWriter::<TableType, _>(
			tables.len(),
			tables.into_iter().map(Into::into),
		).serialize(&mut body)?;
		body.done()?;
		Ok(())
	}
}
/// Section with memory definition (currently only one entry is allowed).
#[derive(Default, Debug, Clone, PartialEq)]
pub struct MemorySection(Vec<MemoryType>);
impl MemorySection {
	/// New memory section with memory types.
	pub fn with_entries(entries: Vec<MemoryType>) -> Self {
		MemorySection(entries)
	}
	/// List of all memory entries in the section.
	pub fn entries(&self) -> &[MemoryType] {
		&self.0
	}
	/// Mutable list of all memory entries in the section.
	pub fn entries_mut(&mut self) -> &mut Vec<MemoryType> {
		&mut self.0
	}
}
impl Deserialize for MemorySection {
	type Error = Error;
	fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
		Ok(MemorySection(read_entries(reader)?))
	}
}
impl Serialize for MemorySection {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		// Length-prefixed body holding a counted list of memory types.
		let mut body = CountedWriter::new(writer);
		let memories = self.0;
		CountedListWriter::<MemoryType, _>(
			memories.len(),
			memories.into_iter().map(Into::into),
		).serialize(&mut body)?;
		body.done()?;
		Ok(())
	}
}
/// Globals definition section.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct GlobalSection(Vec<GlobalEntry>);
impl GlobalSection {
	/// New global section from list of global entries.
	pub fn with_entries(entries: Vec<GlobalEntry>) -> Self {
		GlobalSection(entries)
	}
	/// List of all global entries in the section.
	pub fn entries(&self) -> &[GlobalEntry] {
		&self.0
	}
	/// List of all global entries in the section (mutable).
	pub fn entries_mut(&mut self) -> &mut Vec<GlobalEntry> {
		&mut self.0
	}
}
impl Deserialize for GlobalSection {
	type Error = Error;
	fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
		Ok(GlobalSection(read_entries(reader)?))
	}
}
impl Serialize for GlobalSection {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		// Length-prefixed body holding a counted list of global entries.
		let mut body = CountedWriter::new(writer);
		let globals = self.0;
		CountedListWriter::<GlobalEntry, _>(
			globals.len(),
			globals.into_iter().map(Into::into),
		).serialize(&mut body)?;
		body.done()?;
		Ok(())
	}
}
/// List of exports definition.
#[derive(Debug, Default, Clone, PartialEq)]
pub struct ExportSection(Vec<ExportEntry>);
impl ExportSection {
	/// New export section from list of export entries.
	pub fn with_entries(entries: Vec<ExportEntry>) -> Self {
		ExportSection(entries)
	}
	/// List of all export entries in the section.
	pub fn entries(&self) -> &[ExportEntry] {
		&self.0
	}
	/// List of all export entries in the section (mutable).
	pub fn entries_mut(&mut self) -> &mut Vec<ExportEntry> {
		&mut self.0
	}
}
impl Deserialize for ExportSection {
	type Error = Error;
	fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
		Ok(ExportSection(read_entries(reader)?))
	}
}
impl Serialize for ExportSection {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		// Length-prefixed body holding a counted list of export entries.
		let mut body = CountedWriter::new(writer);
		let exports = self.0;
		CountedListWriter::<ExportEntry, _>(
			exports.len(),
			exports.into_iter().map(Into::into),
		).serialize(&mut body)?;
		body.done()?;
		Ok(())
	}
}
/// Section with function bodies of the module.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct CodeSection(Vec<FuncBody>);
impl CodeSection {
	/// New code section with specified function bodies.
	pub fn with_bodies(bodies: Vec<FuncBody>) -> Self {
		CodeSection(bodies)
	}
	/// All function bodies in the section.
	pub fn bodies(&self) -> &[FuncBody] {
		&self.0
	}
	/// All function bodies in the section, mutable.
	pub fn bodies_mut(&mut self) -> &mut Vec<FuncBody> {
		&mut self.0
	}
}
impl Deserialize for CodeSection {
	type Error = Error;
	fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
		Ok(CodeSection(read_entries(reader)?))
	}
}
impl Serialize for CodeSection {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		// Length-prefixed body holding a counted list of function bodies.
		let mut body = CountedWriter::new(writer);
		let bodies = self.0;
		CountedListWriter::<FuncBody, _>(
			bodies.len(),
			bodies.into_iter().map(Into::into),
		).serialize(&mut body)?;
		body.done()?;
		Ok(())
	}
}
/// Element entries section.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct ElementSection(Vec<ElementSegment>);
impl ElementSection {
	/// New elements section.
	pub fn with_entries(entries: Vec<ElementSegment>) -> Self {
		ElementSection(entries)
	}
	/// List of all element entries in the section.
	pub fn entries(&self) -> &[ElementSegment] {
		&self.0
	}
	/// List of all element entries in the section (mutable).
	pub fn entries_mut(&mut self) -> &mut Vec<ElementSegment> {
		&mut self.0
	}
}
impl Deserialize for ElementSection {
	type Error = Error;
	fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
		Ok(ElementSection(read_entries(reader)?))
	}
}
impl Serialize for ElementSection {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		// Length-prefixed body holding a counted list of element segments.
		let mut body = CountedWriter::new(writer);
		let segments = self.0;
		CountedListWriter::<ElementSegment, _>(
			segments.len(),
			segments.into_iter().map(Into::into),
		).serialize(&mut body)?;
		body.done()?;
		Ok(())
	}
}
/// Data entries definitions.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct DataSection(Vec<DataSegment>);
impl DataSection {
	/// New data section.
	pub fn with_entries(entries: Vec<DataSegment>) -> Self {
		DataSection(entries)
	}
	/// List of all data entries in the section.
	pub fn entries(&self) -> &[DataSegment] {
		&self.0
	}
	/// List of all data entries in the section (mutable).
	pub fn entries_mut(&mut self) -> &mut Vec<DataSegment> {
		&mut self.0
	}
}
impl Deserialize for DataSection {
	type Error = Error;
	fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> {
		Ok(DataSection(read_entries(reader)?))
	}
}
impl Serialize for DataSection {
	type Error = Error;
	fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> {
		// Length-prefixed body holding a counted list of data segments.
		let mut body = CountedWriter::new(writer);
		let segments = self.0;
		CountedListWriter::<DataSegment, _>(
			segments.len(),
			segments.into_iter().map(Into::into),
		).serialize(&mut body)?;
		body.done()?;
		Ok(())
	}
}
#[cfg(test)]
mod tests {
	// Unit tests for section (de)serialization, exercising both hand-crafted
	// in-memory payloads and on-disk fixture modules.
	use super::super::{
		deserialize_buffer, deserialize_file, ValueType, InitExpr, DataSegment,
		serialize, ElementSegment, Instructions, BlockType, Local, FuncBody,
	};
	use super::{Section, TypeSection, Type, DataSection, ElementSection, CodeSection};
	#[test]
	fn import_section() {
		let module = deserialize_file("./res/cases/v1/test5.wasm").expect("Should be deserialized");
		let mut found = false;
		for section in module.sections() {
			match section {
				&Section::Import(ref import_section) => {
					assert_eq!(25, import_section.entries().len());
					found = true
				},
				_ => { }
			}
		}
		assert!(found, "There should be import section in test5.wasm");
	}
	fn functions_test_payload() -> &'static [u8] {
		&[
			// functions section id
			0x03u8,
			// functions section length
			0x87, 0x80, 0x80, 0x80, 0x0,
			// number of functions
			0x04,
			// type reference 1
			0x01,
			// type reference 2
			0x86, 0x80, 0x00,
			// type reference 3
			0x09,
			// type reference 4
			0x33
		]
	}
	#[test]
	fn fn_section_detect() {
		let section: Section =
			deserialize_buffer(functions_test_payload()).expect("section to be deserialized");
		match section {
			Section::Function(_) => {},
			_ => {
				panic!("Payload should be recognized as functions section")
			}
		}
	}
	#[test]
	fn fn_section_number() {
		let section: Section =
			deserialize_buffer(functions_test_payload()).expect("section to be deserialized");
		match section {
			Section::Function(fn_section) => {
				assert_eq!(4, fn_section.entries().len(), "There should be 4 functions total");
			},
			_ => {
				// will be caught by the dedicated detection test
			}
		}
	}
	#[test]
	fn fn_section_ref() {
		let section: Section =
			deserialize_buffer(functions_test_payload()).expect("section to be deserialized");
		match section {
			Section::Function(fn_section) => {
				assert_eq!(6, fn_section.entries()[1].type_ref());
			},
			_ => {
				// will be caught by the dedicated detection test
			}
		}
	}
	fn types_test_payload() -> &'static [u8] {
		&[
			// section length
			11,
			// 2 functions
			2,
			// func 1, form =1
			0x60,
			// param_count=1
			1,
			// first param
			0x7e, // i64
			// no return params
			0x00,
			// func 2, form=1
			0x60,
			// param_count=2
			2,
			// first param
			0x7e,
			// second param
			0x7d,
			// return param (is_present, param_type)
			0x01, 0x7e
		]
	}
	#[test]
	fn type_section_len() {
		let type_section: TypeSection =
			deserialize_buffer(types_test_payload()).expect("type_section be deserialized");
		assert_eq!(type_section.types().len(), 2);
	}
	#[test]
	fn type_section_infer() {
		let type_section: TypeSection =
			deserialize_buffer(types_test_payload()).expect("type_section be deserialized");
		let t1 = match &type_section.types()[1] {
			&Type::Function(ref func_type) => func_type
		};
		assert_eq!(Some(ValueType::I64), t1.return_type());
		assert_eq!(2, t1.params().len());
	}
	fn export_payload() -> &'static [u8] {
		&[
			// section id
			0x07,
			// section length
			28,
			// 6 entries
			6,
			// func "A", index 6
			// [name_len(1-5 bytes), name_bytes(name_len, internal_kind(1byte), internal_index(1-5 bytes)])
			0x01, 0x41, 0x01, 0x86, 0x80, 0x00,
			// func "B", index 8
			0x01, 0x42, 0x01, 0x86, 0x00,
			// func "C", index 7
			0x01, 0x43, 0x01, 0x07,
			// memory "D", index 0
			0x01, 0x44, 0x02, 0x00,
			// func "E", index 1
			0x01, 0x45, 0x01, 0x01,
			// func "F", index 2
			0x01, 0x46, 0x01, 0x02
		]
	}
	#[test]
	fn export_detect() {
		let section: Section =
			deserialize_buffer(export_payload()).expect("section to be deserialized");
		match section {
			Section::Export(_) => {},
			_ => {
				panic!("Payload should be recognized as export section")
			}
		}
	}
	fn code_payload() -> &'static [u8] {
		&[
			// section id
			0x0Au8,
			// section length, 32
			0x20,
			// body count
			0x01,
			// body 1, length 30
			0x1E,
			0x01, 0x01, 0x7F, // local i32 (one collection of length one of type i32)
			0x02, 0x7F, // block i32
			0x23, 0x00, // get_global 0
			0x21, 0x01, // set_local 1
			0x23, 0x00, // get_global 0
			0x20, 0x00, // get_local 0
			0x6A, // i32.add
			0x24, 0x00, // set_global 0
			0x23, 0x00, // get_global 0
			0x41, 0x0F, // i32.const 15
			0x6A, // i32.add
			0x41, 0x70, // i32.const -16
			0x71, // i32.and
			0x24, 0x00, // set_global 0
			0x20, 0x01, // get_local 1
			0x0B,
			0x0B,
		]
	}
	#[test]
	fn code_detect() {
		let section: Section =
			deserialize_buffer(code_payload()).expect("section to be deserialized");
		match section {
			Section::Code(_) => {},
			_ => {
				panic!("Payload should be recognized as a code section")
			}
		}
	}
	fn data_payload() -> &'static [u8] {
		&[
			0x0bu8, // section id
			20, // 20 bytes overall
			0x01, // number of segments
			0x00, // index
			0x0b, // just `end` op
			0x10,
			// 16x 0x00
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00
		]
	}
	#[test]
	fn data_section_ser() {
		let data_section = DataSection::with_entries(
			vec![DataSegment::new(0u32, Some(InitExpr::empty()), vec![0u8; 16])]
		);
		let buf = serialize(data_section).expect("Data section to be serialized");
		assert_eq!(buf, vec![
			20u8, // 20 bytes overall (count + index + end op + length byte + 16 data bytes)
			0x01, // number of segments
			0x00, // index
			0x0b, // just `end` op
			16, // value of length 16
			0x00, 0x00, 0x00, 0x00, // 16x 0x00 as in initialization
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00
		]);
	}
	#[test]
	fn data_section_detect() {
		let section: Section =
			deserialize_buffer(data_payload()).expect("section to be deserialized");
		match section {
			Section::Data(_) => {},
			_ => {
				panic!("Payload should be recognized as a data section")
			}
		}
	}
	#[test]
	fn element_section_ser() {
		let element_section = ElementSection::with_entries(
			vec![ElementSegment::new(0u32, Some(InitExpr::empty()), vec![0u32; 4])]
		);
		let buf = serialize(element_section).expect("Element section to be serialized");
		assert_eq!(buf, vec![
			08u8, // 8 bytes overall
			0x01, // number of segments
			0x00, // index
			0x0b, // just `end` op
			0x04, // 4 elements
			0x00, 0x00, 0x00, 0x00 // 4x 0x00 as in initialization
		]);
	}
	#[test]
	fn code_section_ser() {
		use super::super::Instruction::*;
		let code_section = CodeSection::with_bodies(
			vec![
				FuncBody::new(
					vec![Local::new(1, ValueType::I32)],
					Instructions::new(vec![
						Block(BlockType::Value(ValueType::I32)),
						GetGlobal(0),
						End,
						End,
					])
				)
			]);
		let buf = serialize(code_section).expect("Code section to be serialized");
		assert_eq!(buf, vec![
			11u8, // 11 bytes total section size
			0x01, // 1 function
			9, // function #1 total code size
			1, // 1 local variable declaration
			1, // amount of variables
			0x7f, // type of variable (7-bit, -0x01), negative
			0x02, // block
			0x7f, // block return type (7-bit, -0x01), negative
			0x23, 0x00, // get_global(0)
			0x0b, // block end
			0x0b, // function end
		]);
	}
	#[test]
	fn start_section() {
		// Payload: id 8, length 1, function index 0.
		let section: Section = deserialize_buffer(&[08u8, 01u8, 00u8]).expect("Start section to deserialize");
		if let Section::Start(_) = section {
		} else {
			panic!("Payload should be a start section");
		}
		let serialized = serialize(section).expect("Start section to successfully serializen");
		assert_eq!(serialized, vec![08u8, 01u8, 00u8]);
	}
}
| 24.571057 | 104 | 0.65142 |
f5cac3d0d1bbd3bbc35c2c9920891b7454c4ca7d | 11,831 | // btleplug Source Code File
//
// Copyright 2020 Nonpolynomial Labs LLC. All rights reserved.
//
// Licensed under the BSD 3-Clause license. See LICENSE file in the project root
// for full license information.
//
// Some portions of this file are taken and/or modified from Rumble
// (https://github.com/mwylde/rumble), using a dual MIT/Apache License under the
// following copyright:
//
// Copyright (c) 2014 The Rust Project Developers
use super::{bindings, ble::characteristic::BLECharacteristic, ble::device::BLEDevice, utils};
use crate::{
api::{
AdapterManager, AddressType, BDAddr, CentralEvent, Characteristic, NotificationHandler,
Peripheral as ApiPeripheral, PeripheralProperties, ValueNotification, WriteType, UUID,
},
common::util,
Error, Result,
};
use dashmap::DashMap;
use std::{
collections::BTreeSet,
fmt::{self, Debug, Display, Formatter},
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, Mutex},
};
use bindings::windows::{devices::bluetooth::advertisement::*, storage::streams::DataReader};
#[derive(Clone)]
pub struct Peripheral {
    /// Underlying WinRT BLE device; `None` until `connect` succeeds.
    device: Arc<Mutex<Option<BLEDevice>>>,
    /// Adapter manager used to emit central events (connect/disconnect).
    adapter: AdapterManager<Self>,
    /// Bluetooth device address this record describes.
    address: BDAddr,
    /// Cached properties, updated from advertisement reports.
    properties: Arc<Mutex<PeripheralProperties>>,
    /// Characteristics discovered via `discover_characteristics`.
    characteristics: Arc<Mutex<BTreeSet<Characteristic>>>,
    /// Connection flag, toggled by the BLEDevice connection callback.
    connected: Arc<AtomicBool>,
    /// Per-UUID WinRT characteristic handles used for read/write/subscribe.
    ble_characteristics: Arc<DashMap<UUID, BLECharacteristic>>,
    /// Callbacks invoked on value notifications.
    notification_handlers: Arc<Mutex<Vec<NotificationHandler>>>,
}
impl Peripheral {
    /// Creates a disconnected peripheral record for `address` managed by
    /// `adapter`; state is filled in later from advertisements and `connect`.
    pub fn new(adapter: AdapterManager<Self>, address: BDAddr) -> Self {
        let device = Arc::new(Mutex::new(None));
        let mut properties = PeripheralProperties::default();
        properties.address = address;
        let properties = Arc::new(Mutex::new(properties));
        let characteristics = Arc::new(Mutex::new(BTreeSet::new()));
        let connected = Arc::new(AtomicBool::new(false));
        let ble_characteristics = Arc::new(DashMap::new());
        let notification_handlers = Arc::new(Mutex::new(Vec::new()));
        Peripheral {
            device,
            adapter,
            address,
            properties,
            characteristics,
            connected,
            ble_characteristics,
            notification_handlers,
        }
    }
    /// Merges data from one received advertisement into the cached properties.
    pub fn update_properties(&self, args: &BluetoothLEAdvertisementReceivedEventArgs) {
        let mut properties = self.properties.lock().unwrap();
        let advertisement = args.advertisement().unwrap();
        properties.discovery_count += 1;
        // Advertisements are cumulative: set/replace data only if it's set
        if let Ok(name) = advertisement.local_name() {
            if !name.is_empty() {
                properties.local_name = Some(name.to_string());
            }
        }
        if let Ok(manufacturer_data) = advertisement.manufacturer_data() {
            properties.manufacturer_data = manufacturer_data
                .into_iter()
                .map(|d| {
                    // NOTE(review): unwraps assume WinRT hands back well-formed
                    // manufacturer-data sections; a malformed buffer would panic.
                    let company_id = d.company_id().unwrap();
                    let buffer = d.data().unwrap();
                    let reader = DataReader::from_buffer(&buffer).unwrap();
                    let len = reader.unconsumed_buffer_length().unwrap() as usize;
                    let mut data = vec![0u8; len];
                    reader.read_bytes(&mut data).unwrap();
                    (company_id, data)
                })
                .collect();
        }
        // windows does not provide the address type in the advertisement event args but only in the device object
        // https://social.msdn.microsoft.com/Forums/en-US/c71d51a2-56a1-425a-9063-de44fda48766/bluetooth-address-public-or-random?forum=wdk
        properties.address_type = AddressType::default();
        properties.has_scan_response =
            args.advertisement_type().unwrap() == BluetoothLEAdvertisementType::ScanResponse;
        // NOTE(review): raw_signal_strength_in_dbm is the *received* signal
        // strength, yet it is stored as tx_power_level — confirm this mapping
        // is intentional.
        properties.tx_power_level = args
            .raw_signal_strength_in_dbm()
            .ok()
            .map(|rssi| rssi as i8);
    }
}
impl Display for Peripheral {
    /// Formats as `"<address> <local name or (unknown)>[ connected]"`.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let suffix = if self.is_connected() { " connected" } else { "" };
        let name = self
            .properties
            .lock()
            .unwrap()
            .local_name
            .clone()
            .unwrap_or_else(|| "(unknown)".to_string());
        write!(f, "{} {}{}", self.address, name, suffix)
    }
}
impl Debug for Peripheral {
    /// Verbose form: address, properties, characteristics, connection state.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let suffix = if self.is_connected() { " connected" } else { "" };
        let props_guard = self.properties.lock().unwrap();
        let chars_guard = self.characteristics.lock().unwrap();
        write!(
            f,
            "{} properties: {:?}, characteristics: {:?} {}",
            self.address, *props_guard, *chars_guard, suffix
        )
    }
}
impl ApiPeripheral for Peripheral {
/// Returns the address of the peripheral.
fn address(&self) -> BDAddr {
self.address.clone()
}
/// Returns the set of properties associated with the peripheral. These may be updated over time
/// as additional advertising reports are received.
fn properties(&self) -> PeripheralProperties {
let l = self.properties.lock().unwrap();
l.clone()
}
/// The set of characteristics we've discovered for this device. This will be empty until
/// `discover_characteristics` is called.
fn characteristics(&self) -> BTreeSet<Characteristic> {
let l = self.characteristics.lock().unwrap();
l.clone()
}
/// Returns true iff we are currently connected to the device.
fn is_connected(&self) -> bool {
self.connected.load(Ordering::Relaxed)
}
/// Creates a connection to the device. This is a synchronous operation; if this method returns
/// Ok there has been successful connection. Note that peripherals allow only one connection at
/// a time. Operations that attempt to communicate with a device will fail until it is connected.
fn connect(&self) -> Result<()> {
let connected = self.connected.clone();
let adapter_clone = self.adapter.clone();
let address_clone = self.address.clone();
let device = BLEDevice::new(
self.address,
Box::new(move |is_connected| {
connected.store(is_connected, Ordering::Relaxed);
if !is_connected {
adapter_clone.emit(CentralEvent::DeviceDisconnected(address_clone));
}
}),
)?;
device.connect()?;
let mut d = self.device.lock().unwrap();
*d = Some(device);
self.adapter
.emit(CentralEvent::DeviceConnected(self.address));
Ok(())
}
/// Terminates a connection to the device. This is a synchronous operation.
fn disconnect(&self) -> Result<()> {
let winrt_error = |e| Error::Other(format!("{:?}", e));
let mut device = self.device.lock().map_err(winrt_error)?;
*device = None;
self.adapter
.emit(CentralEvent::DeviceDisconnected(self.address));
Ok(())
}
/// Discovers all characteristics for the device. This is a synchronous operation.
fn discover_characteristics(&self) -> Result<Vec<Characteristic>> {
let device = self.device.lock().unwrap();
if let Some(ref device) = *device {
let mut characteristics_result = vec![];
let characteristics = device.discover_characteristics()?;
for characteristic in characteristics {
let uuid = utils::to_uuid(&characteristic.uuid().unwrap());
let properties =
utils::to_char_props(&characteristic.characteristic_properties().unwrap());
let chara = Characteristic {
uuid,
start_handle: 0,
end_handle: 0,
value_handle: 0,
properties,
};
characteristics_result.push(chara);
self.ble_characteristics
.entry(uuid)
.or_insert_with(|| BLECharacteristic::new(characteristic));
}
return Ok(characteristics_result);
}
Err(Error::NotConnected)
}
/// Write some data to the characteristic. Returns an error if the write couldn't be send or (in
/// the case of a write-with-response) if the device returns an error.
fn write(
&self,
characteristic: &Characteristic,
data: &[u8],
write_type: WriteType,
) -> Result<()> {
if let Some(ble_characteristic) = self.ble_characteristics.get(&characteristic.uuid) {
ble_characteristic.write_value(data, write_type)
} else {
Err(Error::NotSupported("write".into()))
}
}
/// Sends a read-by-type request to device for the range of handles covered by the
/// characteristic and for the specified declaration UUID. See
/// [here](https://www.bluetooth.com/specifications/gatt/declarations) for valid UUIDs.
/// Synchronously returns either an error or the device response.
fn read_by_type(&self, characteristic: &Characteristic, _uuid: UUID) -> Result<Vec<u8>> {
if let Some(ble_characteristic) = self.ble_characteristics.get(&characteristic.uuid) {
return ble_characteristic.read_value();
} else {
Err(Error::NotSupported("read_by_type".into()))
}
}
/// Enables either notify or indicate (depending on support) for the specified characteristic.
/// This is a synchronous call.
fn subscribe(&self, characteristic: &Characteristic) -> Result<()> {
if let Some(mut ble_characteristic) = self.ble_characteristics.get_mut(&characteristic.uuid)
{
let notification_handlers = self.notification_handlers.clone();
let uuid = characteristic.uuid;
ble_characteristic.subscribe(Box::new(move |value| {
let notification = ValueNotification {
uuid: uuid,
handle: None,
value,
};
util::invoke_handlers(¬ification_handlers, ¬ification);
}))
} else {
Err(Error::NotSupported("subscribe".into()))
}
}
/// Disables either notify or indicate (depending on support) for the specified characteristic.
/// This is a synchronous call.
fn unsubscribe(&self, characteristic: &Characteristic) -> Result<()> {
if let Some(mut ble_characteristic) = self.ble_characteristics.get_mut(&characteristic.uuid)
{
ble_characteristic.unsubscribe()
} else {
Err(Error::NotSupported("unsubscribe".into()))
}
}
/// Registers a handler that will be called when value notification messages are received from
/// the device. This method should only be used after a connection has been established. Note
/// that the handler will be called in a common thread, so it should not block.
fn on_notification(&self, handler: NotificationHandler) {
let mut list = self.notification_handlers.lock().unwrap();
list.push(handler);
}
fn read(&self, characteristic: &Characteristic) -> Result<Vec<u8>> {
if let Some(ble_characteristic) = self.ble_characteristics.get(&characteristic.uuid) {
return ble_characteristic.read_value();
} else {
Err(Error::NotSupported("read".into()))
}
}
}
| 38.790164 | 139 | 0.605021 |
0e96b00c60d959a811a3d45f7a915720e3212891 | 3,642 | // Copyright (c) The cargo-guppy Contributors
// SPDX-License-Identifier: MIT OR Apache-2.0
use crate::{Platform, TargetFeatures};
use cfg_expr::targets::ALL_BUILTINS;
use proptest::{collection::btree_set, prelude::*, sample::select};
use std::borrow::Cow;
/// ## Helpers for property testing
///
/// The methods in this section allow `Platform` instances to be used in property-based testing
/// scenarios.
///
/// Currently, [proptest 1](https://docs.rs/proptest/1) is supported if the `proptest1`
/// feature is enabled.
impl Platform<'static> {
    /// Given a way to generate `TargetFeatures` instances, this returns a `Strategy` that generates
    /// a platform at random.
    ///
    /// Requires the `proptest1` feature to be enabled.
    ///
    /// ## Examples
    ///
    /// ```
    /// use proptest::prelude::*;
    /// use target_spec::{Platform, TargetFeatures};
    ///
    /// // target_features is a strategy that always produces TargetFeatures::Unknown.
    /// let target_features = Just(TargetFeatures::Unknown);
    /// let strategy = Platform::strategy(target_features);
    /// ```
    pub fn strategy(
        target_features: impl Strategy<Value = TargetFeatures>,
    ) -> impl Strategy<Value = Platform<'static>> {
        // Between 0 and 2 random flags are attached to each generated platform.
        let flags = btree_set(flag_strategy(), 0..3);
        (0..ALL_BUILTINS.len(), target_features, flags).prop_map(|(idx, target_features, flags)| {
            // `expect` is safe: every triple in ALL_BUILTINS is a known builtin.
            let mut platform =
                Platform::new(ALL_BUILTINS[idx].triple, target_features).expect("known triple");
            platform.add_flags(flags);
            platform
        })
    }
    /// A version of `strategy` that allows target triples to be filtered.
    ///
    /// Requires the `proptest1` feature to be enabled.
    ///
    /// NOTE(review): if `triple_filter` rejects every builtin triple, the
    /// resulting `0..0` index range is empty — confirm proptest's behavior for
    /// empty ranges before relying on an all-rejecting filter.
    pub fn filtered_strategy(
        triple_filter: impl Fn(&'static str) -> bool,
        target_features: impl Strategy<Value = TargetFeatures>,
    ) -> impl Strategy<Value = Platform<'static>> {
        // Materialize the surviving triples once, up front; the closure below
        // then indexes into this captured Vec.
        let filtered: Vec<_> = ALL_BUILTINS
            .iter()
            .filter(|target_info| triple_filter(target_info.triple))
            .collect();
        let flags = btree_set(flag_strategy(), 0..3);
        (0..filtered.len(), target_features, flags).prop_map(
            move |(idx, target_features, flags)| {
                let mut platform =
                    Platform::new(filtered[idx].triple, target_features).expect("known triple");
                platform.add_flags(flags);
                platform
            },
        )
    }
}
/// Picks a random flag from a list of known flags.
pub fn flag_strategy() -> impl Strategy<Value = &'static str> {
    // Drawn via proptest's `select`, which picks one of the listed flags.
    // The list order affects which flag a given seed produces, so keep it stable.
    static KNOWN_FLAGS: &[&str] = &["cargo_web", "test-flag", "abc", "foo", "bar", "flag-test"];
    select(KNOWN_FLAGS)
}
/// The `Arbitrary` implementation for `TargetFeatures` uses a predefined list of features.
impl Arbitrary for TargetFeatures {
    type Parameters = ();
    type Strategy = BoxedStrategy<Self>;
    fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
        // https://doc.rust-lang.org/reference/attributes/codegen.html#available-features
        static KNOWN_FEATURES: &[&str] = &[
            "aes", "avx", "avx2", "bmi1", "bmi2", "fma", "rdrand", "sha", "sse", "sse2", "sse3",
            "sse4.1", "sse4.2", "ssse3", "xsave", "xsavec", "xsaveopt", "xsaves",
        ];
        let known_features_strategy = select(KNOWN_FEATURES).prop_map(Cow::Borrowed);
        // One of: Unknown, All, or an explicit set of 0..=7 known features.
        prop_oneof![
            Just(TargetFeatures::Unknown),
            Just(TargetFeatures::All),
            btree_set(known_features_strategy, 0..8).prop_map(TargetFeatures::Features),
        ]
        .boxed()
    }
}
2964a0b4e082959e783fad4f3a8200ea16141dcf | 3,315 | use std::collections::BTreeMap;
use crate::instantiate::Assignment;
use crate::schema::Member;
use crate::schema::Node;
/// Assign nodes to actual members in the system.
///
/// Right now the assignment is the simplest possible where we just take
/// members in "some" order and assign them to nodes until we have
/// covered all.
///
/// Returns `None` if there are fewer members than nodes; otherwise a map
/// from node UUID to the member's address.
// TODO: We very likely want to have some more brains in here (such as
//       a consistent hashing algorithm) or we will incur potentially
//       excessive reconfigurations in the system whenever a node is
//       added or removed.
pub fn simple_assign<'n, 'm, N, M>(nodes: N, members: M) -> Option<Assignment>
where
  N: Iterator<Item = &'n Node> + ExactSizeIterator,
  M: Iterator<Item = &'m Member> + ExactSizeIterator,
{
  // If the configuration prescribes more nodes than we have members
  // in the system we can't find an assignment.
  // TODO: In theory we could map two or more nodes to a single
  //       member.
  if members.len() < nodes.len() {
    return None;
  }
  // `zip` is now safe: the check above guarantees every node is paired
  // with a member (this replaces the earlier fold + unreachable!()).
  let mut assignment = BTreeMap::new();
  for (uuid, member) in nodes.zip(members) {
    let _result = assignment.insert(*uuid, *member.addr());
    // Node UUIDs must be unique; a duplicate would silently drop an
    // earlier assignment.
    debug_assert_eq!(_result, None);
  }
  Some(assignment)
}
#[cfg(test)]
mod tests {
  use super::*;
  use std::path::PathBuf;
  use maplit::btreemap;
  use maplit::btreeset;
  use uuid::Uuid;
  use crate::schema::Addr;
  use crate::schema::Member;
  use crate::schema::RelCfg;
  use crate::schema::Source;
  /// Two configured nodes but only one member: no assignment is possible.
  #[test]
  fn assign_insufficient_members() {
    let uuid0 = Uuid::new_v4();
    let uuid1 = Uuid::new_v4();
    let node0 = Addr::Ip("127.0.0.1:1".parse().unwrap());
    let config = btreemap! {
      uuid0 => btreemap! { 0 => btreeset! {} },
      uuid1 => btreemap! {
        1 => btreeset! {
          RelCfg::Source(Source::File(PathBuf::from("input.cmd")))
        }
      },
    };
    let members = btreeset! {
      Member::new(node0),
    };
    let assignment = simple_assign(config.keys(), members.iter());
    assert_eq!(assignment, None);
  }
  /// Happy path: two nodes, two members — every node gets exactly one member.
  #[test]
  fn assign_members() {
    // Sort the UUIDs so the BTreeMap key iteration order is deterministic
    // and matches the members' BTreeSet order below.
    let mut uuids = vec![Uuid::new_v4(), Uuid::new_v4()];
    uuids.sort();
    let node0 = Addr::Ip("127.0.0.1:1".parse().unwrap());
    let node1 = Addr::Ip("127.0.0.1:2".parse().unwrap());
    let config = btreemap! {
      uuids[0] => btreemap! {
        1 => btreeset! {
          RelCfg::Output(uuids[1], 1),
        }
      },
      uuids[1] => btreemap! {
        1 => btreeset! {
          RelCfg::Input(0),
        }
      },
    };
    let members = btreeset! {
      Member::new(node0.clone()),
      Member::new(node1.clone()),
    };
    let assignment = simple_assign(config.keys(), members.iter()).unwrap();
    let expected = btreemap! {
      uuids[0] => node0,
      uuids[1] => node1,
    };
    assert_eq!(assignment, expected);
  }
}
| 29.078947 | 82 | 0.534238 |
23340a4aba2fefaef924fdfde30ce11e8b7ef0cf | 195 | // rustfmt-indent_style: Block
// Chain indent
fn main() {
    // rustfmt fixture: with `indent_style: Block`, each chained call is
    // expected on its own line, indented one level past the binding.
    let lorem = ipsum
        .dolor()
        .sit()
        .amet()
        .consectetur()
        .adipiscing()
        .elite();
}
| 15 | 30 | 0.471795 |
226db185ecabd10d07a0522c902dffb4626e25ea | 7,279 | use crate::{
ewin_com::{
_cfg::key::{keycmd::*, keys::*, keywhen::*},
global::*,
log::*,
model::*,
},
model::*,
terminal::*,
};
use std::io::Write;
impl EvtAct {
    /// Renders whatever `act_type` requests: a message bar, the context menu,
    /// the prompt, or the editor / whole terminal. The parts drawn are
    /// remembered in `term.draw_parts_org` so the next call can compensate
    /// (e.g. erase a stale error message). Non-`Draw` act types are no-ops.
    pub fn draw<T: Write>(term: &mut Terminal, out: &mut T, act_type: &ActType) {
        Log::debug("EvtAct::term.keycmd", &term.keycmd);
        Log::debug("EvtAct::draw.evt_act_type", &act_type);
        Log::debug("EvtAct::draw.term.draw_parts_org", &term.draw_parts_org);
        match act_type {
            ActType::Draw(draw_parts) => {
                // The one-shot startup banner is cleared on the first real draw.
                if term.state.is_show_init_info {
                    let row_posi = term.curt().editor.row_posi;
                    term.curt().editor.clear_draw(out, row_posi);
                    term.state.is_show_init_info = false;
                }
                match &draw_parts {
                    DParts::MsgBar(msg) | DParts::AllMsgBar(msg) => {
                        // Key-recording status is informational; everything else
                        // shown through this path is an error message.
                        if msg == &LANG.key_recording {
                            term.curt().mbar.set_keyrecord(&msg);
                        } else {
                            term.curt().mbar.set_err(&msg);
                        }
                        if let DParts::MsgBar(_) = draw_parts {
                            term.curt().mbar.draw_only(out);
                        } else if let DParts::AllMsgBar(_) = draw_parts {
                            term.draw(out, &DParts::All);
                        }
                    }
                    DParts::CtxMenu => {
                        term.set_draw_range_ctx_menu();
                        term.ctx_menu_group.draw_only(out);
                    }
                    DParts::Prompt => EvtAct::draw_prompt(out, term),
                    DParts::All | DParts::Editor | DParts::ScrollUpDown(_) => {
                        // If the last time was an err msg, redraw the whole to delete it.
                        if let DParts::MsgBar(_) | DParts::AllMsgBar(_) = &term.draw_parts_org {
                            term.curt().editor.draw_range = EditorDrawRange::All;
                        }
                        term.draw(out, &draw_parts);
                    }
                };
                term.draw_parts_org = draw_parts.clone();
            }
            _ => {}
        }
    }
    /// Translates an `ActType` into control flow for `match_event`:
    /// `None` means "keep processing the event", `Some(false)` means "done,
    /// keep running", `Some(true)` means "exit the editor". `Draw` and
    /// `Cancel` both reposition the cursor; `Draw` renders first.
    pub fn check_next_process<T: Write>(out: &mut T, term: &mut Terminal, act_type: ActType) -> Option<bool> {
        match term.keycmd {
            // Log at the time of Mouse Move is not output
            KeyCmd::Null => {}
            _ => {
                Log::debug("evt_act_type", &act_type);
                Log::debug("term.keycmd", &term.keycmd);
            }
        }
        return match &act_type {
            ActType::Next => None,
            ActType::Draw(_) => {
                EvtAct::draw(term, out, &act_type);
                term.draw_cur(out);
                Some(false)
            }
            ActType::Cancel => {
                term.draw_cur(out);
                Some(false)
            }
            ActType::Exit => Some(true),
        };
    }
    /// Top-level event dispatcher. Runs the pre-checks (`set_keys`,
    /// `init_event`) and then routes the key to whichever component has
    /// focus (context menu, editor + bars, or prompt). Each stage may
    /// short-circuit via `check_next_process`; the returned bool is the
    /// "exit editor" flag.
    pub fn match_event<T: Write>(keys: Keys, out: &mut T, term: &mut Terminal) -> bool {
        // Support check for pressed keys
        let act_type = EvtAct::set_keys(keys, term);
        if let Some(rtn) = EvtAct::check_next_process(out, term, act_type) {
            return rtn;
        }
        Terminal::hide_cur();
        Log::info("term.keycmd", &term.keycmd);
        // Pressed keys Pre-check
        let act_type = EvtAct::init_event(term);
        if let Some(rtn) = EvtAct::check_next_process(out, term, act_type) {
            return rtn;
        }
        // msg
        EvtAct::set_org_msg(&mut term.curt());
        term.curt().mbar.clear_mag();
        let keywhen = term.get_when(&keys);
        Log::info("keywhen", &keywhen);
        match keywhen {
            KeyWhen::CtxMenuFocus => {
                // ctx_menu
                let act_type = EvtAct::ctrl_ctx_menu(term);
                if let Some(rtn) = EvtAct::check_next_process(out, term, act_type) {
                    return rtn;
                }
            }
            KeyWhen::EditorFocus => {
                // Editor focus fans out to header bar, editor body and status
                // bar, in that order; the first one that handles the key wins.
                // headerbar
                let act_type = EvtAct::ctrl_headerbar(term);
                if let Some(rtn) = EvtAct::check_next_process(out, term, act_type) {
                    return rtn;
                }
                // editor
                let act_type = EvtAct::ctrl_editor(term);
                if let Some(rtn) = EvtAct::check_next_process(out, term, act_type) {
                    return rtn;
                }
                // statusbar
                let act_type = EvtAct::ctrl_statusbar(term);
                if let Some(rtn) = EvtAct::check_next_process(out, term, act_type) {
                    return rtn;
                }
            }
            KeyWhen::PromptFocus => {
                // prom
                let act_type = EvtAct::ctrl_prom(term);
                if let Some(rtn) = EvtAct::check_next_process(out, term, act_type) {
                    return rtn;
                }
            }
            _ => {}
        };
        return false;
    }
    /// Records the pressed keys on the terminal and all focusable widgets.
    /// Bare mouse moves outside the context menu are dropped early
    /// (`Cancel`); unsupported key combinations produce an error message.
    pub fn set_keys(keys: Keys, term: &mut Terminal) -> ActType {
        if !term.state.is_ctx_menu {
            match keys {
                Keys::MouseMove(_, _) => {
                    // Initialized for post-processing
                    term.keycmd = KeyCmd::Null;
                    return ActType::Cancel;
                }
                _ => Log::info("Pressed key", &keys),
            };
        }
        term.set_keys(&keys);
        if term.keycmd == KeyCmd::Unsupported {
            return ActType::Draw(DParts::MsgBar(LANG.unsupported_operation.to_string()));
        }
        term.ctx_menu_group.set_keys(keys);
        term.curt().prom.set_keys(keys);
        return ActType::Next;
    }
    /// Pre-processing that runs before focus dispatch: handles terminal
    /// resize (including the "terminal too small" fallback message),
    /// file-close cleanup, and context-menu mouse moves. Everything else
    /// passes through unless the terminal is currently not displayable.
    pub fn init_event(term: &mut Terminal) -> ActType {
        Log::debug_key("init_event");
        match &term.keycmd {
            KeyCmd::CtxMenu(C_Cmd::MouseMove(_, _)) => return if term.state.is_ctx_menu { ActType::Next } else { ActType::Cancel },
            KeyCmd::Resize => {
                if Terminal::check_displayable() {
                    term.state.is_displayable = true;
                    term.curt().editor.draw_range = EditorDrawRange::None;
                    return if term.curt().state.is_nomal() { ActType::Draw(DParts::All) } else { ActType::Next };
                } else {
                    // Too small to render: blank the screen and ask the user
                    // to enlarge the terminal.
                    term.state.is_displayable = false;
                    Terminal::clear_display();
                    Terminal::hide_cur();
                    println!("{}", &LANG.increase_height_width_terminal);
                    return ActType::Cancel;
                }
            }
            KeyCmd::CloseFile => {
                term.curt().prom.clear();
                term.curt().state.clear();
                return ActType::Next;
            }
            _ => return if term.state.is_displayable { ActType::Next } else { ActType::Cancel },
        };
    }
}
221d566b6ec1cc7127d40031dd13b15523698d9e | 21,833 | // The z85 codec logic is largely based on https://github.com/decafbad/z85
use super::Mechanism;
use crate::prelude::TryFrom;
use libzmq_sys as sys;
use byteorder::{BigEndian, ByteOrder};
use failure::Fail;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
use std::{ffi::CString, fmt, option, os::raw::c_char};
// Z85 alphabet: maps a base-85 digit value (0..85) to its ASCII character.
// All entries are printable ASCII.
static LETTERS: [u8; 85] = [
    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x61, 0x62,
    0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E,
    0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A,
    0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C,
    0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
    0x59, 0x5A, 0x2E, 0x2D, 0x3A, 0x2B, 0x3D, 0x5E, 0x21, 0x2F, 0x2A, 0x3F,
    0x26, 0x3C, 0x3E, 0x28, 0x29, 0x5B, 0x5D, 0x7B, 0x7D, 0x40, 0x25, 0x24,
    0x23,
];
// Reverse lookup table: indexed by `ascii_byte - 0x20`, yields the base-85
// digit value, or 0xFF for printable bytes that are not in the Z85 alphabet.
static OCTETS: [u8; 96] = [
    0xFF, 0x44, 0xFF, 0x54, 0x53, 0x52, 0x48, 0xFF, 0x4B, 0x4C, 0x46, 0x41,
    0xFF, 0x3F, 0x3E, 0x45, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
    0x08, 0x09, 0x40, 0xFF, 0x49, 0x42, 0x4A, 0x47, 0x51, 0x24, 0x25, 0x26,
    0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32,
    0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x4D,
    0xFF, 0x4E, 0x43, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
    0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C,
    0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x4F, 0xFF, 0x50, 0xFF, 0xFF,
];
// The size of a curve key in the z85 format: 40 Z85 characters
// (which decode to 40 / 5 * 4 = 32 binary bytes).
const CURVE_CURVE_KEY_SIZE: usize = 40;
/// An error when encoding or decoding a `CurveKey`.
#[derive(Debug, Fail, Eq, PartialEq)]
pub enum CurveError {
    /// The Z85 text does not have the required length of 40 characters.
    #[fail(display = "input string must have len of 40 char")]
    InvalidSize,
    /// The input contained a byte that is not part of the Z85 alphabet;
    /// `pos` is the byte offset within the input.
    #[fail(
        display = "input string contains invalid byte 0x{:2X} at offset {}",
        byte, pos
    )]
    InvalidByte { pos: usize, byte: u8 },
}
fn z85_encode_chunk(input: &[u8]) -> [u8; 5] {
let mut num = BigEndian::read_u32(input) as usize;
let mut out = [0_u8; 5];
for i in (0..5).rev() {
out[i] = LETTERS[num % 85];
num /= 85;
}
out
}
/// Encodes binary data into a Z85 string.
///
/// The `Result` never carries an error today; the signature is kept for
/// symmetry with `z85_decode`.
///
/// # Panics
/// Panics if `input.len()` is not a multiple of 4.
fn z85_encode(input: &[u8]) -> Result<String, CurveError> {
    let len = input.len();
    if len % 4 != 0 {
        panic!("input length must be div by 4");
    }
    let mut out = Vec::with_capacity(len / 4 * 5);
    for chunk in input.chunks(4) {
        out.extend_from_slice(&z85_encode_chunk(chunk));
    }
    // Every byte produced comes from LETTERS, which is pure ASCII, so the
    // buffer is guaranteed valid UTF-8 — no need for the unsafe
    // from_utf8_unchecked here.
    Ok(String::from_utf8(out).expect("Z85 alphabet is ASCII"))
}
fn z85_decode_chunk(input: &[u8]) -> Result<[u8; 4], usize> {
let mut num: u32 = 0;
for (i, &byte) in input.iter().enumerate().take(5) {
num *= 85;
if byte < 0x20 || 0x7F < byte {
return Err(i);
}
let b = OCTETS[byte as usize - 32];
if b == 0xFF {
return Err(i);
}
num += u32::from(b);
}
let mut out = [0_u8; 4];
BigEndian::write_u32(&mut out, num);
Ok(out)
}
fn z85_decode(input: &str) -> Result<Vec<u8>, CurveError> {
let input = input.as_bytes();
let len = input.len();
if len % 5 != 0 {
panic!("input length must be div by 5");
}
let mut out = Vec::with_capacity(len / 5 * 4);
for (i, chunk) in input.chunks(5).enumerate() {
match z85_decode_chunk(chunk) {
Err(pos) => {
return Err(CurveError::InvalidByte {
pos: i * 5 + pos,
byte: chunk[pos],
});
}
Ok(out_chunk) => out.extend_from_slice(&out_chunk),
}
}
Ok(out)
}
/// A public `CURVE` cryptographic key in the printable [`Z85`] representation.
///
/// # Example
/// ```
/// use libzmq::auth::*;
///
/// let cert = CurveCert::new_unique();
///
/// // Generate a public key from a curve certificate.
/// let public = cert.public().to_owned();
/// // Derive a public key from a secret key.
/// let derived: CurvePublicKey = cert.secret().into();
///
/// assert_eq!(public, derived);
/// ```
///
/// [`Z85`]: https://rfc.zeromq.org/spec:32/Z85/
/// [`CurveCert::new_unique()`]: struct.CurveCert.html#method.new_unique
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(transparent)]
pub struct CurvePublicKey {
    // Validated Z85 text; all invariants live in `CurveKey`.
    inner: CurveKey,
}
impl CurvePublicKey {
    /// Create a new `CurvePublicKey` from a valid `Z85` string.
    ///
    /// Returns a `CurveError` if the string is not exactly 40 characters of
    /// the Z85 alphabet.
    pub fn new<S>(text: S) -> Result<Self, CurveError>
    where
        S: Into<String>,
    {
        let inner = CurveKey::new(text)?;
        Ok(Self { inner })
    }
    /// Returns the key in `Z85` encoded string.
    pub fn as_str(&self) -> &str {
        self.inner.as_str()
    }
}
// --- Formatting, conversion and iteration impls for `CurvePublicKey` ---
impl fmt::Display for CurvePublicKey {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
impl fmt::Debug for CurvePublicKey {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("CurvePublicKey")
            .field("key", &self.as_str())
            .finish()
    }
}
// Deriving a public key from the secret key delegates to
// `CurveKey::from_secret` (libzmq's zmq_curve_public under the hood).
impl From<CurveSecretKey> for CurvePublicKey {
    fn from(secret: CurveSecretKey) -> Self {
        let inner = CurveKey::from_secret(secret);
        Self { inner }
    }
}
impl<'a> From<&'a CurveSecretKey> for CurvePublicKey {
    fn from(secret: &'a CurveSecretKey) -> Self {
        let inner = CurveKey::from_secret(secret.to_owned());
        Self { inner }
    }
}
impl From<CurvePublicKey> for CurveKey {
    fn from(public: CurvePublicKey) -> Self {
        public.inner
    }
}
impl<'a> From<&'a CurvePublicKey> for CurvePublicKey {
    fn from(key: &'a CurvePublicKey) -> Self {
        key.to_owned()
    }
}
impl From<BinCurveKey> for CurvePublicKey {
    fn from(key: BinCurveKey) -> Self {
        let inner: CurveKey = key.into();
        Self { inner }
    }
}
impl<'a> From<&'a BinCurveKey> for CurvePublicKey {
    fn from(key: &'a BinCurveKey) -> Self {
        let inner: CurveKey = key.into();
        Self { inner }
    }
}
// Fallible construction from the various string flavors, all funneled
// through the validating `Self::new`.
impl TryFrom<String> for CurvePublicKey {
    type Error = CurveError;
    fn try_from(text: String) -> Result<Self, CurveError> {
        Self::new(text)
    }
}
impl<'a> TryFrom<&'a str> for CurvePublicKey {
    type Error = CurveError;
    fn try_from(text: &'a str) -> Result<Self, CurveError> {
        Self::new(text)
    }
}
impl<'a> TryFrom<&'a String> for CurvePublicKey {
    type Error = CurveError;
    fn try_from(text: &'a String) -> Result<Self, CurveError> {
        Self::new(text.as_str())
    }
}
// Single-item IntoIterator: lets a lone key be passed to APIs that accept
// an iterator of keys.
impl IntoIterator for CurvePublicKey {
    type Item = Self;
    type IntoIter = option::IntoIter<Self>;
    fn into_iter(self) -> Self::IntoIter {
        Some(self).into_iter()
    }
}
impl<'a> IntoIterator for &'a CurvePublicKey {
    type Item = Self;
    type IntoIter = option::IntoIter<Self>;
    fn into_iter(self) -> Self::IntoIter {
        Some(self).into_iter()
    }
}
/// A secret `CURVE` cryptographic key in the printable [`Z85`] representation.
///
/// Can be generated by [`CurveCert::new_unique()`].
///
/// [`Z85`]: https://rfc.zeromq.org/spec:32/Z85/
/// [`CurveCert::new_unique()`]: struct.CurveCert.html#method.new_unique
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(transparent)]
pub struct CurveSecretKey {
    // Validated Z85 text; all invariants live in `CurveKey`.
    inner: CurveKey,
}
impl CurveSecretKey {
    /// Create a new `CurveSecretKey` from a valid `Z85` string.
    ///
    /// Returns a `CurveError` if the string is not exactly 40 characters of
    /// the Z85 alphabet.
    pub fn new<S>(text: S) -> Result<Self, CurveError>
    where
        S: Into<String>,
    {
        let inner = CurveKey::new(text)?;
        Ok(Self { inner })
    }
    /// Returns the key in `Z85` encoded string.
    pub fn as_str(&self) -> &str {
        self.inner.as_str()
    }
}
// NOTE: unlike `CurvePublicKey`, this type has no `Display` impl — Debug
// output does include the key material.
impl fmt::Debug for CurveSecretKey {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The struct name previously read "CurveServerKey" — a copy-paste
        // error; it must match the type's actual name.
        f.debug_struct("CurveSecretKey")
            .field("key", &self.as_str())
            .finish()
    }
}
// --- Conversion and iteration impls for `CurveSecretKey` ---
impl From<CurveSecretKey> for CurveKey {
    fn from(public: CurveSecretKey) -> Self {
        public.inner
    }
}
impl<'a> From<&'a CurveSecretKey> for CurveSecretKey {
    fn from(key: &'a CurveSecretKey) -> Self {
        key.to_owned()
    }
}
impl From<BinCurveKey> for CurveSecretKey {
    fn from(key: BinCurveKey) -> Self {
        let inner: CurveKey = key.into();
        Self { inner }
    }
}
impl<'a> From<&'a BinCurveKey> for CurveSecretKey {
    fn from(key: &'a BinCurveKey) -> Self {
        let inner: CurveKey = key.into();
        Self { inner }
    }
}
// Fallible construction from strings, funneled through the validating `new`.
impl TryFrom<String> for CurveSecretKey {
    type Error = CurveError;
    fn try_from(text: String) -> Result<Self, CurveError> {
        Self::new(text)
    }
}
impl<'a> TryFrom<&'a str> for CurveSecretKey {
    type Error = CurveError;
    fn try_from(text: &'a str) -> Result<Self, CurveError> {
        Self::new(text)
    }
}
impl<'a> TryFrom<&'a String> for CurveSecretKey {
    type Error = CurveError;
    fn try_from(text: &'a String) -> Result<Self, CurveError> {
        Self::new(text.as_str())
    }
}
// Single-item IntoIterator convenience, mirroring `CurvePublicKey`.
impl IntoIterator for CurveSecretKey {
    type Item = Self;
    type IntoIter = option::IntoIter<Self>;
    fn into_iter(self) -> Self::IntoIter {
        Some(self).into_iter()
    }
}
impl<'a> IntoIterator for &'a CurveSecretKey {
    type Item = Self;
    type IntoIter = option::IntoIter<Self>;
    fn into_iter(self) -> Self::IntoIter {
        Some(self).into_iter()
    }
}
// Internal, always-validated Z85 key text shared by the public/secret
// wrapper types.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct CurveKey {
    text: String,
}
impl CurveKey {
    /// Validates and stores a Z85 key string: exactly 40 chars, every byte
    /// in the Z85 alphabet.
    fn new<S>(text: S) -> Result<Self, CurveError>
    where
        S: Into<String>,
    {
        let text = text.into();
        if text.len() != CURVE_CURVE_KEY_SIZE {
            return Err(CurveError::InvalidSize);
        }
        let bytes = text.as_bytes();
        for (pos, &byte) in bytes.iter().enumerate() {
            if !LETTERS.contains(&byte) {
                return Err(CurveError::InvalidByte { pos, byte });
            }
        }
        Ok(Self { text })
    }
    /// Derives the public key from a secret key via libzmq's
    /// `zmq_curve_public`.
    fn from_secret<K>(secret: K) -> Self
    where
        K: Into<CurveKey>,
    {
        let secret = secret.into();
        // Allocate a 40-byte zero-filled output buffer for libzmq to write
        // into; `from_vec_unchecked` skips the interior-NUL check, which is
        // what permits the all-zero contents.
        let public = unsafe {
            CString::from_vec_unchecked(vec![0u8; CURVE_CURVE_KEY_SIZE])
        };
        let secret = unsafe { CString::from_vec_unchecked(secret.text.into()) };
        let rc = unsafe {
            sys::zmq_curve_public(
                public.as_ptr() as *mut c_char,
                secret.as_ptr() as *mut c_char,
            )
        };
        // zmq_curve_public only fails when libzmq was built without CURVE.
        assert_eq!(rc, 0, "curve not supported");
        Self {
            text: public.into_string().unwrap(),
        }
    }
    fn as_str(&self) -> &str {
        self.text.as_str()
    }
}
// --- Conversion, formatting, serde and iteration impls for `CurveKey` ---
impl<'a> From<&'a CurveKey> for CurveKey {
    fn from(key: &'a CurveKey) -> Self {
        key.to_owned()
    }
}
impl TryFrom<String> for CurveKey {
    type Error = CurveError;
    fn try_from(text: String) -> Result<Self, CurveError> {
        Self::new(text)
    }
}
impl<'a> TryFrom<&'a str> for CurveKey {
    type Error = CurveError;
    fn try_from(text: &'a str) -> Result<Self, CurveError> {
        Self::new(text)
    }
}
impl<'a> TryFrom<&'a String> for CurveKey {
    type Error = CurveError;
    fn try_from(text: &'a String) -> Result<Self, CurveError> {
        Self::new(text.as_str())
    }
}
// BinCurveKey -> CurveKey re-encodes the raw bytes as Z85; the encoder
// output is guaranteed valid, so no re-validation is needed.
impl From<BinCurveKey> for CurveKey {
    fn from(key: BinCurveKey) -> Self {
        let text = z85_encode(key.as_bytes()).unwrap();
        // No need to validate.
        Self { text }
    }
}
impl<'a> From<&'a BinCurveKey> for CurveKey {
    fn from(key: &'a BinCurveKey) -> Self {
        let text = z85_encode(key.as_bytes()).unwrap();
        // No need to validate.
        Self { text }
    }
}
impl fmt::Display for CurveKey {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
// Serialized as the bare Z85 string; deserialization goes through the
// validating TryFrom so malformed keys are rejected.
impl Serialize for CurveKey {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.collect_str(self)
    }
}
impl<'de> Deserialize<'de> for CurveKey {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        TryFrom::try_from(s).map_err(de::Error::custom)
    }
}
// Single-item IntoIterator convenience, mirroring the wrapper types.
impl IntoIterator for CurveKey {
    type Item = Self;
    type IntoIter = option::IntoIter<Self>;
    fn into_iter(self) -> Self::IntoIter {
        Some(self).into_iter()
    }
}
impl<'a> IntoIterator for &'a CurveKey {
    type Item = Self;
    type IntoIter = option::IntoIter<Self>;
    fn into_iter(self) -> Self::IntoIter {
        Some(self).into_iter()
    }
}
/// A `CURVE` certificate containing a public and secret `CurveKey`.
///
/// # Example
/// ```
/// use libzmq::auth::CurveCert;
///
/// // Generate a new unique curve certificate.
/// let cert = CurveCert::new_unique();
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CurveCert {
    public: CurvePublicKey,
    secret: CurveSecretKey,
}
impl CurveCert {
    /// Generate a new unique certificate.
    ///
    /// Panics (via `assert_eq`) if the linked libzmq was built without
    /// CURVE support.
    pub fn new_unique() -> Self {
        // Two 40-byte zero-filled buffers for libzmq to fill with the
        // Z85-encoded key pair (see CurveKey::from_secret for the same trick).
        let public = unsafe {
            CString::from_vec_unchecked(vec![0u8; CURVE_CURVE_KEY_SIZE])
        };
        let secret = public.clone();
        let rc = unsafe {
            sys::zmq_curve_keypair(
                public.as_ptr() as *mut c_char,
                secret.as_ptr() as *mut c_char,
            )
        };
        assert_eq!(rc, 0, "curve not supported");
        // No need to check if returned z85 key is valid.
        let public = {
            let inner = CurveKey {
                text: public.into_string().unwrap(),
            };
            CurvePublicKey { inner }
        };
        let secret = {
            let inner = CurveKey {
                text: secret.into_string().unwrap(),
            };
            CurveSecretKey { inner }
        };
        Self { public, secret }
    }
    /// Returns a reference to the certificate's public key.
    pub fn public(&self) -> &CurvePublicKey {
        &self.public
    }
    /// Returns a reference to the certificate's secret key.
    pub fn secret(&self) -> &CurveSecretKey {
        &self.secret
    }
}
// Binary representation of the `CURVE` key. This is what is sent
// down the wire.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub(crate) struct BinCurveKey {
    bytes: Vec<u8>,
}
impl BinCurveKey {
    /// Wraps raw key bytes without validating them — callers are
    /// responsible for supplying well-formed key material.
    pub(crate) fn new_unchecked(bytes: Vec<u8>) -> Self {
        BinCurveKey { bytes }
    }
    pub(crate) fn as_bytes(&self) -> &[u8] {
        self.bytes.as_slice()
    }
}
// Z85 -> binary conversions. The `unwrap`s rely on the source keys being
// validated 40-char Z85 strings, for which z85_decode cannot fail.
impl From<CurveKey> for BinCurveKey {
    fn from(key: CurveKey) -> Self {
        let bytes = z85_decode(key.as_str()).unwrap();
        BinCurveKey { bytes }
    }
}
impl<'a> From<&'a CurveKey> for BinCurveKey {
    fn from(key: &'a CurveKey) -> Self {
        let bytes = z85_decode(key.as_str()).unwrap();
        BinCurveKey { bytes }
    }
}
impl From<CurvePublicKey> for BinCurveKey {
    fn from(key: CurvePublicKey) -> Self {
        let bytes = z85_decode(key.as_str()).unwrap();
        BinCurveKey { bytes }
    }
}
impl<'a> From<&'a CurvePublicKey> for BinCurveKey {
    fn from(key: &'a CurvePublicKey) -> Self {
        let bytes = z85_decode(key.as_str()).unwrap();
        BinCurveKey { bytes }
    }
}
impl From<CurveSecretKey> for BinCurveKey {
    fn from(key: CurveSecretKey) -> Self {
        let bytes = z85_decode(key.as_str()).unwrap();
        BinCurveKey { bytes }
    }
}
impl<'a> From<&'a CurveSecretKey> for BinCurveKey {
    fn from(key: &'a CurveSecretKey) -> Self {
        let bytes = z85_decode(key.as_str()).unwrap();
        BinCurveKey { bytes }
    }
}
/// Credentials for a `Curve` client.
///
/// # Example
/// ```
/// use libzmq::auth::*;
///
/// let server_cert = CurveCert::new_unique();
/// let client_cert = CurveCert::new_unique();
///
/// let creds = CurveClientCreds::new(server_cert.public())
///     .add_cert(client_cert);
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CurveClientCreds {
    // The client's own certificate is optional; only the server's public
    // key is required to initiate a CURVE connection.
    pub(crate) client: Option<CurveCert>,
    pub(crate) server: CurvePublicKey,
}
impl CurveClientCreds {
    /// Create a new `CurveClientCreds` from server's `CurvePublicKey`.
    pub fn new<S>(server: S) -> Self
    where
        S: Into<CurvePublicKey>,
    {
        Self {
            client: None,
            server: server.into(),
        }
    }
    /// Associates a client `CurveCert` with the credentials.
    ///
    /// Builder-style: consumes and returns `self`.
    pub fn add_cert<C>(mut self, client: C) -> Self
    where
        C: Into<CurveCert>,
    {
        self.client = Some(client.into());
        self
    }
    /// Returns a reference to the client certificate.
    pub fn cert(&self) -> Option<&CurveCert> {
        self.client.as_ref()
    }
    /// Returns a reference to the server public key.
    pub fn server(&self) -> &CurvePublicKey {
        &self.server
    }
}
impl<'a> From<&'a CurveClientCreds> for CurveClientCreds {
    fn from(creds: &'a CurveClientCreds) -> Self {
        creds.to_owned()
    }
}
// Lift the credentials into the socket `Mechanism` enum.
impl<'a> From<&'a CurveClientCreds> for Mechanism {
    fn from(creds: &'a CurveClientCreds) -> Self {
        Mechanism::CurveClient(creds.to_owned())
    }
}
impl From<CurveClientCreds> for Mechanism {
    fn from(creds: CurveClientCreds) -> Self {
        Mechanism::CurveClient(creds)
    }
}
/// Credentials for a `Curve` server.
/// # Example
/// ```
/// use libzmq::auth::*;
///
/// let server_cert = CurveCert::new_unique();
///
/// let creds = CurveServerCreds::new(server_cert.secret());
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CurveServerCreds {
    /// The server's `CurveSecretKey`.
    pub(crate) secret: CurveSecretKey,
}
impl CurveServerCreds {
    /// Create a new `CurveServerCreds` from a server secret `CurveSecretKey`.
    pub fn new<S>(secret: S) -> Self
    where
        S: Into<CurveSecretKey>,
    {
        Self {
            secret: secret.into(),
        }
    }
    /// Returns a reference to the server secret key.
    pub fn secret(&self) -> &CurveSecretKey {
        &self.secret
    }
}
impl<'a> From<&'a CurveServerCreds> for CurveServerCreds {
    fn from(creds: &'a CurveServerCreds) -> Self {
        creds.to_owned()
    }
}
// Lift the credentials into the socket `Mechanism` enum.
impl<'a> From<&'a CurveServerCreds> for Mechanism {
    fn from(creds: &'a CurveServerCreds) -> Self {
        Mechanism::CurveServer(creds.to_owned())
    }
}
impl From<CurveServerCreds> for Mechanism {
    fn from(creds: CurveServerCreds) -> Self {
        Mechanism::CurveServer(creds)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use quickcheck::quickcheck;
    // "HelloWorld" <-> these 8 bytes is the worked example from the Z85 RFC
    // (https://rfc.zeromq.org/spec:32/Z85/).
    const Z85_RFC: &str = "HelloWorld";
    const BIN_RFC: [u8; 8] = [0x86, 0x4F, 0xD2, 0x6F, 0xB5, 0x59, 0xF7, 0x5B];
    // 40 chars with '~' (not in the Z85 alphabet) at offset 20.
    const CURVE_KEY_INVALID_BYTE: &str =
        "AAAAAAAAAAAAAAAAAAAA~AAAAAAAAAAAAAAAAAAA";
    // A known secret/public key pair used to test key derivation.
    const CURVE_KEY_SECRET: &str = "sqe2ZQ%<<?*(MV2Shf%9=CtldI@T^^pgrML1S.F/";
    const CURVE_KEY_PUBLIC: &str = "hb=GN9.(K*)]:{q*)XjsMgwfDTPJYh!w*n/xlIl+";
    #[test]
    fn curve_key_new_invalid_size() {
        // Z85_RFC is 10 chars, not 40.
        let err = CurveKey::new(Z85_RFC).unwrap_err();
        assert_eq!(err, CurveError::InvalidSize);
    }
    #[test]
    fn curve_key_new_invalid_byte() {
        let err = CurveKey::new(CURVE_KEY_INVALID_BYTE).unwrap_err();
        assert_eq!(
            err,
            CurveError::InvalidByte {
                pos: 20,
                byte: 0x7E
            }
        );
    }
    #[test]
    fn curve_key_new() {
        CurveKey::new(CURVE_KEY_SECRET).unwrap();
    }
    #[test]
    fn curve_key_from_secret() {
        let secret = CurveKey::new(CURVE_KEY_SECRET).unwrap();
        let public = CurveKey::from_secret(&secret);
        assert_eq!(public.as_str(), CURVE_KEY_PUBLIC);
    }
    #[test]
    fn curve_cert_new_unique() {
        CurveCert::new_unique();
    }
    // NOTE(review): this test decodes despite its name; it checks the two
    // 5-char halves of the RFC string decode to the two 4-byte halves.
    #[test]
    fn z85_encode_chunk_rfc() {
        let curve_chunk_1 = z85_decode_chunk(&Z85_RFC.as_bytes()[..5]).unwrap();
        let curve_chunk_2 = z85_decode_chunk(&Z85_RFC.as_bytes()[5..]).unwrap();
        assert_eq!(curve_chunk_1, BIN_RFC[..4]);
        assert_eq!(curve_chunk_2, BIN_RFC[4..]);
    }
    // NOTE(review): and this one encodes — the names appear swapped.
    #[test]
    fn z85_decode_chunk_rfc() {
        let z85_chunk_1 = z85_encode_chunk(&BIN_RFC[..4]);
        let z85_chunk_2 = z85_encode_chunk(&BIN_RFC[4..]);
        assert_eq!(z85_chunk_1, Z85_RFC.as_bytes()[..5]);
        assert_eq!(z85_chunk_2, Z85_RFC.as_bytes()[5..]);
    }
    #[test]
    fn z85_encode_rfc() {
        let curve_key = z85_decode(&Z85_RFC).unwrap();
        assert_eq!(curve_key, BIN_RFC);
    }
    #[test]
    fn z85_decode_rfc() {
        let curve_key = z85_encode(&BIN_RFC).unwrap();
        assert_eq!(curve_key, Z85_RFC);
    }
    // Round-trip property for a single 4-byte chunk.
    quickcheck! {
        fn codec_chunk_quickcheck(num: u32) -> bool {
            let mut buf = [0_u8; 4];
            BigEndian::write_u32(&mut buf, num);
            let z85_chunk = z85_encode_chunk(&buf);
            if let Ok(curve_chunk) = z85_decode_chunk(&z85_chunk) {
                if curve_chunk == buf {
                    return true;
                }
            }
            false
        }
    }
    // Round-trip property for arbitrary input, quadrupled so the length is
    // always a multiple of 4 (required by z85_encode).
    quickcheck! {
        fn codec_quickcheck(input: Vec<u8>) -> bool {
            let mut input = input;
            input.extend_from_slice(&input.clone());
            input.extend_from_slice(&input.clone());
            if let Ok(z85) = z85_encode(&input) {
                if let Ok(curve) = z85_decode(&z85) {
                    return curve == input;
                }
            }
            false
        }
    }
    // The Z85 alphabet must be pure 7-bit ASCII (guarantees UTF-8 validity
    // of encoder output).
    #[test]
    fn seven_bit_letters() {
        for &l in LETTERS.iter() {
            assert!(l < 0x80)
        }
    }
}
| 25.269676 | 80 | 0.579902 |
bb0de1d7c326d647f93773b5ba12e890ee9405ca | 15,159 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of Generator thread and Generator trait.
//!
//! Generator thread accept a set of serializable arguments.
use {
crate::common_operations::create_target,
crate::io_packet::IoPacketType,
crate::issuer::{run_issuer, IssuerArgs},
crate::log::Stats,
crate::operations::{OperationType, PipelineStages},
crate::sequential_io_generator::SequentialIoGenerator,
crate::target::{AvailableTargets, TargetOps},
crate::verifier::{run_verifier, VerifierArgs},
anyhow::Error,
log::debug,
serde::{Deserialize, Serialize},
std::{
clone::Clone,
collections::HashMap,
ops::Range,
sync::{
mpsc::{channel, sync_channel, SyncSender},
Arc, Condvar, Mutex,
},
thread::spawn,
time::Instant,
},
};
/// This structure provides a mechanism for issuer to block on commands from
/// generator or from verifiers. When command_count drops to zero, issuer blocks
/// on someone to wake them up.
/// When generator or verifier insert a command in issuer's channel they signal
/// the issuer to wake up.
///
/// Cloning is cheap and shares the same underlying counter (the `Arc` is
/// cloned, not the count).
#[derive(Clone)]
pub struct ActiveCommands {
    /// command_count indicates how many commands are in issuers queue.
    /// Mutex and condition variable protect and help to wait on the count.
    command_count: Arc<(Mutex<u64>, Condvar)>,
}
impl ActiveCommands {
pub fn new() -> ActiveCommands {
ActiveCommands { command_count: Arc::new((Mutex::new(0), Condvar::new())) }
}
/// Decrements number of active commands. Waits on the condition variable if
/// command_count is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut count = ActiveCommands::new();
///
/// Thread 1
/// command_count.remove();
/// cmd = receiver.try_recv();
/// assert_eq!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.insert();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns value of command_count. This returns a snap-shot in time value.
/// By the time another action is performed based on previous value returned
/// by count, the count may have changed. Currently, sender increments the
/// count and reciever decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and it's checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
    /// Generates a new [random] number and return it's value.
    /// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
    /// operations, and buffers. Seems like it is operating at 3 different levels
    /// of abstraction... maybe split it into several different traits. "
    fn generate_number(&mut self) -> u64;
    /// Returns type of operation corresponding to the last generated [random]
    /// number
    // NOTE(review): `&[OperationType]` would be more idiomatic than
    // `&Vec<OperationType>`, but changing the trait signature would break
    // every implementor.
    fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
    /// Returns Range (start and end] of IO operation. end - start gives the size
    /// of the IO
    fn get_io_range(&self) -> Range<u64>;
    /// Generates and fills the buf with data.
    /// `sequence_number` and `offset_range` presumably seed deterministic
    /// content so a verifier can recompute it — confirm against implementors.
    fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
// Serialize/Deserialize support persisting these args for replay runs.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
    /// magic_number helps to identify that the block was written
    /// by the app.
    magic_number: u64,
    /// process_id helps to differentiate this run from other runs
    process_id: u64,
    /// Human friendly name for this thread.
    name: String,
    /// Unique identifier for each generator.
    generator_unique_id: u64,
    /// Target block size. For some Targets,
    /// IO might fail if size of IO is not a multiple of
    /// block_size. This size is also used to watermark the
    /// block with block header
    block_size: u64,
    /// MTU per IO that Target can handle.
    /// 0 represents N/A for this Target
    max_io_size: u64,
    /// Hard alignment requirements without which IOs might fail
    align: bool,
    /// Seed that will be used to generate IOs in this thread
    seed: u64,
    /// Name of the target on which generator will perform IOs.
    target_name: String,
    /// target_range describes the portion of the Target
    /// the generator is allowed to work on. Other instances
    /// of Target may work on different ranges within the same
    /// Target.
    /// All generated IoPacket's offset and length should
    /// fall in this range
    target_range: Range<u64>,
    /// Target type. When there are multiple target types in the apps, this
    /// will help us search and load the right target operations.
    target_type: AvailableTargets,
    /// Types of the operations to perform on the target.
    operations: TargetOps,
    /// The maximum allowed number of outstanding IOs that are generated and
    /// are in Issuer queue. This number does not limit IOs that belong to verify
    /// operation.
    issuer_queue_depth: usize,
    /// The number of IOs that need to be issued before we gracefully tear-down
    /// generator thread.
    /// TODO(auradkar): Introduce time bound exit criteria.
    max_io_count: u64,
    /// When true, the target access (read/write) are sequential with respect to
    /// offsets within the target and within a thread.
    sequential: bool,
}
impl GeneratorArgs {
    /// Builds a `GeneratorArgs`, deriving the human-friendly thread name
    /// from the generator's unique `id`.
    pub fn new(
        magic_number: u64,
        process_id: u64,
        id: u64,
        block_size: u64,
        max_io_size: u64,
        align: bool,
        seed: u64,
        target_name: String,
        target_range: Range<u64>,
        target_type: AvailableTargets,
        operations: TargetOps,
        issuer_queue_depth: usize,
        max_io_count: u64,
        sequential: bool,
    ) -> GeneratorArgs {
        // Fields are listed in struct-declaration order for readability.
        GeneratorArgs {
            magic_number,
            process_id,
            name: format!("generator-{}", id),
            generator_unique_id: id,
            block_size,
            max_io_size,
            align,
            seed,
            target_name,
            target_range,
            target_type,
            operations,
            issuer_queue_depth,
            max_io_count,
            sequential,
        }
    }
}
/// Based on the input args this returns a set of allowed operations that
/// generator is allowed to issue. For now we only allow writes.
///
/// Panics if `args.operations` does not enable writes, since no other
/// operation type is implemented yet.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> {
    // Fail loudly with a descriptive message instead of a bare
    // `assert!(false)` when an unsupported operation mix is requested.
    assert!(args.operations.write, "only write operations are currently supported");
    vec![OperationType::Write]
}
/// Based on the input args this returns a generator that can produce the
/// requested IO load. For now only sequential IO is supported; any other
/// request panics.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
    if args.sequential {
        Box::new(SequentialIoGenerator::new(
            args.magic_number,
            args.process_id,
            target_id,
            args.generator_unique_id,
            args.target_range.clone(),
            args.block_size,
            args.max_io_size,
            args.align,
        ))
    } else {
        panic!("Only sequential io generator is implemented at the moment");
    }
}
/// Generates `args.max_io_count` IO packets, hands each to the issuer, and
/// finally sends an Exit packet. Each packet is also recorded in `io_map`
/// so the verifier can match completions.
fn run_generator(
    args: &GeneratorArgs,
    to_issuer: &SyncSender<IoPacketType>,
    active_commands: &mut ActiveCommands,
    start_instant: Instant,
    io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
    // Generator specific target unique id.
    let target_id = 0;
    // IO sequence number. Order of IOs issued need not be same as order they arrive at
    // verifier and get logged. While replaying, this number helps us determine order
    // to issue IOs irrespective of the order they are read from replay log.
    // NOTE(review): this binding is shadowed by the loop variable below, so
    // the Exit packet at the end is sent with sequence number 0 — confirm
    // that is intentional.
    let io_sequence_number = 0;
    // The generator's stage in lifetime of an IO
    let stage = PipelineStages::Generate;
    let mut gen = pick_generator_type(&args, target_id);
    let target = create_target(
        args.target_type,
        target_id,
        args.target_name.clone(),
        args.target_range.clone(),
        start_instant,
    );
    // An array of allowed operations that helps generator to pick an operation
    // based on generated random number.
    let allowed_operations = pick_operation_type(&args);
    for io_sequence_number in 1..(args.max_io_count + 1) {
        // Purely informational: a zero count means the issuer is idle and
        // we are the bottleneck.
        if active_commands.count() == 0 {
            debug!("{} running slow.", args.name);
        }
        let io_seed = gen.generate_number();
        let io_range = gen.get_io_range();
        let op_type = gen.get_io_operation(&allowed_operations);
        let mut io_packet =
            target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
        io_packet.timestamp_stage_start(stage);
        let io_offset_range = io_packet.io_offset_range().clone();
        gen.fill_buffer(io_packet.buffer_mut(), io_sequence_number, io_offset_range);
        // Record the packet before sending so the verifier can always find it.
        {
            let mut map = io_map.lock().unwrap();
            map.insert(io_sequence_number, io_packet.clone());
        }
        io_packet.timestamp_stage_end(stage);
        to_issuer.send(io_packet).expect("error sending command");
        // Wake the issuer; it may be blocked waiting for work.
        active_commands.increment();
    }
    // Tell the issuer to shut down gracefully.
    let io_packet =
        target.create_io_packet(OperationType::Exit, io_sequence_number, 4, 0..1, target.clone());
    to_issuer.send(io_packet).expect("error sending exit command");
    active_commands.increment();
    Ok(())
}
/// Function that creates verifier and issuer thread. It build channels for them to communicate.
/// This thread assumes the role of generator.
///
/// Blocks until the generator loop completes and both spawned threads have
/// joined; any thread error is propagated to the caller.
pub fn run_load(
    args: GeneratorArgs,
    start_instant: Instant,
    stats: Arc<Mutex<Stats>>,
) -> Result<(), Error> {
    // Channel used to send commands from generator to issuer
    // This is the only bounded channel. The throttle control happens over this channel.
    // TODO(auradkar): Considering ActiveCommands and this channel are so tightly related, should
    // this channel be part of the ActiveCommand implementation?
    let (gi_to_issuer, gi_from_generator) = sync_channel(args.issuer_queue_depth);
    // Channel used to send commands from issuer to verifier
    let (iv_to_verifier, iv_from_issuer) = channel();
    // Channel used to send commands from verifier to generator
    let (vi_to_issuer, vi_from_verifier) = channel();
    // A hashmap of all outstanding IOs. Shared between generator and verifier.
    // Generator inserts entries and verifier removes it.
    let io_map = Arc::new(Mutex::new(HashMap::new()));
    // Mechanism to notify issuer of IOs.
    let mut active_commands = ActiveCommands::new();
    // Thread handle to wait on for joining.
    let mut thread_handles = vec![];
    // Create Issuer
    let issuer_args = IssuerArgs::new(
        format!("issues-{}", args.generator_unique_id),
        0,
        gi_from_generator,
        iv_to_verifier,
        vi_from_verifier,
        active_commands.clone(),
        args.sequential,
        // Alignment requirement only applies when `align` is set.
        match args.align {
            true => args.block_size,
            false => 0,
        },
    );
    thread_handles.push(spawn(move || run_issuer(issuer_args)));
    // Create verifier
    let verifier_args = VerifierArgs::new(
        format!("verifier-{}", args.generator_unique_id),
        0,
        iv_from_issuer,
        vi_to_issuer,
        false,
        io_map.clone(),
        stats.clone(),
        active_commands.clone(),
    );
    thread_handles.push(spawn(move || run_verifier(verifier_args)));
    // The current thread acts as the generator.
    run_generator(&args, &gi_to_issuer, &mut active_commands, start_instant, io_map)?;
    for handle in thread_handles {
        handle.join().unwrap()?;
    }
    stats.lock().unwrap().stop_clock();
    Ok(())
}
#[cfg(test)]
mod tests {
    use {
        crate::generator::ActiveCommands,
        std::thread::sleep,
        std::{thread, time},
    };
    /// Increment/decrement move the counter symmetrically, and decrement
    /// never blocks while the count is non-zero.
    #[test]
    fn active_command_test() {
        let mut cmds = ActiveCommands::new();
        assert_eq!(cmds.count(), 0);
        for expected in 1u64..=2 {
            cmds.increment();
            assert_eq!(cmds.count(), expected);
        }
        for expected in (0u64..=1).rev() {
            // Returns false: the count was non-zero, so no blocking occurred.
            assert_eq!(cmds.decrement(), false);
            assert_eq!(cmds.count(), expected);
        }
    }
    /// decrement() blocks when the count is zero and is woken by an
    /// increment() performed on a clone from another thread.
    #[test]
    fn active_command_block_test() {
        let mut cmds = ActiveCommands::new();
        assert_eq!(cmds.count(), 0);
        let mut cmds_for_thread = cmds.clone();
        cmds.increment();
        let waker = thread::spawn(move || {
            sleep(time::Duration::from_secs(1));
            // This increment wakes the main thread blocked in decrement().
            cmds_for_thread.increment();
        });
        // The first decrement finds the pre-incremented count immediately.
        assert_eq!(cmds.decrement(), false);
        // The second decrement blocks until the spawned thread increments.
        assert_eq!(cmds.decrement(), true);
        let _ = waker.join();
        // All commands consumed.
        assert_eq!(cmds.count(), 0);
    }
}
| 33.463576 | 100 | 0.644238 |
d778ac64ed230fe9cdaec4e75537a9137924d768 | 184 | use crate::challenges::assert_challenge_result;
// Advent-of-code style regression tests: each checks the solver for
// day 12 against a known-good answer.
#[test]
fn part_1() {
    assert_challenge_result(12, 1, "757")
}
#[test]
fn part_2() {
    assert_challenge_result(12, 2, "51249")
}
| 15.333333 | 47 | 0.679348 |
11bbb910e1243dba5d60230902e82571e621839e | 2,069 | use core::pin::Pin;
use futures_core::future::Future;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
use futures_sink::Sink;
use pin_utils::{unsafe_pinned, unsafe_unpinned};
/// Stream for the [`then`](super::StreamExt::then) method.
#[derive(Debug)]
#[must_use = "streams do nothing unless polled"]
pub struct Then<St, Fut, F> {
    /// The underlying stream being adapted.
    stream: St,
    /// The in-flight future produced by `f`, if any.
    future: Option<Fut>,
    /// Closure mapping each stream item to a future.
    f: F,
}
impl<St: Unpin, Fut: Unpin, F> Unpin for Then<St, Fut, F> {}
impl<St, Fut, F> Then<St, Fut, F>
    where St: Stream,
          F: FnMut(St::Item) -> Fut,
{
    // Pin projections: `stream` and `future` are structurally pinned,
    // while `f` is plain data and may be borrowed mutably.
    unsafe_pinned!(stream: St);
    unsafe_pinned!(future: Option<Fut>);
    unsafe_unpinned!(f: F);
    /// Creates the adapter with no future in flight yet.
    pub(super) fn new(stream: St, f: F) -> Then<St, Fut, F> {
        Then {
            stream,
            future: None,
            f,
        }
    }
}
impl<St: FusedStream, Fut, F> FusedStream for Then<St, Fut, F> {
    /// Terminated only once no future is in flight and the inner stream
    /// reports itself terminated.
    fn is_terminated(&self) -> bool {
        match self.future {
            Some(_) => false,
            None => self.stream.is_terminated(),
        }
    }
}
impl<St, Fut, F> Stream for Then<St, Fut, F>
    where St: Stream,
          F: FnMut(St::Item) -> Fut,
          Fut: Future,
{
    type Item = Fut::Output;
    fn poll_next(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Fut::Output>> {
        // No future in flight: pull the next item from the inner stream
        // and start a new future by running it through `f`.
        if self.as_mut().future().as_pin_mut().is_none() {
            let item = match ready!(self.as_mut().stream().poll_next(cx)) {
                None => return Poll::Ready(None),
                Some(e) => e,
            };
            let fut = (self.as_mut().f())(item);
            self.as_mut().future().set(Some(fut));
        }
        // Drive the in-flight future; once it resolves, clear the slot so
        // the next poll pulls a fresh item.
        let e = ready!(self.as_mut().future().as_pin_mut().unwrap().poll(cx));
        self.as_mut().future().set(None);
        Poll::Ready(Some(e))
    }
}
// Forwarding impl of Sink from the underlying stream
// Every Sink method is delegated verbatim to the inner `stream` field via
// the `delegate_sink!` macro.
impl<S, Fut, F, Item> Sink<Item> for Then<S, Fut, F>
    where S: Stream + Sink<Item>,
          F: FnMut(S::Item) -> Fut,
          Fut: Future,
{
    type SinkError = S::SinkError;
    delegate_sink!(stream, Item);
}
| 26.525641 | 78 | 0.560174 |
ac748b0446d9451243825dcf5abc9ea2aea16777 | 4,352 | extern crate futures;
use futures::executor::block_on;
use futures::future::{join_all, Future, FutureResult, ok};
use futures::prelude::*;
#[derive(Clone, Copy, Debug, PartialEq)]
enum PlayerStatus {
    // Initial state: the player is still being "fetched" (see Future impl).
    Loading,
    // Loaded and idle; the only state in which points can be added.
    Default,
    // Mid-air; `can_add_points` rejects scoring in this state.
    Jumping,
}
#[derive(Clone, Copy, Debug)]
struct Player {
    // Display name; also used by `new` to special-case Bob's tick count.
    name: &'static str,
    status: PlayerStatus,
    score: u32,
    // Remaining poll() calls before the simulated load completes.
    ticks: usize,
}
impl Player {
    /// Creates a player in the `Loading` state with zero score.
    /// Bob is deliberately given extra poll ticks so the two players in
    /// `main` resolve at different times.
    fn new(name: &'static str) -> Self {
        // Give Bob more ticks explicitly
        let ticks = if name == "Bob" { 5 } else { 1 };
        Player {
            name,
            status: PlayerStatus::Loading,
            score: 0,
            ticks,
        }
    }
    /// Sets the player's status, returning an already-resolved future so
    /// callers can keep chaining combinators.
    fn set_status(&mut self, status: PlayerStatus) -> FutureResult<&mut Self, Never> {
        self.status = status;
        ok(self)
    }
    /// A player may only score while in the `Default` state.
    fn can_add_points(&mut self) -> bool {
        if self.status == PlayerStatus::Default {
            return true;
        }
        println!("We couldn't add any points for {}!", self.name);
        false
    }
    /// Adds `points` to the score when allowed; resolves immediately
    /// either way.
    fn add_points(&mut self, points: u32) -> Async<&mut Self> {
        if self.can_add_points() {
            // Here we would send the new score to a remote server
            // but for now we will manually increment the player's score.
            self.score += points;
        }
        Async::Ready(self)
    }
}
impl Future for Player {
    type Item = Player;
    type Error = ();
    fn poll(&mut self, cx: &mut task::Context) -> Poll<Self::Item, Self::Error> {
        // Presuming we fetch our player's score from a
        // server upon initial load.
        // After we perform the fetch send the Result<Async> value.
        println!("Player {} has been poll'ed!", self.name);
        if self.ticks == 0 {
            // "Load" finished: leave Loading and resolve with a copy.
            self.status = PlayerStatus::Default;
            Ok(Async::Ready(*self))
        } else {
            // Still loading: burn one tick and request another poll.
            self.ticks -= 1;
            cx.waker().wake();
            Ok(Async::Pending)
        }
    }
}
/// Adds `points` to `player` and returns a boxed, already-resolved future
/// carrying a copy of the player, simulating a server round-trip.
fn async_add_points(player: &mut Player,
                    points: u32)
                    -> Box<Future<Item = Player, Error = Never> + Send> {
    // Presuming that player.add_points() will send the points to a
    // database/server over a network and returns an updated
    // player score from the server/database.
    let _ = player.add_points(points);
    // Additionally, we may want to add logging mechanisms,
    // friend notifications, etc. here.
    Box::new(ok(*player))
}
/// Prints each player's current score to stdout.
fn display_scoreboard(players: Vec<&Player>) {
    players
        .iter()
        .for_each(|p| println!("{}'s Score: {}", p.name, p.score));
}
/// Demonstrates chaining pre-1.0 futures combinators: players "load"
/// asynchronously, and points may only be added once loaded and not jumping.
fn main() {
    let mut player1 = Player::new("Bob");
    let mut player2 = Player::new("Alice");
    // Futures that resolve once each player's Future impl finishes "loading".
    let tasks = join_all(vec![player1, player2]);
    // Both players are still Loading here, so these adds are rejected.
    let f = join_all(vec![
        async_add_points(&mut player1, 5),
        async_add_points(&mut player2, 2),
    ])
    .then(|x| {
        println!("First batch of adding points is done.");
        x
    });
    block_on(f).unwrap();
    let players = block_on(tasks).unwrap();
    player1 = players[0];
    player2 = players[1];
    println!("Scores should be zero since no players were loaded");
    display_scoreboard(vec![&player1, &player2]);
    // In our minigame, a player cannot score if they are currently
    // in the air or "jumping."
    // Let's make one of our players' status set to the jumping status.
    let f = player2.set_status(PlayerStatus::Jumping).and_then(move |mut new_player2| {
        async_add_points(&mut player1, 10)
            .and_then(move |_| {
                println!("Finished trying to give Player 1 points.");
                async_add_points(&mut new_player2, 2)
            })
            .then(move |new_player2| {
                println!("Finished trying to give Player 2 points.");
                println!("Player 1 (Bob) should have a score of 10 and Player 2 (Alice) should \
                          have a score of 0");
                // unwrap is used here to since
                display_scoreboard(vec![&player1, &new_player2.unwrap()]);
                new_player2
            })
    });
    block_on(f).unwrap();
    println!("All done!");
}
| 27.544304 | 96 | 0.560432 |
d708c7b4213b2ba69916eb15f7e8d7488bab9c41 | 126 | // run-pass
// aux-build:issue-2380.rs
// pretty-expanded FIXME #23616
extern crate a;
// Regression test for issue #2380: instantiating the generic `f` from the
// auxiliary crate `a` with `()` must compile and run.
pub fn main() {
    a::f::<()>();
}
| 11.454545 | 31 | 0.579365 |
d7f58449418cc4911aa2e61ff7c629f1de548baa | 22,881 | #![recursion_limit = "128"]
extern crate backtrace;
extern crate bitcoin;
extern crate bitcoin_hashes;
extern crate bitcoincore_rpc;
extern crate chrono;
extern crate dirs;
extern crate jsonrpc;
extern crate libc;
extern crate rand;
extern crate secp256k1;
extern crate serde;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate lazy_static;
extern crate failure;
#[macro_use]
extern crate log;
#[cfg(feature = "android_logger")]
extern crate android_log;
#[cfg(feature = "stderr_logger")]
extern crate stderrlog;
extern crate url;
// Liquid
#[cfg(feature = "liquid")]
extern crate elements;
#[cfg(feature = "liquid")]
extern crate liquid_rpc;
pub mod coins;
pub mod constants;
#[macro_use]
pub mod errors;
pub mod network;
pub mod session;
pub mod settings;
pub mod util;
pub mod wallet;
// Liquid
#[cfg(feature = "liquid")]
pub mod wally;
use serde_json::{from_value, Value};
use std::mem::transmute;
use std::os::raw::c_char;
use std::sync::{Arc, Mutex};
#[cfg(feature = "android_logger")]
use std::sync::{Once, ONCE_INIT};
use crate::constants::{GA_ERROR, GA_MEMO_USER, GA_OK};
use crate::errors::OptionExt;
use crate::network::Network;
use crate::session::{spawn_ticker, GA_session, SessionManager};
use crate::util::{extend, log_filter, make_str, read_str};
use crate::wallet::Wallet;
lazy_static! {
    // Global session manager shared by every FFI entry point; the ticker
    // thread is spawned once, alongside its creation.
    static ref SESS_MANAGER: Arc<Mutex<SessionManager>> = {
        let sm = SessionManager::new();
        spawn_ticker(Arc::clone(&sm));
        sm
    };
}
#[derive(Debug)]
#[repr(C)]
pub struct GA_json(Value);
impl GA_json {
fn new(data: Value) -> *const GA_json {
unsafe { transmute(Box::new(GA_json(data))) }
}
}
/// Result of an FFI operation requiring auth: either a failure message or a
/// completed result payload.
#[derive(Debug)]
#[repr(C)]
pub enum GA_auth_handler {
    Error(String),
    Done(Value),
}
impl GA_auth_handler {
    /// Leaks an `Error` handler as a raw pointer for the FFI caller.
    fn error(err: String) -> *const GA_auth_handler {
        let handler = GA_auth_handler::Error(err);
        // `Box::into_raw` replaces the previous `transmute` of a Box; same
        // pointer, explicit intent, no `unsafe` needed.
        Box::into_raw(Box::new(handler))
    }
    /// Leaks a `Done` handler carrying `res` as a raw pointer.
    fn done(res: Value) -> *const GA_auth_handler {
        debug!("GA_auth_handler::done() {:?}", res);
        let handler = GA_auth_handler::Done(res);
        Box::into_raw(Box::new(handler))
    }
    /// Convenience for a `Done` handler with no payload.
    fn success() -> *const GA_auth_handler {
        GA_auth_handler::done(Value::Null)
    }
    /// Serializes the handler state into the JSON shape GDK callers expect.
    fn to_json(&self) -> Value {
        match self {
            GA_auth_handler::Error(err) => json!({ "status": "error", "error": err }),
            GA_auth_handler::Done(res) => json!({ "status": "done", "result": res }),
        }
    }
}
//
// Macros
//
/// Evaluates a `Result`; on `Err` logs the error and makes the enclosing
/// FFI function return `GA_ERROR`, otherwise yields the `Ok` value.
macro_rules! tryit {
    ($x:expr) => {
        match $x {
            Err(err) => {
                debug!("error: {:?}", err);
                return GA_ERROR;
            }
            Ok(x) => {
                // can't easily print x because bitcoincore_rpc::Client is not serializable :(
                // should be fixed with https://github.com/rust-bitcoin/rust-bitcoincore-rpc/pull/51
                debug!("tryit!() succeed");
                x
            }
        }
    };
}
/// Writes `$x` through the out-pointer `$t` and evaluates to `GA_OK`.
/// The caller must supply a valid, writable pointer — the write is an
/// unchecked `unsafe` dereference.
macro_rules! ok {
    ($t:expr, $x:expr) => {
        unsafe {
            let x = $x;
            debug!("ok!() {:?}", x);
            *$t = x;
            GA_OK
        }
    };
}
/// Serializes `$x` to JSON, wraps it in a leaked `GA_json`, and stores the
/// pointer through `$t` via `ok!`, evaluating to `GA_OK`.
macro_rules! ok_json {
    ($t:expr, $x:expr) => {{
        let x = json!($x);
        debug!("ok_json!() {:?}", x);
        ok!($t, GA_json::new(x))
    }};
}
//
// Networks
//
/// Returns the known networks as a JSON object, plus an "all_networks"
/// key listing their names.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_networks(ret: *mut *const GA_json) -> i32 {
    let networks = Network::list();
    let names: Vec<String> = networks.keys().cloned().collect();
    let mut json_networks = json!(networks);
    json_networks
        .as_object_mut()
        .unwrap()
        .insert("all_networks".to_string(), json!(names));
    ok_json!(ret, json_networks)
}
//
// Session & account management
//
// One-shot guard so the Android logger is installed at most once.
#[cfg(feature = "android_logger")]
static INIT_LOGGER: Once = ONCE_INIT;
/// Global library init; currently only installs the Android logger.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_init(config: *const GA_json) -> i32 {
    debug!("GA_init() config: {:?}", config);
    #[cfg(feature = "android_logger")]
    INIT_LOGGER.call_once(|| android_log::init("gdk_rpc").unwrap());
    GA_OK
}
/// Registers a new session with the global manager and returns its handle.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_create_session(ret: *mut *const GA_session) -> i32 {
    debug!("GA_create_session()");
    #[cfg(feature = "android_logger")]
    INIT_LOGGER.call_once(|| android_log::init("gdk_rpc").unwrap());
    let mut sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.register();
    ok!(ret, sess)
}
/// Logs out any active wallet and removes the session from the manager.
// NOTE(review): `get_mut(sess).unwrap()` panics on an unknown handle, and a
// panic across an extern "C" boundary is undefined behavior — confirm.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_destroy_session(sess: *mut GA_session) -> i32 {
    let mut sm = SESS_MANAGER.lock().unwrap();
    {
        // Make sure the wallet is logged out.
        let sess = sm.get_mut(sess).unwrap();
        if let Some(wallet) = sess.wallet.take() {
            tryit!(wallet.logout());
        }
    }
    tryit!(sm.remove(sess));
    GA_OK
}
/// Associates the named network with the session and sets the log level.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_connect(
    sess: *mut GA_session,
    network_name: *const c_char,
    log_level: u32,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get_mut(sess).unwrap();
    log::set_max_level(log_filter(log_level));
    let network_name = read_str(network_name);
    sess.network = Some(tryit!(Network::get(&network_name).or_err("missing network")));
    debug!("GA_connect() {:?}", sess);
    GA_OK
}
/// Clears the session's network and logs out any active wallet.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_disconnect(sess: *mut GA_session) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get_mut(sess).unwrap();
    sess.network = None;
    if let Some(wallet) = sess.wallet.take() {
        tryit!(wallet.logout());
    }
    debug!("GA_disconnect() {:?}", sess);
    GA_OK
}
/// Registers a wallet from `mnemonic` on the session's network.
/// The hardware-device argument is ignored by this backend.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_register_user(
    sess: *mut GA_session,
    _hw_device: *const GA_json,
    mnemonic: *const c_char,
    ret: *mut *const GA_auth_handler,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get_mut(sess).unwrap();
    let mnemonic = read_str(mnemonic);
    debug!("GA_register_user({}) {:?}", mnemonic, sess);
    let network = tryit!(sess.network.or_err("session not connected"));
    sess.wallet = Some(tryit!(Wallet::register(network, &mnemonic)));
    ok!(ret, GA_auth_handler::success())
}
/// Logs into a wallet with `mnemonic`. Password-protected mnemonics are
/// rejected; logging in twice with the same mnemonic is a silent no-op.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_login(
    sess: *mut GA_session,
    _hw_device: *const GA_json,
    mnemonic: *const c_char,
    password: *const c_char,
    ret: *mut *const GA_auth_handler,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get_mut(sess).unwrap();
    let mnemonic = read_str(mnemonic);
    if read_str(password).len() > 0 {
        warn!("password-encrypted mnemonics are unsupported");
        return GA_ERROR;
    }
    if let Some(ref wallet) = sess.wallet {
        if wallet.mnemonic() != mnemonic {
            warn!("user called login but was already logged-in");
            return GA_ERROR;
        } else {
            // Here we silently do nothing because the user is already logged in.
            // This happens when a user calls register.
        }
    } else {
        debug!("GA_login({}) {:?}", mnemonic, sess);
        let network = tryit!(sess.network.or_err("session not connected"));
        sess.wallet = Some(tryit!(Wallet::login(network, &mnemonic)));
    }
    tryit!(sess.hello());
    ok!(ret, GA_auth_handler::success())
}
//
// Transactions & Coins
//
/// Returns wallet transactions matching `details` as a JSON list.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_transactions(
    sess: *const GA_session,
    details: *const GA_json,
    ret: *mut *const GA_json,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let details = &unsafe { &*details }.0;
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let txs = tryit!(wallet.get_transactions(&details));
    // XXX should we free details or should the client?
    ok_json!(ret, txs)
}
/// Looks up a single transaction by its txid string.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_transaction_details(
    sess: *const GA_session,
    txid: *const c_char,
    ret: *mut *const GA_json,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let txid = read_str(txid);
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let tx = tryit!(wallet.get_transaction(&txid));
    ok_json!(ret, tx)
}
/// Returns the wallet balance, filtered/shaped per `details`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_balance(
    sess: *const GA_session,
    details: *const GA_json,
    ret: *mut *const GA_json,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let details = &unsafe { &*details }.0;
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let balance = tryit!(wallet.get_balance(&details));
    ok_json!(ret, balance)
}
/// Attaches a user memo to a transaction; only `GA_MEMO_USER` is supported.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_set_transaction_memo(
    sess: *const GA_session,
    txid: *const c_char,
    memo: *const c_char,
    memo_type: u32,
) -> i32 {
    if memo_type != GA_MEMO_USER {
        warn!("unsupported memo type");
        return GA_ERROR;
    }
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let txid = read_str(txid);
    let memo = read_str(memo);
    tryit!(wallet.set_tx_memo(&txid, &memo[..]));
    GA_OK
}
//
// Creating transactions
//
/// Builds an unsigned transaction from `details`. Errors are reported
/// inside the returned JSON (with GA_OK) rather than as an error code.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_create_transaction(
    sess: *const GA_session,
    details: *const GA_json,
    ret: *mut *const GA_json,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let details = &unsafe { &*details }.0;
    debug!("GA_create_transaction() {:?}", details);
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let res = json!({
        "addressees": &details["addressees"],
        "is_sweep": false,
        "memo": "",
        "change_subaccount": 0,
        "fee": 100, // FIXME
        "satoshi": 500, // FIXME
    });
    let tx_unsigned = match wallet.create_transaction(&details) {
        Err(err) => {
            // errors are returned as a GA_OK with "error" in the returned object
            debug!("GA_create_transaction error: {:?}", err);
            return ok_json!(
                ret,
                extend(
                    res,
                    json!({
                        "error": err.to_gdk_code(),
                        "error_msg": err.to_string(),
                    })
                )
                .unwrap()
            );
        }
        Ok(x) => x,
    };
    debug!("GA_create_transaction() tx_unsigned {}", tx_unsigned);
    ok_json!(ret, extend(res, json!({ "error": "", "hex": tx_unsigned })).unwrap())
}
/// Signs a previously created transaction and returns its hex.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_sign_transaction(
    sess: *const GA_session,
    tx_detail_unsigned: *const GA_json,
    ret: *mut *const GA_auth_handler,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let tx_detail_unsigned = &unsafe { &*tx_detail_unsigned }.0;
    debug!("GA_sign_transaction() {:?}", tx_detail_unsigned);
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let tx_signed = tryit!(wallet.sign_transaction(&tx_detail_unsigned));
    debug!("GA_sign_transaction() {:?}", tx_signed);
    ok!(ret, GA_auth_handler::done(json!({ "error": "", "hex": tx_signed, "is_sweep": false })))
}
/// Broadcasts a signed transaction described by JSON details; returns
/// its txid inside a Done auth handler.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_send_transaction(
    sess: *const GA_session,
    tx_detail_signed: *const GA_json,
    ret: *mut *const GA_auth_handler,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let tx_detail_signed = &unsafe { &*tx_detail_signed }.0;
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let txid = tryit!(wallet.send_transaction(&tx_detail_signed));
    ok!(ret, GA_auth_handler::done(json!({ "error": "", "txid": txid })))
}
/// Broadcasts a raw transaction hex string; returns the txid as a C string.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_broadcast_transaction(
    sess: *const GA_session,
    tx_hex: *const c_char,
    ret: *mut *const c_char,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let tx_hex = read_str(tx_hex);
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let txid = tryit!(wallet.send_raw_transaction(&tx_hex));
    ok!(ret, make_str(txid))
}
//
// Addresses
//
/// Derives a fresh receive address according to `addr_details`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_receive_address(
    sess: *const GA_session,
    addr_details: *const GA_json,
    ret: *mut *const GA_json,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let addr_details = &unsafe { &*addr_details }.0;
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let address = tryit!(wallet.get_receive_address(&addr_details));
    ok_json!(ret, address)
}
//
// Subaccounts
//
/// Lists subaccounts. This backend supports only account 0, so the list
/// always has exactly one entry.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_subaccounts(
    sess: *const GA_session,
    ret: *mut *const GA_json,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let account = tryit!(wallet.get_account(0));
    // always returns a list of a single account
    ok_json!(ret, [account])
}
/// Returns the subaccount at `index`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_subaccount(
    sess: *const GA_session,
    index: u32,
    ret: *mut *const GA_json,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let account = tryit!(wallet.get_account(index));
    ok_json!(ret, account)
}
//
// Mnemonic
//
/// Returns the wallet's mnemonic as a C string. The password argument is
/// ignored (mnemonic encryption is unsupported).
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_mnemonic_passphrase(
    sess: *const GA_session,
    _password: *const c_char,
    ret: *mut *const c_char,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    ok!(ret, make_str(wallet.mnemonic()))
}
//
// Auth handler
//
/// Serializes an auth handler's state ("done"/"error") into a GA_json.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_auth_handler_get_status(
    auth_handler: *const GA_auth_handler,
    ret: *mut *const GA_json,
) -> i32 {
    let auth_handler = unsafe { &*auth_handler };
    let status = auth_handler.to_json();
    ok_json!(ret, status)
}
//
// Currency conversion & fees
//
/// Returns the fiat currencies the wallet can convert to/from.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_available_currencies(
    sess: *const GA_session,
    ret: *mut *const GA_json,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let currencies = wallet.get_available_currencies();
    ok_json!(ret, currencies)
}
/// Converts an amount between units/fiat per `value_details`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_convert_amount(
    sess: *const GA_session,
    value_details: *const GA_json,
    ret: *mut *const GA_json,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let value_details = &unsafe { &*value_details }.0;
    debug!("GA_convert_amount() {:?}", value_details);
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let units = tryit!(wallet.convert_amount(&value_details));
    debug!("GA_convert_amount() result: {:?}", units);
    ok_json!(ret, units)
}
/// Returns current fee-rate estimates wrapped as {"fees": ...}.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_fee_estimates(
    sess: *const GA_session,
    ret: *mut *const GA_json,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    let wallet = tryit!(sess.wallet().or_err("no loaded wallet"));
    let estimates = tryit!(wallet.get_fee_estimates().or_err("fee estimates unavailable"));
    ok_json!(ret, json!({ "fees": estimates }))
}
//
// Push notifications
//
/// Stores a (callback, context) pair on the session for push notifications.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_set_notification_handler(
    sess: *mut GA_session,
    handler: extern "C" fn(*const libc::c_void, *const GA_json),
    context: *const libc::c_void,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get_mut(sess).unwrap();
    sess.notify = Some((handler, context));
    GA_OK
}
//
// Settings
//
/// Returns the session's current settings as JSON.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_settings(sess: *const GA_session, ret: *mut *const GA_json) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get(sess).unwrap();
    ok_json!(ret, json!(sess.settings))
}
/// Replaces the session settings wholesale with the provided JSON.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_change_settings(
    sess: *mut GA_session,
    settings: *const GA_json,
    ret: *mut *const GA_auth_handler,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get_mut(sess).unwrap();
    let new_settings = &unsafe { &*settings }.0;
    // XXX should we allow patching just some setting fields instead of replacing it?
    sess.settings = tryit!(from_value(new_settings.clone()));
    ok!(ret, GA_auth_handler::success())
}
//
// Unimplemented, but gracefully degrades
//
/// Always reports that there are no pending system messages.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_system_message(
    _sess: *const GA_session,
    ret: *mut *const c_char,
) -> i32 {
    // an empty string implies no system messages
    ok!(ret, make_str(String::new()))
}
/// Returns a static two-factor configuration describing 2FA as entirely
/// disabled (no methods, no limits, no active reset), since this backend
/// does not implement two-factor authentication.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_twofactor_config(
    _sess: *const GA_session,
    ret: *mut *const GA_json,
) -> i32 {
    // 2FA is always off
    ok_json!(
        ret,
        json!({
            "any_enabled":false,
            "all_methods":[],
            "enabled_methods":[],
            "email":{"confirmed":false,"data":"","enabled":false},
            "limits":{"bits":"0.00","btc":"0.00000000","fiat":"0.00","fiat_currency":"USD","fiat_rate":"0","is_fiat":false,"mbtc":"0.00000","satoshi":0,"ubtc":"0.00"},
            "twofactor_reset":{"days_remaining":-1,"is_active":false,"is_disputed":false},
        })
    )
}
/// Reconnect hints are ignored; always succeeds as a graceful no-op.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_reconnect_hint(_sess: *const GA_session, _hint: *const GA_json) -> i32 {
    GA_OK
}
/// Watch-only mode is unsupported; always returns an empty username.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_watch_only_username(
    _sess: *mut GA_session,
    ret: *mut *const c_char,
) -> i32 {
    ok!(ret, make_str(String::new()))
}
/// Pretends to protect the wallet with a PIN.
///
/// FIXME setting a PIN does not actually do anything, just a successful
/// no-op: the mnemonic's entropy is handed back hex-encoded and explicitly
/// flagged `__unencrypted`, so `GDKRPC_GA_login_with_pin` can recover it.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_set_pin(
    _sess: *const GA_session,
    mnemonic: *const c_char,
    _pin: *const c_char,
    device_id: *const c_char,
    ret: *mut *const GA_json,
) -> i32 {
    let mnemonic = read_str(mnemonic);
    let device_id = read_str(device_id);
    let entropy = tryit!(wally::bip39_mnemonic_to_bytes(&mnemonic));
    let mnemonic_hex = hex::encode(&entropy);
    ok_json!(
        ret,
        json!({
            "encrypted_data": mnemonic_hex,
            "salt": "IA==",
            "pin_identifier": device_id,
            "__unencrypted": true
        })
    )
}
/// Logs a wallet in using previously stored "PIN data".
///
/// The PIN itself is ignored: the mnemonic entropy is recovered directly
/// from the `encrypted_data` field produced by `GDKRPC_GA_set_pin` (which
/// stores it unencrypted, hex-encoded), then a wallet login is performed on
/// the session's connected network.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_login_with_pin(
    sess: *mut GA_session,
    _pin: *const c_char,
    pin_data: *const GA_json,
) -> i32 {
    let sm = SESS_MANAGER.lock().unwrap();
    let sess = sm.get_mut(sess).unwrap();
    let pin_data = &unsafe { &*pin_data }.0;
    let entropy_hex = tryit!(pin_data["encrypted_data"].as_str().req()).to_string();
    let entropy = tryit!(hex::decode(&entropy_hex));
    let mnemonic = wally::bip39_mnemonic_from_bytes(&entropy);
    // NOTE(review): this logs the recovered mnemonic (a wallet secret) at
    // debug level -- consider removing before shipping.
    debug!("GA_login_with_pin mnemonic: {}", mnemonic);
    let network = tryit!(sess.network.or_err("session not connected"));
    sess.wallet = Some(tryit!(Wallet::login(network, &mnemonic)));
    // announce the freshly loaded wallet over the notification channel
    tryit!(sess.hello());
    GA_OK
}
//
// Unimplemented and GA_ERROR's
//
/// Unimplemented: proxy/Tor connections are not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_connect_with_proxy(
    _sess: *const GA_session,
    _network: *const c_char,
    _proxy_uri: *const c_char,
    _use_tor: u32,
    _log_level: u32,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: watch-only credentials are not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_set_watch_only(
    _sess: *mut GA_session,
    _username: *const c_char,
    _password: *const c_char,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: watch-only login is not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_login_watch_only(
    _sess: *mut GA_session,
    _username: *const c_char,
    _password: *const c_char,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: account removal is not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_remove_account(
    _sess: *mut GA_session,
    _ret: *mut *const GA_auth_handler,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: subaccounts are not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_create_subaccount(
    _sess: *const GA_session,
    _details: *const GA_json,
    _ret: *mut *const GA_auth_handler,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: UTXO listing is not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_unspent_outputs(
    _sess: *const GA_session,
    _details: *const GA_json,
    _ret: *mut *const GA_json,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: sweeping a private key's UTXOs is not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_get_unspent_outputs_for_private_key(
    _sess: *const GA_session,
    _private_key: *const c_char,
    _password: *const c_char,
    _unused: u32,
    _ret: *mut *const GA_json,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: nLockTime emails are not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_send_nlocktimes(_sess: *const GA_session) -> i32 {
    GA_ERROR
}
/// Unimplemented: data encryption is not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_encrypt(
    _sess: *const GA_session,
    _data: *const GA_json,
    _ret: *mut *const GA_json,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: data decryption is not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_decrypt(
    _sess: *const GA_session,
    _data: *const GA_json,
    _ret: *mut *const GA_json,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: auth handlers never need a 2FA code here; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_auth_handler_request_code(
    _auth_handler: *const GA_auth_handler,
    _method: *const c_char,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: auth handlers never accept a 2FA code here; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_auth_handler_resolve_code(
    _auth_handler: *const GA_auth_handler,
    _code: *const c_char,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: auth handlers cannot be re-invoked; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_auth_handler_call(_auth_handler: *const GA_auth_handler) -> i32 {
    GA_ERROR
}
/// Unimplemented: 2FA settings cannot be changed; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_change_settings_twofactor(
    _sess: *const GA_session,
    _method: *const c_char,
    _twofactor_details: *const GA_json,
    _ret: *mut *const GA_auth_handler,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: 2FA reset is not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_twofactor_reset(
    _sess: *const GA_session,
    _email: *const c_char,
    _is_dispute: u32,
    _ret: *mut *const GA_auth_handler,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: 2FA reset cancellation is not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_twofactor_cancel_reset(
    _sess: *const GA_session,
    _ret: *mut *const GA_auth_handler,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: 2FA spending limits are not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_twofactor_change_limits(
    _sess: *const GA_session,
    _limit_details: *const GA_json,
    _ret: *mut *const GA_auth_handler,
) -> i32 {
    GA_ERROR
}
/// Unimplemented: custom network registration is not supported; always returns `GA_ERROR`.
#[no_mangle]
pub extern "C" fn GDKRPC_GA_register_network(
    _name: *const c_char,
    _network_details: *const GA_json,
) -> i32 {
    GA_ERROR
}
//
// Unit test helper methods
//
/// Unit-test helper: drives one iteration of the session's tick/poll loop.
#[no_mangle]
pub extern "C" fn GDKRPC_test_tick(sess: *mut GA_session) -> i32 {
    debug!("GA_test_tick()");
    let manager = SESS_MANAGER.lock().unwrap();
    let session = manager.get_mut(sess).unwrap();
    tryit!(session.tick());
    GA_OK
}
| 24.789816 | 167 | 0.627726 |
dda3cf8cd775608a3ecbfb0c0a9f7b8eddc1af36 | 79,955 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Extracts a generic (unmodeled) service error from an HTTP response by
/// delegating to the shared JSON error parser with the body and headers.
pub fn parse_http_generic_error(
    response: &http::Response<bytes::Bytes>,
) -> Result<smithy_types::Error, smithy_json::deserialize::Error> {
    let body = response.body();
    let headers = response.headers();
    crate::json_errors::parse_generic_error(body, headers)
}
/// Deserializes a `ConcurrentUpdateException` JSON error body into `builder`.
///
/// Streams JSON tokens over the (possibly empty) payload, copying the
/// `Message` field and skipping unrecognized keys; fails on malformed JSON
/// or on tokens remaining after the top-level object closes.
pub fn deser_structure_crate_error_concurrent_update_exceptionjson_err(
    input: &[u8],
    mut builder: crate::error::concurrent_update_exception::Builder,
) -> Result<crate::error::concurrent_update_exception::Builder, smithy_json::deserialize::Error> {
    let mut tokens_owned =
        smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
            .peekable();
    let tokens = &mut tokens_owned;
    smithy_json::deserialize::token::expect_start_object(tokens.next())?;
    loop {
        match tokens.next().transpose()? {
            Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
            Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                match key.to_unescaped()?.as_ref() {
                    "Message" => {
                        builder = builder.set_message(
                            smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
                                .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                .transpose()?,
                        );
                    }
                    _ => smithy_json::deserialize::token::skip_value(tokens)?,
                }
            }
            _ => {
                return Err(smithy_json::deserialize::Error::custom(
                    "expected object key or end object",
                ))
            }
        }
    }
    // reject trailing garbage after the closing brace
    if tokens.next().is_some() {
        return Err(smithy_json::deserialize::Error::custom(
            "found more JSON tokens after completing parsing",
        ));
    }
    Ok(builder)
}
/// Deserializes an `InternalServiceException` JSON error body into `builder`.
///
/// Same token-streaming pattern as the other error deserializers: only the
/// `Message` field is captured; unknown keys are skipped.
pub fn deser_structure_crate_error_internal_service_exceptionjson_err(
    input: &[u8],
    mut builder: crate::error::internal_service_exception::Builder,
) -> Result<crate::error::internal_service_exception::Builder, smithy_json::deserialize::Error> {
    let mut tokens_owned =
        smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
            .peekable();
    let tokens = &mut tokens_owned;
    smithy_json::deserialize::token::expect_start_object(tokens.next())?;
    loop {
        match tokens.next().transpose()? {
            Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
            Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                match key.to_unescaped()?.as_ref() {
                    "Message" => {
                        builder = builder.set_message(
                            smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
                                .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                .transpose()?,
                        );
                    }
                    _ => smithy_json::deserialize::token::skip_value(tokens)?,
                }
            }
            _ => {
                return Err(smithy_json::deserialize::Error::custom(
                    "expected object key or end object",
                ))
            }
        }
    }
    // reject trailing garbage after the closing brace
    if tokens.next().is_some() {
        return Err(smithy_json::deserialize::Error::custom(
            "found more JSON tokens after completing parsing",
        ));
    }
    Ok(builder)
}
/// Deserializes a `LimitExceededException` JSON error body into `builder`.
///
/// Only the `Message` field is captured; unknown keys are skipped and
/// trailing tokens after the object are rejected.
pub fn deser_structure_crate_error_limit_exceeded_exceptionjson_err(
    input: &[u8],
    mut builder: crate::error::limit_exceeded_exception::Builder,
) -> Result<crate::error::limit_exceeded_exception::Builder, smithy_json::deserialize::Error> {
    let mut tokens_owned =
        smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
            .peekable();
    let tokens = &mut tokens_owned;
    smithy_json::deserialize::token::expect_start_object(tokens.next())?;
    loop {
        match tokens.next().transpose()? {
            Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
            Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                match key.to_unescaped()?.as_ref() {
                    "Message" => {
                        builder = builder.set_message(
                            smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
                                .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                .transpose()?,
                        );
                    }
                    _ => smithy_json::deserialize::token::skip_value(tokens)?,
                }
            }
            _ => {
                return Err(smithy_json::deserialize::Error::custom(
                    "expected object key or end object",
                ))
            }
        }
    }
    if tokens.next().is_some() {
        return Err(smithy_json::deserialize::Error::custom(
            "found more JSON tokens after completing parsing",
        ));
    }
    Ok(builder)
}
/// Deserializes a `ValidationException` JSON error body into `builder`.
///
/// Only the `Message` field is captured; unknown keys are skipped and
/// trailing tokens after the object are rejected.
pub fn deser_structure_crate_error_validation_exceptionjson_err(
    input: &[u8],
    mut builder: crate::error::validation_exception::Builder,
) -> Result<crate::error::validation_exception::Builder, smithy_json::deserialize::Error> {
    let mut tokens_owned =
        smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
            .peekable();
    let tokens = &mut tokens_owned;
    smithy_json::deserialize::token::expect_start_object(tokens.next())?;
    loop {
        match tokens.next().transpose()? {
            Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
            Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                match key.to_unescaped()?.as_ref() {
                    "Message" => {
                        builder = builder.set_message(
                            smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
                                .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                .transpose()?,
                        );
                    }
                    _ => smithy_json::deserialize::token::skip_value(tokens)?,
                }
            }
            _ => {
                return Err(smithy_json::deserialize::Error::custom(
                    "expected object key or end object",
                ))
            }
        }
    }
    if tokens.next().is_some() {
        return Err(smithy_json::deserialize::Error::custom(
            "found more JSON tokens after completing parsing",
        ));
    }
    Ok(builder)
}
/// Deserializes a `CreateScalingPlan` response body into its output builder,
/// capturing the `ScalingPlanVersion` number (as `i64`) and skipping any
/// unrecognized keys.
pub fn deser_operation_crate_operation_create_scaling_plan(
    input: &[u8],
    mut builder: crate::output::create_scaling_plan_output::Builder,
) -> Result<crate::output::create_scaling_plan_output::Builder, smithy_json::deserialize::Error> {
    let mut tokens_owned =
        smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
            .peekable();
    let tokens = &mut tokens_owned;
    smithy_json::deserialize::token::expect_start_object(tokens.next())?;
    loop {
        match tokens.next().transpose()? {
            Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
            Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                match key.to_unescaped()?.as_ref() {
                    "ScalingPlanVersion" => {
                        builder = builder.set_scaling_plan_version(
                            smithy_json::deserialize::token::expect_number_or_null(tokens.next())?
                                .map(|v| v.to_i64()),
                        );
                    }
                    _ => smithy_json::deserialize::token::skip_value(tokens)?,
                }
            }
            _ => {
                return Err(smithy_json::deserialize::Error::custom(
                    "expected object key or end object",
                ))
            }
        }
    }
    if tokens.next().is_some() {
        return Err(smithy_json::deserialize::Error::custom(
            "found more JSON tokens after completing parsing",
        ));
    }
    Ok(builder)
}
/// Deserializes an `ObjectNotFoundException` JSON error body into `builder`.
///
/// Only the `Message` field is captured; unknown keys are skipped and
/// trailing tokens after the object are rejected.
pub fn deser_structure_crate_error_object_not_found_exceptionjson_err(
    input: &[u8],
    mut builder: crate::error::object_not_found_exception::Builder,
) -> Result<crate::error::object_not_found_exception::Builder, smithy_json::deserialize::Error> {
    let mut tokens_owned =
        smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
            .peekable();
    let tokens = &mut tokens_owned;
    smithy_json::deserialize::token::expect_start_object(tokens.next())?;
    loop {
        match tokens.next().transpose()? {
            Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
            Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                match key.to_unescaped()?.as_ref() {
                    "Message" => {
                        builder = builder.set_message(
                            smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
                                .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                .transpose()?,
                        );
                    }
                    _ => smithy_json::deserialize::token::skip_value(tokens)?,
                }
            }
            _ => {
                return Err(smithy_json::deserialize::Error::custom(
                    "expected object key or end object",
                ))
            }
        }
    }
    if tokens.next().is_some() {
        return Err(smithy_json::deserialize::Error::custom(
            "found more JSON tokens after completing parsing",
        ));
    }
    Ok(builder)
}
/// Deserializes an `InvalidNextTokenException` JSON error body into `builder`.
///
/// Only the `Message` field is captured; unknown keys are skipped and
/// trailing tokens after the object are rejected.
pub fn deser_structure_crate_error_invalid_next_token_exceptionjson_err(
    input: &[u8],
    mut builder: crate::error::invalid_next_token_exception::Builder,
) -> Result<crate::error::invalid_next_token_exception::Builder, smithy_json::deserialize::Error> {
    let mut tokens_owned =
        smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
            .peekable();
    let tokens = &mut tokens_owned;
    smithy_json::deserialize::token::expect_start_object(tokens.next())?;
    loop {
        match tokens.next().transpose()? {
            Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
            Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                match key.to_unescaped()?.as_ref() {
                    "Message" => {
                        builder = builder.set_message(
                            smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
                                .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                .transpose()?,
                        );
                    }
                    _ => smithy_json::deserialize::token::skip_value(tokens)?,
                }
            }
            _ => {
                return Err(smithy_json::deserialize::Error::custom(
                    "expected object key or end object",
                ))
            }
        }
    }
    if tokens.next().is_some() {
        return Err(smithy_json::deserialize::Error::custom(
            "found more JSON tokens after completing parsing",
        ));
    }
    Ok(builder)
}
/// Deserializes a `DescribeScalingPlanResources` response body into its
/// output builder, populating the `ScalingPlanResources` list and the
/// pagination `NextToken`; unknown keys are skipped.
pub fn deser_operation_crate_operation_describe_scaling_plan_resources(
    input: &[u8],
    mut builder: crate::output::describe_scaling_plan_resources_output::Builder,
) -> Result<
    crate::output::describe_scaling_plan_resources_output::Builder,
    smithy_json::deserialize::Error,
> {
    let mut tokens_owned =
        smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
            .peekable();
    let tokens = &mut tokens_owned;
    smithy_json::deserialize::token::expect_start_object(tokens.next())?;
    loop {
        match tokens.next().transpose()? {
            Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
            Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                match key.to_unescaped()?.as_ref() {
                    "ScalingPlanResources" => {
                        builder = builder.set_scaling_plan_resources(
                            crate::json_deser::deser_list_com_amazonaws_autoscalingplans_scaling_plan_resources(tokens)?
                        );
                    }
                    "NextToken" => {
                        builder = builder.set_next_token(
                            smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
                                .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                .transpose()?,
                        );
                    }
                    _ => smithy_json::deserialize::token::skip_value(tokens)?,
                }
            }
            _ => {
                return Err(smithy_json::deserialize::Error::custom(
                    "expected object key or end object",
                ))
            }
        }
    }
    if tokens.next().is_some() {
        return Err(smithy_json::deserialize::Error::custom(
            "found more JSON tokens after completing parsing",
        ));
    }
    Ok(builder)
}
/// Deserializes a `DescribeScalingPlans` response body into its output
/// builder, populating the `ScalingPlans` list and the pagination
/// `NextToken`; unknown keys are skipped.
pub fn deser_operation_crate_operation_describe_scaling_plans(
    input: &[u8],
    mut builder: crate::output::describe_scaling_plans_output::Builder,
) -> Result<crate::output::describe_scaling_plans_output::Builder, smithy_json::deserialize::Error>
{
    let mut tokens_owned =
        smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
            .peekable();
    let tokens = &mut tokens_owned;
    smithy_json::deserialize::token::expect_start_object(tokens.next())?;
    loop {
        match tokens.next().transpose()? {
            Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
            Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                match key.to_unescaped()?.as_ref() {
                    "ScalingPlans" => {
                        builder = builder.set_scaling_plans(
                            crate::json_deser::deser_list_com_amazonaws_autoscalingplans_scaling_plans(tokens)?
                        );
                    }
                    "NextToken" => {
                        builder = builder.set_next_token(
                            smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
                                .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                .transpose()?,
                        );
                    }
                    _ => smithy_json::deserialize::token::skip_value(tokens)?,
                }
            }
            _ => {
                return Err(smithy_json::deserialize::Error::custom(
                    "expected object key or end object",
                ))
            }
        }
    }
    if tokens.next().is_some() {
        return Err(smithy_json::deserialize::Error::custom(
            "found more JSON tokens after completing parsing",
        ));
    }
    Ok(builder)
}
/// Deserializes a `GetScalingPlanResourceForecastData` response body into
/// its output builder, populating the `Datapoints` list; unknown keys are
/// skipped.
pub fn deser_operation_crate_operation_get_scaling_plan_resource_forecast_data(
    input: &[u8],
    mut builder: crate::output::get_scaling_plan_resource_forecast_data_output::Builder,
) -> Result<
    crate::output::get_scaling_plan_resource_forecast_data_output::Builder,
    smithy_json::deserialize::Error,
> {
    let mut tokens_owned =
        smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input))
            .peekable();
    let tokens = &mut tokens_owned;
    smithy_json::deserialize::token::expect_start_object(tokens.next())?;
    loop {
        match tokens.next().transpose()? {
            Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
            Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                match key.to_unescaped()?.as_ref() {
                    "Datapoints" => {
                        builder = builder.set_datapoints(
                            crate::json_deser::deser_list_com_amazonaws_autoscalingplans_datapoints(tokens)?
                        );
                    }
                    _ => smithy_json::deserialize::token::skip_value(tokens)?,
                }
            }
            _ => {
                return Err(smithy_json::deserialize::Error::custom(
                    "expected object key or end object",
                ))
            }
        }
    }
    if tokens.next().is_some() {
        return Err(smithy_json::deserialize::Error::custom(
            "found more JSON tokens after completing parsing",
        ));
    }
    Ok(builder)
}
/// Substitutes an empty JSON document (`{}`) for an empty payload so the
/// token iterator always has a well-formed object to parse; non-empty
/// payloads are returned unchanged.
pub fn or_empty_doc(data: &[u8]) -> &[u8] {
    if !data.is_empty() {
        data
    } else {
        b"{}"
    }
}
/// Deserializes a JSON array of `ScalingPlanResource` objects.
///
/// JSON `null` yields `Ok(None)`; otherwise each non-null element is
/// collected until the closing `]`.
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_autoscalingplans_scaling_plan_resources<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<std::vec::Vec<crate::model::ScalingPlanResource>>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartArray { .. }) => {
            let mut items = Vec::new();
            loop {
                match tokens.peek() {
                    Some(Ok(smithy_json::deserialize::Token::EndArray { .. })) => {
                        tokens.next().transpose().unwrap();
                        break;
                    }
                    _ => {
                        let value =
                            crate::json_deser::deser_structure_crate_model_scaling_plan_resource(
                                tokens,
                            )?;
                        if let Some(value) = value {
                            items.push(value);
                        }
                    }
                }
            }
            Ok(Some(items))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start array or null",
        )),
    }
}
/// Deserializes a JSON array of `ScalingPlan` objects.
///
/// JSON `null` yields `Ok(None)`; otherwise each non-null element is
/// collected until the closing `]`.
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_autoscalingplans_scaling_plans<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<std::vec::Vec<crate::model::ScalingPlan>>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartArray { .. }) => {
            let mut items = Vec::new();
            loop {
                match tokens.peek() {
                    Some(Ok(smithy_json::deserialize::Token::EndArray { .. })) => {
                        tokens.next().transpose().unwrap();
                        break;
                    }
                    _ => {
                        let value =
                            crate::json_deser::deser_structure_crate_model_scaling_plan(tokens)?;
                        if let Some(value) = value {
                            items.push(value);
                        }
                    }
                }
            }
            Ok(Some(items))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start array or null",
        )),
    }
}
/// Deserializes a JSON array of forecast `Datapoint` objects.
///
/// JSON `null` yields `Ok(None)`; otherwise each non-null element is
/// collected until the closing `]`.
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_autoscalingplans_datapoints<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<std::vec::Vec<crate::model::Datapoint>>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartArray { .. }) => {
            let mut items = Vec::new();
            loop {
                match tokens.peek() {
                    Some(Ok(smithy_json::deserialize::Token::EndArray { .. })) => {
                        tokens.next().transpose().unwrap();
                        break;
                    }
                    _ => {
                        let value =
                            crate::json_deser::deser_structure_crate_model_datapoint(tokens)?;
                        if let Some(value) = value {
                            items.push(value);
                        }
                    }
                }
            }
            Ok(Some(items))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start array or null",
        )),
    }
}
/// Deserializes one `ScalingPlanResource` JSON object (JSON `null` yields
/// `None`). Enum-valued fields are converted from their string form;
/// unknown keys are skipped so newer service fields are tolerated.
pub fn deser_structure_crate_model_scaling_plan_resource<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::ScalingPlanResource>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::ScalingPlanResource::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "ScalingPlanName" => {
                                builder = builder.set_scaling_plan_name(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "ScalingPlanVersion" => {
                                builder = builder.set_scaling_plan_version(
                                    smithy_json::deserialize::token::expect_number_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|v| v.to_i64()),
                                );
                            }
                            "ServiceNamespace" => {
                                builder = builder.set_service_namespace(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped().map(|u| {
                                            crate::model::ServiceNamespace::from(u.as_ref())
                                        })
                                    })
                                    .transpose()?,
                                );
                            }
                            "ResourceId" => {
                                builder = builder.set_resource_id(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "ScalableDimension" => {
                                builder = builder.set_scalable_dimension(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped().map(|u| {
                                            crate::model::ScalableDimension::from(u.as_ref())
                                        })
                                    })
                                    .transpose()?,
                                );
                            }
                            "ScalingPolicies" => {
                                builder = builder.set_scaling_policies(
                                    crate::json_deser::deser_list_com_amazonaws_autoscalingplans_scaling_policies(tokens)?
                                );
                            }
                            "ScalingStatusCode" => {
                                builder = builder.set_scaling_status_code(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped().map(|u| {
                                            crate::model::ScalingStatusCode::from(u.as_ref())
                                        })
                                    })
                                    .transpose()?,
                                );
                            }
                            "ScalingStatusMessage" => {
                                builder = builder.set_scaling_status_message(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserializes one `ScalingPlan` JSON object (JSON `null` yields `None`).
/// Timestamps are decoded from epoch-seconds; unknown keys are skipped.
pub fn deser_structure_crate_model_scaling_plan<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::ScalingPlan>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::ScalingPlan::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "ScalingPlanName" => {
                                builder = builder.set_scaling_plan_name(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "ScalingPlanVersion" => {
                                builder = builder.set_scaling_plan_version(
                                    smithy_json::deserialize::token::expect_number_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|v| v.to_i64()),
                                );
                            }
                            "ApplicationSource" => {
                                builder = builder.set_application_source(
                                    crate::json_deser::deser_structure_crate_model_application_source(tokens)?
                                );
                            }
                            "ScalingInstructions" => {
                                builder = builder.set_scaling_instructions(
                                    crate::json_deser::deser_list_com_amazonaws_autoscalingplans_scaling_instructions(tokens)?
                                );
                            }
                            "StatusCode" => {
                                builder = builder.set_status_code(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped().map(|u| {
                                            crate::model::ScalingPlanStatusCode::from(u.as_ref())
                                        })
                                    })
                                    .transpose()?,
                                );
                            }
                            "StatusMessage" => {
                                builder = builder.set_status_message(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "StatusStartTime" => {
                                builder = builder.set_status_start_time(
                                    smithy_json::deserialize::token::expect_timestamp_or_null(
                                        tokens.next(),
                                        smithy_types::instant::Format::EpochSeconds,
                                    )?,
                                );
                            }
                            "CreationTime" => {
                                builder = builder.set_creation_time(
                                    smithy_json::deserialize::token::expect_timestamp_or_null(
                                        tokens.next(),
                                        smithy_types::instant::Format::EpochSeconds,
                                    )?,
                                );
                            }
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserializes one forecast `Datapoint` JSON object (JSON `null` yields
/// `None`): an epoch-seconds `Timestamp` plus an `f64` `Value`.
pub fn deser_structure_crate_model_datapoint<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::Datapoint>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::Datapoint::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "Timestamp" => {
                                builder = builder.set_timestamp(
                                    smithy_json::deserialize::token::expect_timestamp_or_null(
                                        tokens.next(),
                                        smithy_types::instant::Format::EpochSeconds,
                                    )?,
                                );
                            }
                            "Value" => {
                                builder = builder.set_value(
                                    smithy_json::deserialize::token::expect_number_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|v| v.to_f64()),
                                );
                            }
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserializes a JSON array of `ScalingPolicy` objects.
///
/// JSON `null` yields `Ok(None)`; otherwise each non-null element is
/// collected until the closing `]`.
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_autoscalingplans_scaling_policies<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<std::vec::Vec<crate::model::ScalingPolicy>>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartArray { .. }) => {
            let mut items = Vec::new();
            loop {
                match tokens.peek() {
                    Some(Ok(smithy_json::deserialize::Token::EndArray { .. })) => {
                        tokens.next().transpose().unwrap();
                        break;
                    }
                    _ => {
                        let value =
                            crate::json_deser::deser_structure_crate_model_scaling_policy(tokens)?;
                        if let Some(value) = value {
                            items.push(value);
                        }
                    }
                }
            }
            Ok(Some(items))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start array or null",
        )),
    }
}
/// Deserializes one `ApplicationSource` JSON object (JSON `null` yields
/// `None`): either a CloudFormation stack ARN or a set of tag filters.
pub fn deser_structure_crate_model_application_source<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::ApplicationSource>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::ApplicationSource::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "CloudFormationStackARN" => {
                                builder = builder.set_cloud_formation_stack_arn(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "TagFilters" => {
                                builder = builder.set_tag_filters(
                                    crate::json_deser::deser_list_com_amazonaws_autoscalingplans_tag_filters(tokens)?
                                );
                            }
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserializes a JSON array of `ScalingInstruction` objects.
///
/// JSON `null` yields `Ok(None)`; otherwise each non-null element is
/// collected until the closing `]`.
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_autoscalingplans_scaling_instructions<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<std::vec::Vec<crate::model::ScalingInstruction>>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartArray { .. }) => {
            let mut items = Vec::new();
            loop {
                match tokens.peek() {
                    Some(Ok(smithy_json::deserialize::Token::EndArray { .. })) => {
                        tokens.next().transpose().unwrap();
                        break;
                    }
                    _ => {
                        let value =
                            crate::json_deser::deser_structure_crate_model_scaling_instruction(
                                tokens,
                            )?;
                        if let Some(value) = value {
                            items.push(value);
                        }
                    }
                }
            }
            Ok(Some(items))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start array or null",
        )),
    }
}
/// Deserializes one `ScalingPolicy` JSON object (JSON `null` yields
/// `None`): name, policy type, and optional target-tracking configuration.
pub fn deser_structure_crate_model_scaling_policy<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::ScalingPolicy>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::ScalingPolicy::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "PolicyName" => {
                                builder = builder.set_policy_name(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "PolicyType" => {
                                builder = builder.set_policy_type(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped()
                                            .map(|u| crate::model::PolicyType::from(u.as_ref()))
                                    })
                                    .transpose()?,
                                );
                            }
                            "TargetTrackingConfiguration" => {
                                builder = builder.set_target_tracking_configuration(
                                    crate::json_deser::deser_structure_crate_model_target_tracking_configuration(tokens)?
                                );
                            }
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserialize an optional JSON array of `TagFilter` values.
///
/// `null` yields `Ok(None)`; a start-array token consumes every element up to
/// and including the closing bracket, dropping `None` elements. Any other
/// leading token is a deserialization error.
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_autoscalingplans_tag_filters<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<std::vec::Vec<crate::model::TagFilter>>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartArray { .. }) => {
            let mut collected = Vec::new();
            // Pull elements until the matching end-of-array token is observed.
            while !matches!(
                tokens.peek(),
                Some(Ok(smithy_json::deserialize::Token::EndArray { .. }))
            ) {
                let element = crate::json_deser::deser_structure_crate_model_tag_filter(tokens)?;
                if let Some(element) = element {
                    collected.push(element);
                }
            }
            // Consume the EndArray token itself.
            tokens.next().transpose().unwrap();
            Ok(Some(collected))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start array or null",
        )),
    }
}
/// Deserialize an optional `ScalingInstruction` structure from a smithy JSON
/// token stream. (Generated code — hand edits will be lost on regeneration.)
///
/// The next token must be `null` (yields `Ok(None)`) or a start-object token.
/// String members that map to enums (`ServiceNamespace`, `ScalableDimension`,
/// `PredictiveScalingMaxCapacityBehavior`, `PredictiveScalingMode`,
/// `ScalingPolicyUpdateBehavior`) are converted via `From<&str>`; unknown keys
/// are skipped so newer service fields do not break deserialization.
pub fn deser_structure_crate_model_scaling_instruction<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::ScalingInstruction>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::ScalingInstruction::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "ServiceNamespace" => {
                                builder = builder.set_service_namespace(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped().map(|u| {
                                            crate::model::ServiceNamespace::from(u.as_ref())
                                        })
                                    })
                                    .transpose()?,
                                );
                            }
                            "ResourceId" => {
                                builder = builder.set_resource_id(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "ScalableDimension" => {
                                builder = builder.set_scalable_dimension(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped().map(|u| {
                                            crate::model::ScalableDimension::from(u.as_ref())
                                        })
                                    })
                                    .transpose()?,
                                );
                            }
                            "MinCapacity" => {
                                builder = builder.set_min_capacity(
                                    smithy_json::deserialize::token::expect_number_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|v| v.to_i32()),
                                );
                            }
                            "MaxCapacity" => {
                                builder = builder.set_max_capacity(
                                    smithy_json::deserialize::token::expect_number_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|v| v.to_i32()),
                                );
                            }
                            "TargetTrackingConfigurations" => {
                                builder = builder.set_target_tracking_configurations(
                                    crate::json_deser::deser_list_com_amazonaws_autoscalingplans_target_tracking_configurations(tokens)?
                                );
                            }
                            "PredefinedLoadMetricSpecification" => {
                                builder = builder.set_predefined_load_metric_specification(
                                    crate::json_deser::deser_structure_crate_model_predefined_load_metric_specification(tokens)?
                                );
                            }
                            "CustomizedLoadMetricSpecification" => {
                                builder = builder.set_customized_load_metric_specification(
                                    crate::json_deser::deser_structure_crate_model_customized_load_metric_specification(tokens)?
                                );
                            }
                            "ScheduledActionBufferTime" => {
                                builder = builder.set_scheduled_action_buffer_time(
                                    smithy_json::deserialize::token::expect_number_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|v| v.to_i32()),
                                );
                            }
                            "PredictiveScalingMaxCapacityBehavior" => {
                                builder = builder.set_predictive_scaling_max_capacity_behavior(
                                    smithy_json::deserialize::token::expect_string_or_null(tokens.next())?.map(|s|
                                        s.to_unescaped().map(|u|
                                            crate::model::PredictiveScalingMaxCapacityBehavior::from(u.as_ref())
                                        )
                                    ).transpose()?
                                );
                            }
                            "PredictiveScalingMaxCapacityBuffer" => {
                                builder = builder.set_predictive_scaling_max_capacity_buffer(
                                    smithy_json::deserialize::token::expect_number_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|v| v.to_i32()),
                                );
                            }
                            "PredictiveScalingMode" => {
                                builder = builder.set_predictive_scaling_mode(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped().map(|u| {
                                            crate::model::PredictiveScalingMode::from(u.as_ref())
                                        })
                                    })
                                    .transpose()?,
                                );
                            }
                            "ScalingPolicyUpdateBehavior" => {
                                builder = builder.set_scaling_policy_update_behavior(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped().map(|u| {
                                            crate::model::ScalingPolicyUpdateBehavior::from(
                                                u.as_ref(),
                                            )
                                        })
                                    })
                                    .transpose()?,
                                );
                            }
                            "DisableDynamicScaling" => {
                                builder = builder.set_disable_dynamic_scaling(
                                    smithy_json::deserialize::token::expect_bool_or_null(
                                        tokens.next(),
                                    )?,
                                );
                            }
                            // Unknown members are skipped for forward compatibility.
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserialize an optional `TargetTrackingConfiguration` structure from a
/// smithy JSON token stream. (Generated code — do not edit by hand.)
///
/// `null` yields `Ok(None)`; otherwise a start-object token is expected.
/// `TargetValue` is read as `f64`, the cooldown/warmup members as `i32`, and
/// `DisableScaleIn` as a bool; unknown keys are skipped.
pub fn deser_structure_crate_model_target_tracking_configuration<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::TargetTrackingConfiguration>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::TargetTrackingConfiguration::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "PredefinedScalingMetricSpecification" => {
                                builder = builder.set_predefined_scaling_metric_specification(
                                    crate::json_deser::deser_structure_crate_model_predefined_scaling_metric_specification(tokens)?
                                );
                            }
                            "CustomizedScalingMetricSpecification" => {
                                builder = builder.set_customized_scaling_metric_specification(
                                    crate::json_deser::deser_structure_crate_model_customized_scaling_metric_specification(tokens)?
                                );
                            }
                            "TargetValue" => {
                                builder = builder.set_target_value(
                                    smithy_json::deserialize::token::expect_number_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|v| v.to_f64()),
                                );
                            }
                            "DisableScaleIn" => {
                                builder = builder.set_disable_scale_in(
                                    smithy_json::deserialize::token::expect_bool_or_null(
                                        tokens.next(),
                                    )?,
                                );
                            }
                            "ScaleOutCooldown" => {
                                builder = builder.set_scale_out_cooldown(
                                    smithy_json::deserialize::token::expect_number_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|v| v.to_i32()),
                                );
                            }
                            "ScaleInCooldown" => {
                                builder = builder.set_scale_in_cooldown(
                                    smithy_json::deserialize::token::expect_number_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|v| v.to_i32()),
                                );
                            }
                            "EstimatedInstanceWarmup" => {
                                builder = builder.set_estimated_instance_warmup(
                                    smithy_json::deserialize::token::expect_number_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|v| v.to_i32()),
                                );
                            }
                            // Unknown members are skipped for forward compatibility.
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserialize an optional `TagFilter` structure (a tag `Key` plus its list
/// of `Values`) from a smithy JSON token stream. (Generated code — do not
/// edit by hand.) Unknown keys are skipped; `null` yields `Ok(None)`.
pub fn deser_structure_crate_model_tag_filter<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::TagFilter>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::TagFilter::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "Key" => {
                                builder = builder.set_key(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "Values" => {
                                builder = builder.set_values(
                                    crate::json_deser::deser_list_com_amazonaws_autoscalingplans_tag_values(tokens)?
                                );
                            }
                            // Unknown members are skipped for forward compatibility.
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserialize an optional JSON array of `TargetTrackingConfiguration`
/// values.
///
/// `null` yields `Ok(None)`; a start-array token consumes every element up to
/// and including the closing bracket, dropping `None` elements. Any other
/// leading token is a deserialization error.
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_autoscalingplans_target_tracking_configurations<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<
    Option<std::vec::Vec<crate::model::TargetTrackingConfiguration>>,
    smithy_json::deserialize::Error,
>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartArray { .. }) => {
            let mut collected = Vec::new();
            // Pull elements until the matching end-of-array token is observed.
            while !matches!(
                tokens.peek(),
                Some(Ok(smithy_json::deserialize::Token::EndArray { .. }))
            ) {
                let element =
                    crate::json_deser::deser_structure_crate_model_target_tracking_configuration(
                        tokens,
                    )?;
                if let Some(element) = element {
                    collected.push(element);
                }
            }
            // Consume the EndArray token itself.
            tokens.next().transpose().unwrap();
            Ok(Some(collected))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start array or null",
        )),
    }
}
/// Deserialize an optional `PredefinedLoadMetricSpecification` structure
/// (`PredefinedLoadMetricType` enum plus optional `ResourceLabel`) from a
/// smithy JSON token stream. (Generated code — do not edit by hand.)
/// Unknown keys are skipped; `null` yields `Ok(None)`.
pub fn deser_structure_crate_model_predefined_load_metric_specification<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::PredefinedLoadMetricSpecification>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::PredefinedLoadMetricSpecification::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "PredefinedLoadMetricType" => {
                                builder = builder.set_predefined_load_metric_type(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped()
                                            .map(|u| crate::model::LoadMetricType::from(u.as_ref()))
                                    })
                                    .transpose()?,
                                );
                            }
                            "ResourceLabel" => {
                                builder = builder.set_resource_label(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            // Unknown members are skipped for forward compatibility.
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserialize an optional `CustomizedLoadMetricSpecification` structure
/// (CloudWatch metric name, namespace, dimensions, statistic, and unit) from
/// a smithy JSON token stream. (Generated code — do not edit by hand.)
/// Unknown keys are skipped; `null` yields `Ok(None)`.
pub fn deser_structure_crate_model_customized_load_metric_specification<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::CustomizedLoadMetricSpecification>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::CustomizedLoadMetricSpecification::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "MetricName" => {
                                builder = builder.set_metric_name(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "Namespace" => {
                                builder = builder.set_namespace(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "Dimensions" => {
                                builder = builder.set_dimensions(
                                    crate::json_deser::deser_list_com_amazonaws_autoscalingplans_metric_dimensions(tokens)?
                                );
                            }
                            "Statistic" => {
                                builder = builder.set_statistic(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped().map(|u| {
                                            crate::model::MetricStatistic::from(u.as_ref())
                                        })
                                    })
                                    .transpose()?,
                                );
                            }
                            "Unit" => {
                                builder = builder.set_unit(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            // Unknown members are skipped for forward compatibility.
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserialize an optional `PredefinedScalingMetricSpecification` structure
/// (`PredefinedScalingMetricType` enum plus optional `ResourceLabel`) from a
/// smithy JSON token stream. (Generated code — do not edit by hand.)
/// Unknown keys are skipped; `null` yields `Ok(None)`.
pub fn deser_structure_crate_model_predefined_scaling_metric_specification<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<
    Option<crate::model::PredefinedScalingMetricSpecification>,
    smithy_json::deserialize::Error,
>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::PredefinedScalingMetricSpecification::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "PredefinedScalingMetricType" => {
                                builder = builder.set_predefined_scaling_metric_type(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped().map(|u| {
                                            crate::model::ScalingMetricType::from(u.as_ref())
                                        })
                                    })
                                    .transpose()?,
                                );
                            }
                            "ResourceLabel" => {
                                builder = builder.set_resource_label(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            // Unknown members are skipped for forward compatibility.
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserialize an optional `CustomizedScalingMetricSpecification` structure
/// (CloudWatch metric name, namespace, dimensions, statistic, and unit) from
/// a smithy JSON token stream. (Generated code — do not edit by hand.)
/// Unknown keys are skipped; `null` yields `Ok(None)`.
pub fn deser_structure_crate_model_customized_scaling_metric_specification<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<
    Option<crate::model::CustomizedScalingMetricSpecification>,
    smithy_json::deserialize::Error,
>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::CustomizedScalingMetricSpecification::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "MetricName" => {
                                builder = builder.set_metric_name(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "Namespace" => {
                                builder = builder.set_namespace(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "Dimensions" => {
                                builder = builder.set_dimensions(
                                    crate::json_deser::deser_list_com_amazonaws_autoscalingplans_metric_dimensions(tokens)?
                                );
                            }
                            "Statistic" => {
                                builder = builder.set_statistic(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| {
                                        s.to_unescaped().map(|u| {
                                            crate::model::MetricStatistic::from(u.as_ref())
                                        })
                                    })
                                    .transpose()?,
                                );
                            }
                            "Unit" => {
                                builder = builder.set_unit(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            // Unknown members are skipped for forward compatibility.
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
/// Deserialize an optional JSON array of strings (tag values).
///
/// `null` yields `Ok(None)`; a start-array token consumes every element up to
/// and including the closing bracket, unescaping each string and dropping
/// `null` entries. Any other leading token is a deserialization error.
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_autoscalingplans_tag_values<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<std::vec::Vec<std::string::String>>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartArray { .. }) => {
            let mut collected = Vec::new();
            // Pull elements until the matching end-of-array token is observed.
            while !matches!(
                tokens.peek(),
                Some(Ok(smithy_json::deserialize::Token::EndArray { .. }))
            ) {
                let element =
                    smithy_json::deserialize::token::expect_string_or_null(tokens.next())?
                        .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                        .transpose()?;
                if let Some(element) = element {
                    collected.push(element);
                }
            }
            // Consume the EndArray token itself.
            tokens.next().transpose().unwrap();
            Ok(Some(collected))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start array or null",
        )),
    }
}
/// Deserialize an optional JSON array of `MetricDimension` values.
///
/// `null` yields `Ok(None)`; a start-array token consumes every element up to
/// and including the closing bracket, dropping `None` elements. Any other
/// leading token is a deserialization error.
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_autoscalingplans_metric_dimensions<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<std::vec::Vec<crate::model::MetricDimension>>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartArray { .. }) => {
            let mut collected = Vec::new();
            // Pull elements until the matching end-of-array token is observed.
            while !matches!(
                tokens.peek(),
                Some(Ok(smithy_json::deserialize::Token::EndArray { .. }))
            ) {
                let element =
                    crate::json_deser::deser_structure_crate_model_metric_dimension(tokens)?;
                if let Some(element) = element {
                    collected.push(element);
                }
            }
            // Consume the EndArray token itself.
            tokens.next().transpose().unwrap();
            Ok(Some(collected))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start array or null",
        )),
    }
}
/// Deserialize an optional `MetricDimension` structure (a CloudWatch
/// dimension `Name`/`Value` pair) from a smithy JSON token stream.
/// (Generated code — do not edit by hand.) Unknown keys are skipped; `null`
/// yields `Ok(None)`.
pub fn deser_structure_crate_model_metric_dimension<'a, I>(
    tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::MetricDimension>, smithy_json::deserialize::Error>
where
    I: Iterator<
        Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>,
    >,
{
    match tokens.next().transpose()? {
        Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
        Some(smithy_json::deserialize::Token::StartObject { .. }) => {
            #[allow(unused_mut)]
            let mut builder = crate::model::MetricDimension::builder();
            loop {
                match tokens.next().transpose()? {
                    Some(smithy_json::deserialize::Token::EndObject { .. }) => break,
                    Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
                        match key.to_unescaped()?.as_ref() {
                            "Name" => {
                                builder = builder.set_name(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            "Value" => {
                                builder = builder.set_value(
                                    smithy_json::deserialize::token::expect_string_or_null(
                                        tokens.next(),
                                    )?
                                    .map(|s| s.to_unescaped().map(|u| u.into_owned()))
                                    .transpose()?,
                                );
                            }
                            // Unknown members are skipped for forward compatibility.
                            _ => smithy_json::deserialize::token::skip_value(tokens)?,
                        }
                    }
                    _ => {
                        return Err(smithy_json::deserialize::Error::custom(
                            "expected object key or end object",
                        ))
                    }
                }
            }
            Ok(Some(builder.build()))
        }
        _ => Err(smithy_json::deserialize::Error::custom(
            "expected start object or null",
        )),
    }
}
// Copyright 2017 GFX Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "genmesh")]
pub use genmesh::{Polygon, Quad, Triangle};
use std::borrow::Cow;
use std::collections::HashMap;
use std::fs::File;
use std::io::{self, BufRead, BufReader};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use mtl::{Material, Mtl};
/// A single face-vertex reference: (position index, optional texture index,
/// optional normal index), all 0-based after `normalize` has been applied.
#[derive(Debug, Clone, Copy, Hash, PartialEq, PartialOrd, Eq, Ord)]
pub struct IndexTuple(pub usize, pub Option<usize>, pub Option<usize>);

/// A polygon with an arbitrary number of vertices, as parsed from an `f` line.
pub type SimplePolygon = Vec<IndexTuple>;
/// Abstraction over polygon representations built from a parsed face line.
pub trait GenPolygon: Clone + std::fmt::Debug {
    /// Construct a polygon from the face's index tuples.
    fn new(data: SimplePolygon) -> Self;
}
impl GenPolygon for SimplePolygon {
    // Identity conversion: a `SimplePolygon` already is the parsed data.
    fn new(data: Self) -> Self {
        data
    }
}
#[cfg(feature = "genmesh")]
impl GenPolygon for Polygon<IndexTuple> {
    // Convert a parsed face into a genmesh polygon. Only triangles and quads
    // are representable; any other vertex count panics with "Unsupported".
    fn new(gs: SimplePolygon) -> Self {
        match gs.len() {
            3 => Polygon::PolyTri(Triangle::new(gs[0], gs[1], gs[2])),
            4 => Polygon::PolyQuad(Quad::new(gs[0], gs[1], gs[2], gs[3])),
            _ => panic!("Unsupported"),
        }
    }
}
/// A named `o` entry in an .obj file: a collection of polygon groups.
#[derive(Debug, Clone)]
pub struct Object<'a, P: GenPolygon>
{
    /// Name from the `o` statement ("default" when absent).
    pub name: String,
    /// The groups making up this object.
    pub groups: Vec<Group<'a, P>>,
}
impl<'a, P: GenPolygon> Object<'a, P> {
    /// Create an empty object with the given name and no groups.
    pub fn new(name: String) -> Self {
        Object {
            name,
            groups: Vec::new(),
        }
    }
}
/// A `g` group inside an object: a run of polygons sharing one material.
#[derive(Debug, Clone)]
pub struct Group<'a, P>
where
    P: 'a + GenPolygon,
{
    pub name: String,
    /// An index is used to tell groups apart that share the same name
    pub index: usize,
    /// Material applied via `usemtl`, if any; resolved to the fully loaded
    /// material by `Obj::load_mtls`.
    pub material: Option<Cow<'a, Material>>,
    /// The group's polygons.
    pub polys: Vec<P>,
}
impl<'a, P> Group<'a, P>
where
    P: 'a + GenPolygon,
{
    /// Create an empty group with the given name, index 0, and no material.
    pub fn new(name: String) -> Self {
        Group {
            name,
            index: 0,
            material: None,
            polys: Vec::new(),
        }
    }
}
/// The fully parsed contents of an .obj file, plus the directory it was
/// loaded from (used to resolve relative `mtllib` paths).
#[derive(Debug, Clone)]
pub struct Obj<'a, P>
where
    P: 'a + GenPolygon,
{
    /// Vertex positions from `v` lines.
    pub position: Vec<[f32; 3]>,
    /// Texture coordinates from `vt` lines (only the first two components).
    pub texture: Vec<[f32; 2]>,
    /// Vertex normals from `vn` lines.
    pub normal: Vec<[f32; 3]>,
    /// Parsed objects, each containing its polygon groups.
    pub objects: Vec<Object<'a, P>>,
    /// Material library file names collected from `mtllib` lines.
    pub material_libs: Vec<String>,
    /// Parent directory of the source file; empty when loaded from a buffer.
    pub path: PathBuf,
}
/// Convert a 1-based OBJ index into a 0-based offset into a list of `len`
/// elements. Negative indices are relative to the end of the list, as the
/// OBJ format allows.
///
/// NOTE(review): an index of 0 is invalid in OBJ; here it underflows
/// `idx as usize - 1` (debug panic / release wrap) — TODO confirm whether a
/// hard error is preferable upstream.
fn normalize(idx: isize, len: usize) -> usize {
    if idx.is_negative() {
        (len as isize + idx) as usize
    } else {
        idx as usize - 1
    }
}
impl<'a, P> Obj<'a, P>
where
P: GenPolygon,
{
fn new() -> Self {
Obj {
position: Vec::new(),
texture: Vec::new(),
normal: Vec::new(),
objects: Vec::new(),
material_libs: Vec::new(),
path: PathBuf::new(),
}
}
    /// Open `path` and parse it as a Wavefront .obj file.
    ///
    /// On success the returned `Obj` has `path` set to the file's parent
    /// directory so relative `mtllib` references can later be resolved by
    /// `load_mtls`.
    pub fn load_file(path: &Path) -> io::Result<Obj<P>> {
        let f = File::open(path)?;
        let mut obj = Obj::load_buf(&mut BufReader::new(f))?;
        // unwrap is safe as we've read this file before
        obj.path = path.parent().unwrap().to_owned();
        Ok(obj)
    }
    /// Loads the .mtl files referenced in the .obj file.
    ///
    /// If an .mtl file fails to open, its error is recorded and the remaining
    /// files are still processed.
    ///
    /// On failure the `Err` value is a `Vec` of tuples, the first element
    /// being the .mtl file name and the second its corresponding `io::Error`.
    pub fn load_mtls(&mut self) -> Result<(), Vec<(String, io::Error)>> {
        let mut errs = Vec::new();
        let mut materials = HashMap::new();
        // Collect every material from every referenced library into one
        // name -> material map.
        for m in &self.material_libs {
            let file = match File::open(&self.path.join(&m)) {
                Ok(f) => f,
                Err(err) => {
                    errs.push((m.clone(), err));
                    continue;
                }
            };
            let mtl = Mtl::load(&mut BufReader::new(file));
            for m in mtl.materials {
                materials.insert(m.name.clone(), Cow::from(m));
            }
        }
        // Re-point each group's name-only placeholder material (created by
        // `usemtl` parsing) at the fully loaded material, when found.
        for object in &mut self.objects {
            for group in &mut object.groups {
                if let Some(ref mut mat) = group.material {
                    match materials.get(&mat.name) {
                        Some(newmat) => *mat = newmat.clone(),
                        None => {}
                    };
                }
            }
        }
        if errs.is_empty() { Ok(()) } else { Err(errs) }
    }
fn parse_vertex(&mut self, v0: Option<&str>, v1: Option<&str>, v2: Option<&str>) {
let (v0, v1, v2) = match (v0, v1, v2) {
(Some(v0), Some(v1), Some(v2)) => (v0, v1, v2),
_ => {
panic!("could not parse line {:?} {:?} {:?}", v0, v1, v2);
}
};
let vertex = match (FromStr::from_str(v0), FromStr::from_str(v1), FromStr::from_str(v2)) {
(Ok(v0), Ok(v1), Ok(v2)) => [v0, v1, v2],
_ => {
panic!("could not parse line {:?} {:?} {:?}", v0, v1, v2);
}
};
self.position.push(vertex);
}
fn parse_texture(&mut self, t0: Option<&str>, t1: Option<&str>) {
let (t0, t1) = match (t0, t1) {
(Some(t0), Some(t1)) => (t0, t1),
_ => {
panic!("could not parse line {:?} {:?}", t0, t1);
}
};
let texture = match (FromStr::from_str(t0), FromStr::from_str(t1)) {
(Ok(t0), Ok(t1)) => [t0, t1],
_ => {
panic!("could not parse line {:?} {:?}", t0, t1);
}
};
self.texture.push(texture);
}
fn parse_normal(&mut self, n0: Option<&str>, n1: Option<&str>, n2: Option<&str>) {
let (n0, n1, n2) = match (n0, n1, n2) {
(Some(n0), Some(n1), Some(n2)) => (n0, n1, n2),
_ => {
panic!("could not parse line {:?} {:?} {:?}", n0, n1, n2);
}
};
let normal = match (FromStr::from_str(n0), FromStr::from_str(n1), FromStr::from_str(n2)) {
(Ok(n0), Ok(n1), Ok(n2)) => [n0, n1, n2],
_ => {
panic!("could not parse line {:?} {:?} {:?}", n0, n1, n2);
}
};
self.normal.push(normal);
}
fn parse_group(&self, group: &str) -> Result<IndexTuple, String> {
let mut group_split = group.split('/');
let p: Option<isize> = group_split.next().and_then(|idx| FromStr::from_str(idx).ok());
let t: Option<isize> =
group_split.next().and_then(|idx| if idx != "" { FromStr::from_str(idx).ok() } else { None });
let n: Option<isize> = group_split.next().and_then(|idx| FromStr::from_str(idx).ok());
match (p, t, n) {
(Some(p), v, n) => {
Ok(IndexTuple(normalize(p, self.position.len()),
v.map(|v| normalize(v, self.texture.len())),
n.map(|n| normalize(n, self.normal.len()))))
}
_ => Err(format!("poorly formed group {}", group)),
}
}
fn parse_face<'b, I>(&self, groups: &mut I) -> Result<P, String>
where
I: Iterator<Item=&'b str>,
{
let mut ret = Vec::with_capacity(3);
for g in groups {
let ituple = self.parse_group(g)?;
ret.push(ituple);
}
Ok(P::new(ret))
}
    /// Parse .obj data from a buffered reader, statement by statement.
    ///
    /// Returns an `InvalidData` error when a line cannot be read or an `f`
    /// statement contains a malformed index group; vertex-data parse failures
    /// panic (see `parse_vertex`/`parse_texture`/`parse_normal`). The
    /// returned `Obj` has an empty `path`.
    pub fn load_buf<B: BufRead>(input: &mut B) -> io::Result<Self>
    {
        let mut dat = Obj::new();
        // Statements encountered before any `o`/`g` land in implicit
        // "default" object/group containers.
        let mut object = Object::new("default".to_string());
        let mut group: Option<Group<P>> = None;
        for (idx, line) in input.lines().enumerate() {
            let (line, mut words) = match line {
                Ok(ref line) => (line.clone(), line.split_whitespace().filter(|s| !s.is_empty())),
                Err(err) => {
                    return Err(io::Error::new(io::ErrorKind::InvalidData,
                                              format!("failed to readline {}", err)));
                }
            };
            // The first word is the statement keyword (`v`, `vt`, `f`, ...).
            let first = words.next();
            match first {
                Some("v") => {
                    let (v0, v1, v2) = (words.next(), words.next(), words.next());
                    dat.parse_vertex(v0, v1, v2);
                }
                Some("vt") => {
                    let (t0, t1) = (words.next(), words.next());
                    dat.parse_texture(t0, t1);
                }
                Some("vn") => {
                    let (n0, n1, n2) = (words.next(), words.next(), words.next());
                    dat.parse_normal(n0, n1, n2);
                }
                Some("f") => {
                    let poly = match dat.parse_face(&mut words) {
                        Err(e) => {
                            return Err(io::Error::new(io::ErrorKind::InvalidData,
                                                      format!("could not parse line: {}\nline: {}: {}", e, idx, line)));
                        }
                        Ok(poly) => poly,
                    };
                    // Faces before any `g` statement go into an implicit
                    // "default" group.
                    group = Some(match group {
                        None => {
                            let mut g = Group::new("default".to_string());
                            g.polys.push(poly);
                            g
                        }
                        Some(mut g) => {
                            g.polys.push(poly);
                            g
                        }
                    });
                }
                Some("o") => {
                    // A new object closes out the in-progress group and object.
                    group = match group {
                        Some(val) => {
                            object.groups.push(val);
                            dat.objects.push(object);
                            None
                        }
                        None => None,
                    };
                    // NOTE(review): slices from index 1 (includes the space,
                    // removed by trim) whereas the "g" arm slices from 2.
                    object = if line.len() > 2 {
                        let name = line[1..].trim();
                        Object::new(name.to_string())
                    } else {
                        Object::new("default".to_string())
                    };
                }
                Some("g") => {
                    // A new group closes out the in-progress group.
                    group = match group {
                        Some(val) => {
                            object.groups.push(val);
                            None
                        }
                        None => None,
                    };
                    if line.len() > 2 {
                        let name = line[2..].trim();
                        group = Some(Group::new(name.to_string()));
                    }
                }
                Some("mtllib") => {
                    let name = words.next().expect("Failed to find name for mtllib");
                    dat.material_libs.push(name.to_string());
                }
                Some("usemtl") => {
                    let mut g = match group {
                        Some(g) => g,
                        None => Group::new("default".to_string()),
                    };
                    // we found a new material that was applied to an existing
                    // object. It is treated as a new group.
                    if g.material.is_some() {
                        object.groups.push(g.clone());
                        g.index += 1;
                        g.polys.clear();
                    }
                    // Store a name-only placeholder; `load_mtls` resolves it.
                    g.material = match words.next() {
                        Some(w) => Some(Cow::from(Material::new(w.to_string()))),
                        None => None,
                    };
                    group = Some(g);
                }
                // Smoothing groups are ignored.
                Some("s") => (),
                Some(other) => {
                    // Anything else must be a `#` comment line.
                    if !other.starts_with("#") {
                        panic!("Invalid token {:?} {:?}", other, words.next());
                    }
                }
                None => (),
            }
        }
        // Flush the trailing group and object.
        match group {
            Some(g) => object.groups.push(g),
            None => (),
        };
        dat.objects.push(object);
        Ok(dat)
    }
    /// Parse an .obj file already read into an in-memory byte buffer.
    ///
    /// The buffer is decoded as UTF-8 up front; invalid UTF-8 yields an
    /// `InvalidData` error. Statement handling mirrors `load_buf`.
    // NOTE(review): this duplicates load_buf's per-statement logic; the two
    // must be kept in sync when either changes.
    pub fn load(input: Box<[u8]>) -> io::Result<Self>
    {
        let mut dat = Obj::new();
        let mut object = Object::new("default".to_string());
        let mut group: Option<Group<P>> = None;
        let file = String::from_utf8(input.into_vec());
        match file {
            Ok(file) => {
                for (idx, line) in file.lines().enumerate() {
                    let mut words= line.split_whitespace().filter(|s| !s.is_empty());
                    // The first word is the statement keyword (`v`, `f`, ...).
                    let first = words.next();
                    match first {
                        Some("v") => {
                            let (v0, v1, v2) = (words.next(), words.next(), words.next());
                            dat.parse_vertex(v0, v1, v2);
                        }
                        Some("vt") => {
                            let (t0, t1) = (words.next(), words.next());
                            dat.parse_texture(t0, t1);
                        }
                        Some("vn") => {
                            let (n0, n1, n2) = (words.next(), words.next(), words.next());
                            dat.parse_normal(n0, n1, n2);
                        }
                        Some("f") => {
                            let poly = match dat.parse_face(&mut words) {
                                Err(e) => {
                                    return Err(io::Error::new(io::ErrorKind::InvalidData,
                                                              format!("could not parse line: {}\nline: {}: {}", e, idx, line)));
                                }
                                Ok(poly) => poly,
                            };
                            // Faces before any `g` statement go into an
                            // implicit "default" group.
                            group = Some(match group {
                                None => {
                                    let mut g = Group::new("default".to_string());
                                    g.polys.push(poly);
                                    g
                                }
                                Some(mut g) => {
                                    g.polys.push(poly);
                                    g
                                }
                            });
                        }
                        Some("o") => {
                            // A new object closes out the in-progress group
                            // and object.
                            group = match group {
                                Some(val) => {
                                    object.groups.push(val);
                                    dat.objects.push(object);
                                    None
                                }
                                None => None,
                            };
                            object = if line.len() > 2 {
                                let name = line[1..].trim();
                                Object::new(name.to_string())
                            } else {
                                Object::new("default".to_string())
                            };
                        }
                        Some("g") => {
                            // A new group closes out the in-progress group.
                            group = match group {
                                Some(val) => {
                                    object.groups.push(val);
                                    None
                                }
                                None => None,
                            };
                            if line.len() > 2 {
                                let name = line[2..].trim();
                                group = Some(Group::new(name.to_string()));
                            }
                        }
                        Some("mtllib") => {
                            let name = words.next().expect("Failed to find name for mtllib");
                            dat.material_libs.push(name.to_string());
                        }
                        Some("usemtl") => {
                            let mut g = match group {
                                Some(g) => g,
                                None => Group::new("default".to_string()),
                            };
                            // we found a new material that was applied to an existing
                            // object. It is treated as a new group.
                            if g.material.is_some() {
                                object.groups.push(g.clone());
                                g.index += 1;
                                g.polys.clear();
                            }
                            // Store a name-only placeholder material;
                            // `load_mtls` resolves it later.
                            g.material = match words.next() {
                                Some(w) => Some(Cow::from(Material::new(w.to_string()))),
                                None => None,
                            };
                            group = Some(g);
                        }
                        // Smoothing groups are ignored.
                        Some("s") => (),
                        Some(other) => {
                            // Anything else must be a `#` comment line.
                            if !other.starts_with("#") {
                                panic!("Invalid token {:?} {:?}", other, words.next());
                            }
                        }
                        None => (),
                    }
                }
            }
            Err(err) => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("failed to read file as String with utf8 {}", err),
                ));
            }
        }
        // Flush the trailing group and object.
        match group {
            Some(g) => object.groups.push(g),
            None => (),
        };
        dat.objects.push(object);
        Ok(dat)
    }
//TODO: make the type thread-safe and function parallelized
pub fn load_par(input: Box<[u8]>) -> io::Result<Self>
{
let mut dat = Obj::new();
let mut object = Object::new("default".to_string());
let mut group: Option<Group<P>> = None;
let file = String::from_utf8(input.into_vec());
match file {
Ok(file) => {
for (idx, line) in file.lines().enumerate() {
let mut words= line.split_whitespace().filter(|s| !s.is_empty());
let first = words.next();
match first {
Some("v") => {
let (v0, v1, v2) = (words.next(), words.next(), words.next());
dat.parse_vertex(v0, v1, v2);
}
Some("vt") => {
let (t0, t1) = (words.next(), words.next());
dat.parse_texture(t0, t1);
}
Some("vn") => {
let (n0, n1, n2) = (words.next(), words.next(), words.next());
dat.parse_normal(n0, n1, n2);
}
Some("f") => {
let poly = match dat.parse_face(&mut words) {
Err(e) => {
return Err(io::Error::new(io::ErrorKind::InvalidData,
format!("could not parse line: {}\nline: {}: {}", e, idx, line)));
}
Ok(poly) => poly,
};
group = Some(match group {
None => {
let mut g = Group::new("default".to_string());
g.polys.push(poly);
g
}
Some(mut g) => {
g.polys.push(poly);
g
}
});
}
Some("o") => {
group = match group {
Some(val) => {
object.groups.push(val);
dat.objects.push(object);
None
}
None => None,
};
object = if line.len() > 2 {
let name = line[1..].trim();
Object::new(name.to_string())
} else {
Object::new("default".to_string())
};
}
Some("g") => {
group = match group {
Some(val) => {
object.groups.push(val);
None
}
None => None,
};
if line.len() > 2 {
let name = line[2..].trim();
group = Some(Group::new(name.to_string()));
}
}
Some("mtllib") => {
let name = words.next().expect("Failed to find name for mtllib");
dat.material_libs.push(name.to_string());
}
Some("usemtl") => {
let mut g = match group {
Some(g) => g,
None => Group::new("default".to_string()),
};
// we found a new material that was applied to an existing
// object. It is treated as a new group.
if g.material.is_some() {
object.groups.push(g.clone());
g.index += 1;
g.polys.clear();
}
g.material = match words.next() {
Some(w) => Some(Cow::from(Material::new(w.to_string()))),
None => None,
};
group = Some(g);
}
Some("s") => (),
Some(other) => {
if !other.starts_with("#") {
panic!("Invalid token {:?} {:?}", other, words.next());
}
}
None => (),
}
}
}
Err(err) => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("failed to read file as String with utf8 {}", err),
));
}
}
match group {
Some(g) => object.groups.push(g),
None => (),
};
dat.objects.push(object);
Ok(dat)
}
}
| 38.171521 | 128 | 0.358923 |
91d4cd50cb67080f9db99877ec5032f2f1ac8994 | 11,205 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Service config.
///
///
/// Service configuration allows for customization of endpoints, region, credentials providers,
/// and retry configuration. Generally, it is constructed automatically for you from a shared
/// configuration loaded by the `aws-config` crate. For example:
///
/// ```ignore
/// // Load a shared config from the environment
/// let shared_config = aws_config::from_env().load().await;
/// // The client constructor automatically converts the shared config into the service config
/// let client = Client::new(&shared_config);
/// ```
///
/// The service config can also be constructed manually using its builder.
///
pub struct Config {
    // Optional application name, included in the user agent sent with
    // requests (see `Config::app_name`).
    app_name: Option<aws_types::app_name::AppName>,
    // Timeout settings to apply to requests.
    pub(crate) timeout_config: Option<aws_smithy_types::timeout::TimeoutConfig>,
    // Async sleep implementation.
    pub(crate) sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
    // Retry behavior configuration.
    pub(crate) retry_config: Option<aws_smithy_types::retry::RetryConfig>,
    // Endpoint resolver to use when making requests.
    pub(crate) endpoint_resolver: ::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>,
    // AWS region to use when making requests.
    pub(crate) region: Option<aws_types::region::Region>,
    // Provider of the credentials used for requests.
    pub(crate) credentials_provider: aws_types::credentials::SharedCredentialsProvider,
}
impl std::fmt::Debug for Config {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Deliberately emits only the struct name, no fields — presumably to
        // keep sensitive configuration (e.g. the credentials provider) out of
        // debug output.
        f.debug_struct("Config").finish()
    }
}
impl Config {
    /// Creates a new [service config](crate::Config) from a [shared `config`](aws_types::config::Config).
    pub fn new(config: &aws_types::config::Config) -> Self {
        Builder::from(config).build()
    }
    /// Constructs a config builder.
    pub fn builder() -> Builder {
        Default::default()
    }
    /// Returns the name of the app that is using the client, if it was provided.
    ///
    /// This _optional_ name is used to identify the application in the user agent that
    /// gets sent along with requests.
    pub fn app_name(&self) -> Option<&aws_types::app_name::AppName> {
        self.app_name.as_ref()
    }
    /// The signature version 4 service signing name to use in the credential scope when signing requests.
    ///
    /// The signing service may be overridden by the `Endpoint`, or by specifying a custom
    /// [`SigningService`](aws_types::SigningService) during operation construction
    pub fn signing_service(&self) -> &'static str {
        "elasticmapreduce"
    }
}
/// Builder for creating a `Config`.
#[derive(Default)]
pub struct Builder {
    // Optional application name for the user agent.
    app_name: Option<aws_types::app_name::AppName>,
    // Timeout settings to apply to requests.
    timeout_config: Option<aws_smithy_types::timeout::TimeoutConfig>,
    // Async sleep implementation.
    sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
    // Retry behavior configuration.
    retry_config: Option<aws_smithy_types::retry::RetryConfig>,
    // Endpoint resolver; `build` falls back to the generated default resolver.
    endpoint_resolver: Option<::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>>,
    // AWS region to use when making requests.
    region: Option<aws_types::region::Region>,
    // Credentials provider; `build` falls back to `NoCredentials`.
    credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
}
impl Builder {
    /// Constructs a config builder.
    pub fn new() -> Self {
        Self::default()
    }
    /// Sets the name of the app that is using the client.
    ///
    /// This _optional_ name is used to identify the application in the user agent that
    /// gets sent along with requests.
    pub fn app_name(mut self, app_name: aws_types::app_name::AppName) -> Self {
        self.app_name = Some(app_name);
        self
    }
    /// Sets the name of the app that is using the client.
    ///
    /// This _optional_ name is used to identify the application in the user agent that
    /// gets sent along with requests.
    pub fn set_app_name(&mut self, app_name: Option<aws_types::app_name::AppName>) -> &mut Self {
        self.app_name = app_name;
        self
    }
    /// Set the timeout_config for the builder
    ///
    /// # Examples
    /// ```rust
    /// # use std::time::Duration;
    /// use aws_sdk_emr::config::Config;
    /// use aws_smithy_types::timeout::TimeoutConfig;
    ///
    /// let timeout_config = TimeoutConfig::new()
    ///     .with_api_call_attempt_timeout(Some(Duration::from_secs(1)));
    /// let config = Config::builder().timeout_config(timeout_config).build();
    /// ```
    pub fn timeout_config(
        mut self,
        timeout_config: aws_smithy_types::timeout::TimeoutConfig,
    ) -> Self {
        self.timeout_config = Some(timeout_config);
        self
    }
    /// Set the timeout_config for the builder
    ///
    /// # Examples
    /// ```rust
    /// # use std::time::Duration;
    /// use aws_sdk_emr::config::{Builder, Config};
    /// use aws_smithy_types::timeout::TimeoutConfig;
    ///
    /// fn set_request_timeout(builder: &mut Builder) {
    ///     let timeout_config = TimeoutConfig::new()
    ///         .with_api_call_timeout(Some(Duration::from_secs(3)));
    ///     builder.set_timeout_config(Some(timeout_config));
    /// }
    ///
    /// let mut builder = Config::builder();
    /// set_request_timeout(&mut builder);
    /// let config = builder.build();
    /// ```
    pub fn set_timeout_config(
        &mut self,
        timeout_config: Option<aws_smithy_types::timeout::TimeoutConfig>,
    ) -> &mut Self {
        self.timeout_config = timeout_config;
        self
    }
    /// Set the sleep_impl for the builder
    ///
    /// # Examples
    /// ```rust
    /// use aws_sdk_emr::config::Config;
    /// use aws_smithy_async::rt::sleep::AsyncSleep;
    /// use aws_smithy_async::rt::sleep::Sleep;
    ///
    /// #[derive(Debug)]
    /// pub struct ForeverSleep;
    ///
    /// impl AsyncSleep for ForeverSleep {
    ///     fn sleep(&self, duration: std::time::Duration) -> Sleep {
    ///         Sleep::new(std::future::pending())
    ///     }
    /// }
    ///
    /// let sleep_impl = std::sync::Arc::new(ForeverSleep);
    /// let config = Config::builder().sleep_impl(sleep_impl).build();
    /// ```
    pub fn sleep_impl(
        mut self,
        sleep_impl: std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>,
    ) -> Self {
        self.sleep_impl = Some(sleep_impl);
        self
    }
    /// Set the sleep_impl for the builder
    ///
    /// # Examples
    /// ```rust
    /// use aws_sdk_emr::config::{Builder, Config};
    /// use aws_smithy_async::rt::sleep::AsyncSleep;
    /// use aws_smithy_async::rt::sleep::Sleep;
    ///
    /// #[derive(Debug)]
    /// pub struct ForeverSleep;
    ///
    /// impl AsyncSleep for ForeverSleep {
    ///     fn sleep(&self, duration: std::time::Duration) -> Sleep {
    ///         Sleep::new(std::future::pending())
    ///     }
    /// }
    ///
    /// fn set_never_ending_sleep_impl(builder: &mut Builder) {
    ///     let sleep_impl = std::sync::Arc::new(ForeverSleep);
    ///     builder.set_sleep_impl(Some(sleep_impl));
    /// }
    ///
    /// let mut builder = Config::builder();
    /// set_never_ending_sleep_impl(&mut builder);
    /// let config = builder.build();
    /// ```
    pub fn set_sleep_impl(
        &mut self,
        sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
    ) -> &mut Self {
        self.sleep_impl = sleep_impl;
        self
    }
    /// Set the retry_config for the builder
    ///
    /// # Examples
    /// ```rust
    /// use aws_sdk_emr::config::Config;
    /// use aws_smithy_types::retry::RetryConfig;
    ///
    /// let retry_config = RetryConfig::new().with_max_attempts(5);
    /// let config = Config::builder().retry_config(retry_config).build();
    /// ```
    pub fn retry_config(mut self, retry_config: aws_smithy_types::retry::RetryConfig) -> Self {
        self.retry_config = Some(retry_config);
        self
    }
    /// Set the retry_config for the builder
    ///
    /// # Examples
    /// ```rust
    /// use aws_sdk_emr::config::{Builder, Config};
    /// use aws_smithy_types::retry::RetryConfig;
    ///
    /// fn disable_retries(builder: &mut Builder) {
    ///     let retry_config = RetryConfig::new().with_max_attempts(1);
    ///     builder.set_retry_config(Some(retry_config));
    /// }
    ///
    /// let mut builder = Config::builder();
    /// disable_retries(&mut builder);
    /// let config = builder.build();
    /// ```
    pub fn set_retry_config(
        &mut self,
        retry_config: Option<aws_smithy_types::retry::RetryConfig>,
    ) -> &mut Self {
        self.retry_config = retry_config;
        self
    }
    // TODO(docs): include an example of using a static endpoint
    /// Sets the endpoint resolver to use when making requests.
    pub fn endpoint_resolver(
        mut self,
        endpoint_resolver: impl aws_endpoint::ResolveAwsEndpoint + 'static,
    ) -> Self {
        self.endpoint_resolver = Some(::std::sync::Arc::new(endpoint_resolver));
        self
    }
    /// Sets the AWS region to use when making requests.
    pub fn region(mut self, region: impl Into<Option<aws_types::region::Region>>) -> Self {
        self.region = region.into();
        self
    }
    /// Sets the credentials provider for this service
    pub fn credentials_provider(
        mut self,
        credentials_provider: impl aws_types::credentials::ProvideCredentials + 'static,
    ) -> Self {
        self.set_credentials_provider(Some(
            aws_types::credentials::SharedCredentialsProvider::new(credentials_provider),
        ));
        self
    }
    /// Sets the credentials provider for this service
    pub fn set_credentials_provider(
        &mut self,
        credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
    ) -> &mut Self {
        self.credentials_provider = credentials_provider;
        self
    }
    /// Builds a [`Config`].
    pub fn build(self) -> Config {
        // Fall back to the generated endpoint resolver and to the
        // `NoCredentials` provider for anything not configured explicitly.
        let endpoint_resolver = self
            .endpoint_resolver
            .unwrap_or_else(|| ::std::sync::Arc::new(crate::aws_endpoint::endpoint_resolver()));
        let credentials_provider = self.credentials_provider.unwrap_or_else(|| {
            aws_types::credentials::SharedCredentialsProvider::new(
                crate::no_credentials::NoCredentials,
            )
        });
        Config {
            app_name: self.app_name,
            timeout_config: self.timeout_config,
            sleep_impl: self.sleep_impl,
            retry_config: self.retry_config,
            endpoint_resolver,
            region: self.region,
            credentials_provider,
        }
    }
}
impl From<&aws_types::config::Config> for Builder {
    fn from(input: &aws_types::config::Config) -> Self {
        // Copy every shared setting into a fresh service-level builder.
        let mut builder = Builder::default().region(input.region().cloned());
        builder.set_retry_config(input.retry_config().cloned());
        builder.set_timeout_config(input.timeout_config().cloned());
        builder.set_sleep_impl(input.sleep_impl().clone());
        builder.set_credentials_provider(input.credentials_provider().cloned());
        builder.set_app_name(input.app_name().cloned());
        builder
    }
}
impl From<&aws_types::config::Config> for Config {
    fn from(config: &aws_types::config::Config) -> Self {
        // Convenience: shared config -> service builder -> service config.
        let builder = Builder::from(config);
        builder.build()
    }
}
| 36.737705 | 106 | 0.627398 |
e2caa2426d7e8a6d6a8eebb0ba8a10cfb76daf9d | 14,938 | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use common_arrow::parquet::FileMetaData;
use common_datablocks::DataBlock;
use common_datavalues::DataSchemaRef;
use common_exception::ErrorCode;
use common_exception::Result;
use common_planners::Expression;
use common_streams::SendableDataBlockStream;
use futures::stream::try_unfold;
use futures::stream::Stream;
use futures::StreamExt;
use futures::TryStreamExt;
use opendal::Operator;
use super::block_writer;
use crate::pipelines::transforms::ExpressionExecutor;
use crate::sessions::QueryContext;
use crate::storages::fuse::io::TableMetaLocationGenerator;
use crate::storages::fuse::meta::ColumnId;
use crate::storages::fuse::meta::ColumnMeta;
use crate::storages::fuse::meta::SegmentInfo;
use crate::storages::fuse::meta::Statistics;
use crate::storages::fuse::statistics::accumulator::BlockStatistics;
use crate::storages::fuse::statistics::StatisticsAccumulator;
/// A pinned stream of the `SegmentInfo`s produced while writing blocks out.
pub type SegmentInfoStream =
    std::pin::Pin<Box<dyn futures::stream::Stream<Item = Result<SegmentInfo>> + Send>>;
pub struct BlockStreamWriter {
    // Number of blocks that are summarized into one segment.
    num_block_threshold: usize,
    // Storage accessor used when writing blocks.
    data_accessor: Operator,
    // Blocks written since the last segment was emitted.
    number_of_blocks_accumulated: usize,
    // Running statistics for the segment in progress; `None` right after a
    // segment has been flushed.
    statistics_accumulator: Option<StatisticsAccumulator>,
    // Generates the storage locations for newly written blocks.
    meta_locations: TableMetaLocationGenerator,
    // Schema the blocks are projected to before serialization.
    data_schema: DataSchemaRef,
    // Cluster-key expressions, and their column positions in the incoming
    // blocks (resolved lazily on the first block).
    cluster_keys: Vec<Expression>,
    cluster_keys_index: Option<Vec<usize>>,
    // Projection executor, built lazily when the incoming schema differs from
    // `data_schema`.
    expression_executor: Option<ExpressionExecutor>,
    ctx: Arc<QueryContext>,
}
impl BlockStreamWriter {
    /// Drives `block_stream` through compaction and writing, yielding one
    /// `SegmentInfo` per `block_per_segment` written blocks.
    ///
    /// Pipeline: drop empty blocks -> re-shape to `row_per_block` rows each ->
    /// write each block and fold its metadata into segments.
    pub async fn write_block_stream(
        ctx: Arc<QueryContext>,
        block_stream: SendableDataBlockStream,
        row_per_block: usize,
        block_per_segment: usize,
        meta_locations: TableMetaLocationGenerator,
        data_schema: DataSchemaRef,
        cluster_keys: Vec<Expression>,
    ) -> SegmentInfoStream {
        // filter out empty blocks
        let block_stream =
            block_stream.try_filter(|block| std::future::ready(block.num_rows() > 0));
        // merge or split the blocks according to the settings `row_per_block`
        let block_stream_shaper = BlockCompactor::new(row_per_block);
        let block_stream = Self::transform(block_stream, block_stream_shaper);
        // flatten a TryStream of Vec<DataBlock> into a TryStream of DataBlock
        let block_stream = block_stream
            .map_ok(|vs| futures::stream::iter(vs.into_iter().map(Ok)))
            .try_flatten();
        // Write out the blocks.
        // And transform the stream of DataBlocks into Stream of SegmentInfo at the same time.
        let block_writer = BlockStreamWriter::new(
            block_per_segment,
            meta_locations,
            ctx,
            data_schema,
            cluster_keys,
        );
        let segments = Self::transform(Box::pin(block_stream), block_writer);
        Box::pin(segments)
    }
    /// Creates a writer that emits one segment per `num_block_threshold`
    /// written blocks.
    pub fn new(
        num_block_threshold: usize,
        meta_locations: TableMetaLocationGenerator,
        ctx: Arc<QueryContext>,
        data_schema: DataSchemaRef,
        cluster_keys: Vec<Expression>,
    ) -> Self {
        let data_accessor = ctx.get_storage_operator().unwrap();
        Self {
            num_block_threshold,
            data_accessor,
            number_of_blocks_accumulated: 0,
            statistics_accumulator: None,
            meta_locations,
            data_schema,
            cluster_keys,
            cluster_keys_index: None,
            expression_executor: None,
            ctx,
        }
    }
    /// Transforms a stream of S to a stream of T
    ///
    /// It's more like [Stream::filter_map] than [Stream::map] in the sense
    /// that m items of input stream may be mapped to n items, where m <> n (but
    /// for the convenience of impl, [TryStreamExt::try_unfold] is used).
    pub(crate) fn transform<R, A, S, T>(
        inputs: R,
        mapper: A,
    ) -> impl futures::stream::Stream<Item = Result<T>>
    where
        R: Stream<Item = Result<S>> + Unpin,
        A: Compactor<S, T>,
    {
        // For the convenience of passing mutable state back and forth, `unfold` is used
        let init_state = (Some(mapper), inputs);
        try_unfold(init_state, |(mapper, mut inputs)| async move {
            if let Some(mut acc) = mapper {
                while let Some(item) = inputs.next().await {
                    match acc.compact(item?).await? {
                        Some(item) => return Ok(Some((item, (Some(acc), inputs)))),
                        None => continue,
                    }
                }
                // Input exhausted: flush the compactor once; the `None` state
                // below ends the stream on the next poll.
                let remains = acc.finish()?;
                Ok(remains.map(|t| (t, (None, inputs))))
            } else {
                Ok::<_, ErrorCode>(None)
            }
        })
    }
    /// Writes one block to storage and folds its statistics into the current
    /// segment; returns `Some(SegmentInfo)` once `num_block_threshold` blocks
    /// have been accumulated, `None` otherwise.
    async fn write_block(&mut self, data_block: DataBlock) -> Result<Option<SegmentInfo>> {
        let input_schema = data_block.schema().clone();
        // Resolve (once) the column positions of the cluster keys in the
        // incoming blocks.
        let cluster_keys_index = if let Some(cluster_keys_index) = &self.cluster_keys_index {
            cluster_keys_index.clone()
        } else {
            let fields = input_schema.fields().clone();
            let index = self
                .cluster_keys
                .iter()
                .map(|e| {
                    let cname = e.column_name();
                    fields.iter().position(|f| f.name() == &cname).unwrap()
                })
                .collect::<Vec<_>>();
            self.cluster_keys_index = Some(index.clone());
            index
        };
        let cluster_stats =
            BlockStatistics::clusters_statistics(cluster_keys_index, data_block.clone())?;
        // Remove unused columns before serialize
        let block = if self.data_schema != input_schema {
            // Build (once) a projection executor from the input schema to the
            // table schema, then reuse it for every subsequent block.
            let executor = if let Some(executor) = &self.expression_executor {
                executor.clone()
            } else {
                let exprs: Vec<Expression> = input_schema
                    .fields()
                    .iter()
                    .map(|f| Expression::Column(f.name().to_owned()))
                    .collect();
                let executor = ExpressionExecutor::try_create(
                    self.ctx.clone(),
                    "remove unused columns",
                    input_schema,
                    self.data_schema.clone(),
                    exprs,
                    true,
                )?;
                executor.validate()?;
                self.expression_executor = Some(executor.clone());
                executor
            };
            executor.execute(&data_block)?
        } else {
            data_block
        };
        let mut acc = self.statistics_accumulator.take().unwrap_or_default();
        let partial_acc = acc.begin(&block, cluster_stats)?;
        let schema = block.schema().to_arrow();
        let location = self.meta_locations.gen_block_location();
        let (file_size, file_meta_data) =
            block_writer::write_block(&schema, block, self.data_accessor.clone(), &location)
                .await?;
        let col_metas = Self::column_metas(&file_meta_data)?;
        acc = partial_acc.end(file_size, location, col_metas);
        self.number_of_blocks_accumulated += 1;
        if self.number_of_blocks_accumulated >= self.num_block_threshold {
            // Segment complete: summarize the accumulated statistics.
            let summary = acc.summary()?;
            let cluster_stats = acc.summary_clusters();
            let seg = SegmentInfo::new(acc.blocks_metas, Statistics {
                row_count: acc.summary_row_count,
                block_count: acc.summary_block_count,
                uncompressed_byte_size: acc.in_memory_size,
                compressed_byte_size: acc.file_size,
                col_stats: summary,
                cluster_stats,
            });
            // Reset state
            self.number_of_blocks_accumulated = 0;
            self.statistics_accumulator = None;
            Ok(Some(seg))
        } else {
            // Stash the state
            self.statistics_accumulator = Some(acc);
            Ok(None)
        }
    }
    /// Extracts per-column metadata (offset, length, value count) from the
    /// parquet footer of a freshly written block.
    fn column_metas(file_meta: &FileMetaData) -> Result<HashMap<ColumnId, ColumnMeta>> {
        // currently we use one group only
        let num_row_groups = file_meta.row_groups.len();
        if num_row_groups != 1 {
            return Err(ErrorCode::ParquetError(format!(
                "invalid parquet file, expects only one row group, but got {}",
                num_row_groups
            )));
        }
        let row_group = &file_meta.row_groups[0];
        let mut col_metas = HashMap::with_capacity(row_group.columns.len());
        for (idx, col_chunk) in row_group.columns.iter().enumerate() {
            match &col_chunk.meta_data {
                Some(chunk_meta) => {
                    // A column chunk starts at its dictionary page when one is
                    // present, otherwise at its first data page.
                    let col_start =
                        if let Some(dict_page_offset) = chunk_meta.dictionary_page_offset {
                            dict_page_offset
                        } else {
                            chunk_meta.data_page_offset
                        };
                    let col_len = chunk_meta.total_compressed_size;
                    assert!(
                        col_start >= 0 && col_len >= 0,
                        "column start and length should not be negative"
                    );
                    let num_values = chunk_meta.num_values as u64;
                    let res = ColumnMeta {
                        offset: col_start as u64,
                        len: col_len as u64,
                        num_values,
                    };
                    col_metas.insert(idx as u32, res);
                }
                None => {
                    return Err(ErrorCode::ParquetError(format!(
                        "invalid parquet file, meta data of column idx {} is empty",
                        idx
                    )))
                }
            }
        }
        Ok(col_metas)
    }
}
/// Takes elements of type S in, and spills elements of type T.
#[async_trait::async_trait]
pub trait Compactor<S, T> {
    /// Takes an element s of type S and converts it into [Some<T>] if
    /// possible; otherwise, returns [None].
    ///
    /// For example, given a DataBlock s and a setting `max_row_per_block`:
    /// - Some<Vec<DataBlock>> might be returned if s contains more rows than
    ///   `max_row_per_block`; in this case, s is split into a vector of
    ///   (smaller) blocks;
    /// - or [None] might be returned if s is too small; in this case, s is
    ///   accumulated.
    async fn compact(&mut self, s: S) -> Result<Option<T>>;
    /// Indicates that no more elements remain.
    ///
    /// Spills [Some<T>] if there were any, otherwise [None].
    fn finish(self) -> Result<Option<T>>;
}
#[async_trait::async_trait]
impl Compactor<DataBlock, SegmentInfo> for BlockStreamWriter {
    /// Feeds one block into the writer; yields a `SegmentInfo` once enough
    /// blocks have been accumulated.
    async fn compact(&mut self, s: DataBlock) -> Result<Option<SegmentInfo>> {
        self.write_block(s).await
    }
    /// Summarizes whatever statistics are still pending into a final segment.
    fn finish(mut self) -> Result<Option<SegmentInfo>> {
        let acc = match self.statistics_accumulator.take() {
            Some(acc) => acc,
            None => return Ok(None),
        };
        let col_stats = acc.summary()?;
        let cluster_stats = acc.summary_clusters();
        let segment = SegmentInfo::new(acc.blocks_metas, Statistics {
            row_count: acc.summary_row_count,
            block_count: acc.summary_block_count,
            uncompressed_byte_size: acc.in_memory_size,
            compressed_byte_size: acc.file_size,
            col_stats,
            cluster_stats,
        });
        Ok(Some(segment))
    }
}
/// Merges undersized `DataBlock`s and splits oversized ones so that emitted
/// blocks carry `max_row_per_block` rows each.
pub struct BlockCompactor {
    // TODO threshold of block size
    /// Max number of rows per data block
    max_row_per_block: usize,
    /// Number of rows accumulated in `accumulated_blocks`.
    accumulated_rows: usize,
    /// Small data blocks accumulated
    ///
    /// Invariant: accumulated_blocks.iter().map(|item| item.num_rows()).sum() < max_row_per_block
    accumulated_blocks: Vec<DataBlock>,
}
impl BlockCompactor {
pub fn new(max_row_per_block: usize) -> Self {
Self {
max_row_per_block,
accumulated_rows: 0,
accumulated_blocks: Vec::new(),
}
}
fn reset(&mut self, remains: Vec<DataBlock>) {
self.accumulated_rows = remains.iter().map(|item| item.num_rows()).sum();
self.accumulated_blocks = remains;
}
/// split or merge the DataBlock according to the configuration
pub fn compact(&mut self, block: DataBlock) -> Result<Option<Vec<DataBlock>>> {
let num_rows = block.num_rows();
// For cases like stmt `insert into .. select ... from ...`, the blocks that feeded
// are likely to be properly sized, i.e. exeactly `max_row_per_block` rows per block,
// In that cases, just return them.
if num_rows == self.max_row_per_block {
return Ok(Some(vec![block]));
}
if num_rows + self.accumulated_rows < self.max_row_per_block {
self.accumulated_rows += num_rows;
self.accumulated_blocks.push(block);
Ok(None)
} else {
let mut blocks = std::mem::take(&mut self.accumulated_blocks);
blocks.push(block);
let merged = DataBlock::concat_blocks(&blocks)?;
let blocks = DataBlock::split_block_by_size(&merged, self.max_row_per_block)?;
let (result, remains) = blocks
.into_iter()
.partition(|item| item.num_rows() >= self.max_row_per_block);
self.reset(remains);
Ok(Some(result))
}
}
/// Pack the remainders into a DataBlock
pub fn finish(self) -> Result<Option<Vec<DataBlock>>> {
let remains = self.accumulated_blocks;
Ok(if remains.is_empty() {
None
} else {
Some(vec![DataBlock::concat_blocks(&remains)?])
})
}
}
#[async_trait::async_trait]
impl Compactor<DataBlock, Vec<DataBlock>> for BlockCompactor {
    // Pure delegation to the inherent methods; the fully-qualified syntax
    // makes it explicit that the inherent implementation, not this trait
    // method, is being invoked.
    async fn compact(&mut self, block: DataBlock) -> Result<Option<Vec<DataBlock>>> {
        BlockCompactor::compact(self, block)
    }
    fn finish(self) -> Result<Option<Vec<DataBlock>>> {
        BlockCompactor::finish(self)
    }
}
| 37.438596 | 100 | 0.583478 |
1a704f0004a3882d04418e0638c4d835f8375210 | 554 | mod ansi_sql;
mod generic_sql;
pub mod keywords;
mod postgresql;
pub use self::ansi_sql::AnsiSqlDialect;
pub use self::generic_sql::GenericSqlDialect;
pub use self::postgresql::PostgreSqlDialect;
/// The lexical rules that distinguish one SQL dialect from another:
/// its keyword set and which characters may form identifiers.
pub trait Dialect {
    /// Get a list of keywords for this dialect
    fn keywords(&self) -> Vec<&'static str>;
    /// Determine if a character is a valid identifier start character
    fn is_identifier_start(&self, ch: char) -> bool;
    /// Determine if a character is a valid identifier character
    fn is_identifier_part(&self, ch: char) -> bool;
}
| 30.777778 | 70 | 0.725632 |
61b24eff303092fb70a5d15f0c81369cfd677209 | 4,932 | use graph::prelude::CheapClone;
use once_cell::sync::OnceCell;
use stable_hash::crypto::SetHasher;
use stable_hash::prelude::*;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::{Arc, Condvar, Mutex};
// Digest type produced by `SetHasher`; used as the cache key.
type Hash = <SetHasher as StableHasher>::Out;
/// The 'true' cache entry that lives inside the Arc.
/// When the last Arc is dropped, this is dropped, and the cache is removed.
#[derive(Debug)]
struct CacheEntryInner<R> {
    // Considered using once_cell::sync::Lazy,
    // but that quickly becomes a mess of generics
    // or runs into the issue that Box<dyn FnOnce> can't be
    // called at all, so doesn't impl FnOnce as Lazy requires.
    //
    // `None` records that the producing closure panicked (see `set_panic`).
    result: OnceCell<Option<R>>,
    // Temporary to implement OnceCell.wait
    condvar: Condvar,
    lock: Mutex<bool>,
}
impl<R> CacheEntryInner<R> {
    /// Creates an empty, not-yet-resolved entry.
    fn new() -> Arc<Self> {
        Arc::new(Self {
            result: OnceCell::new(),
            condvar: Condvar::new(),
            lock: Mutex::new(false),
        })
    }
    /// Publishes `value` (`None` meaning "producer panicked") and wakes all
    /// waiters. Must be called at most once per entry.
    fn set_inner(&self, value: Option<R>) {
        // Store the cached value
        self.result
            .set(value)
            .unwrap_or_else(|_| panic!("Cache set should only be called once"))
        ;
        // Wakeup consumers of the cache
        // The flag is flipped under the mutex so a waiter cannot miss the
        // notification between checking `is_set` and blocking on the condvar.
        let mut is_set = self.lock.lock().unwrap();
        *is_set = true;
        self.condvar.notify_all();
    }
    /// Publishes a successfully computed value.
    fn set(&self, value: R) {
        self.set_inner(Some(value));
    }
    /// Records that the producer panicked; `wait` will then panic instead of
    /// blocking forever.
    fn set_panic(&self) {
        self.set_inner(None);
    }
    /// Blocks until a value has been published, then returns a reference to
    /// it. Panics if the producing thread panicked.
    fn wait(&self) -> &R {
        // Happy path - already cached.
        if let Some(r) = self.result.get() {
            match r.as_ref() {
                Some(r) => r,
                // TODO: Instead of having an Option,
                // retain panic information and propagate it.
                None => panic!("Query panicked"),
            }
        } else {
            // Wait for the item to be placed in the cache.
            let mut is_set = self.lock.lock().unwrap();
            while !*is_set {
                is_set = self.condvar.wait(is_set).unwrap();
            }
            // Recurse: `is_set` guarantees `result` is now populated, so this
            // call takes the happy path above.
            self.wait()
        }
    }
}
/// On drop, call set_panic on self.value,
/// unless set was called.
struct PanicHelper<R> {
    // `Some` while armed; `set` takes it out, disarming the drop behavior.
    value: Option<Arc<CacheEntryInner<R>>>,
}
impl<R> Drop for PanicHelper<R> {
    fn drop(&mut self) {
        // If `set` was never reached, the producer panicked (or unwound for
        // some other reason): mark the entry so waiters are woken rather than
        // blocking forever.
        match self.value.take() {
            Some(inner) => inner.set_panic(),
            None => {}
        }
    }
}
impl<R> PanicHelper<R> {
fn new(value: Arc<CacheEntryInner<R>>) -> Self {
Self { value: Some(value) }
}
fn set(mut self, r: R) -> Arc<CacheEntryInner<R>> {
let value = self.value.take().unwrap();
value.set(r);
value
}
}
/// Cache that keeps a result around as long as it is still being processed.
/// The cache ensures that the query is not re-entrant, so multiple consumers
/// of identical queries will not execute them in parallel.
///
/// This has a lot in common with AsyncCache in the network-services repo,
/// but is sync instead of async, and more specialized.
pub struct QueryCache<R> {
    // In-flight computations keyed by query hash; an entry is removed as soon
    // as the thread computing it finishes (see the defer in `cached_query`).
    cache: Arc<Mutex<HashMap<Hash, Arc<CacheEntryInner<R>>>>>,
}
impl<R: CheapClone> QueryCache<R> {
    /// Creates an empty cache.
    pub fn new() -> Self {
        Self {
            cache: Arc::new(Mutex::new(HashMap::new())),
        }
    }
    /// Assumption: Whatever F is passed in consistently returns the same
    /// value for any input - for all values of F used with this Cache.
    ///
    /// If another thread is already computing the result for `hash`, blocks
    /// until it is done and returns a cheap clone of that result; otherwise
    /// runs `f` on this thread and shares its result with concurrent callers.
    pub fn cached_query<F: FnOnce() -> R>(&self, hash: Hash, f: F) -> R {
        let work = {
            let mut cache = self.cache.lock().unwrap();
            // Try to pull the item out of the cache and return it.
            // If we get past this expr, it means this thread will do
            // the work and fulfill that 'promise' in this work variable.
            match cache.entry(hash) {
                Entry::Occupied(entry) => {
                    // Another thread is doing the work, release the lock and wait for it.
                    let entry = entry.get().cheap_clone();
                    drop(cache);
                    return entry.wait().cheap_clone();
                }
                Entry::Vacant(entry) => {
                    let uncached = CacheEntryInner::new();
                    entry.insert(uncached.clone());
                    uncached
                }
            }
        };
        let _remove_guard = defer::defer(|| {
            // Remove this from the list of in-flight work.
            self.cache.lock().unwrap().remove(&hash);
        });
        // Now that we have taken on the responsibility, propagate panics to
        // make sure that no threads wait forever on a result that will never
        // come.
        let work = PanicHelper::new(work);
        // Actually compute the value and then share it with waiters.
        let value = f();
        work.set(value.cheap_clone());
        value
    }
}
| 31.414013 | 90 | 0.562247 |
38b08ef380efd4312793073a50116729e07e8e41 | 12,216 | use crate::{ConnectParams, HdbError, HdbResult, IntoConnectParamsBuilder, ServerCerts};
use secstr::SecStr;
/// A builder for `ConnectParams`.
///
/// # Example
///
/// ```rust
/// use hdbconnect::ConnectParams;
///
/// let connect_params = ConnectParams::builder()
/// .hostname("abcd123")
/// .port(2222)
/// .dbuser("MEIER")
/// .password("schlau")
/// .build()
/// .unwrap();
/// ```
///
/// ## Instantiating a `ConnectParamsBuilder` from a URL
///
/// The URL is supposed to have the same form as for `ConnectParams`
/// (i.e. `<scheme>://<username>:<password>@<host>:<port>[<options>]`,
/// see [Using a URL](struct.ConnectParams.html#using-a-url)),
/// but only scheme and host
/// are mandatory.
///
/// ### Example
///
/// ```rust
/// use hdbconnect::IntoConnectParamsBuilder;
///
/// let conn_params = "hdbsql://abcd123:2222"
/// .into_connect_params_builder()
/// .unwrap()
/// .dbuser("MEIER")
/// .password("schlau")
/// .build()
/// .unwrap();
/// ```
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[serde(into = "String")]
pub struct ConnectParamsBuilder {
    hostname: Option<String>,
    port: Option<u16>,
    dbuser: Option<String>,
    // Excluded from serialization (`#[serde(skip)]`), so the password never
    // appears in serialized output.
    #[serde(skip)]
    password: Option<SecStr>,
    clientlocale: Option<String>,
    server_certs: Vec<ServerCerts>,
    // Additional runtime parameters as (name, value) pairs.
    options: Vec<(String, String)>,
    #[cfg(feature = "alpha_nonblocking")]
    use_nonblocking: bool,
}
impl ConnectParamsBuilder {
/// Creates a new builder.
pub fn new() -> Self {
Self {
hostname: None,
port: None,
dbuser: None,
password: None,
clientlocale: None,
server_certs: Vec::<ServerCerts>::default(),
options: vec![],
#[cfg(feature = "alpha_nonblocking")]
use_nonblocking: false,
}
}
/// Sets the hostname.
pub fn hostname<H: AsRef<str>>(&mut self, hostname: H) -> &mut Self {
self.hostname = Some(hostname.as_ref().to_owned());
self
}
/// Sets the port.
pub fn port(&mut self, port: u16) -> &mut Self {
self.port = Some(port);
self
}
/// Sets the database user.
pub fn dbuser<D: AsRef<str>>(&mut self, dbuser: D) -> &mut Self {
self.dbuser = Some(dbuser.as_ref().to_owned());
self
}
/// Sets the password.
pub fn password<P: AsRef<str>>(&mut self, pw: P) -> &mut Self {
self.password = Some(SecStr::new(pw.as_ref().as_bytes().to_vec()));
self
}
/// Unsets the password.
pub fn unset_password(&mut self) -> &mut Self {
self.password = None;
self
}
/// Sets the client locale.
pub fn clientlocale<P: AsRef<str>>(&mut self, cl: P) -> &mut Self {
self.clientlocale = Some(cl.as_ref().to_owned());
self
}
/// Sets the client locale from the value of the environment variable LANG
pub fn clientlocale_from_env_lang(&mut self) -> &mut Self {
self.clientlocale = match std::env::var("LANG") {
Ok(l) => Some(l),
Err(_) => None,
};
self
}
/// Sets the client locale.
#[cfg(feature = "alpha_nonblocking")]
pub fn use_nonblocking(&mut self) -> &mut Self {
self.use_nonblocking = true;
self
}
/// Makes the driver use TLS for the connection to the database.
///
/// Requires that the server's certificate is provided with one of the
/// enum variants of [`ServerCerts`](enum.ServerCerts.html).
///
/// If needed, you can call this function multiple times with different `ServerCert` variants.
///
/// Example:
///
/// ```rust,no_run
/// # use hdbconnect::{ConnectParams,ServerCerts};
/// # let string_with_certificate = String::new();
/// let mut conn_params = ConnectParams::builder()
/// // ...more settings required...
/// .tls_with(ServerCerts::Direct(string_with_certificate))
/// .build();
/// ```
pub fn tls_with(&mut self, server_certs: ServerCerts) -> &mut Self {
self.server_certs.push(server_certs);
self
}
/// Adds a runtime parameter.
pub fn option(&mut self, name: &str, value: &str) -> &mut Self {
self.options.push((name.to_string(), value.to_string()));
self
}
/// Constructs a `ConnectParams` from the builder.
///
/// # Errors
/// `HdbError::Usage` if the builder was not yet configured to
/// create a meaningful `ConnectParams`
pub fn build(&self) -> HdbResult<ConnectParams> {
let host = match self.hostname {
Some(ref s) => s.clone(),
None => return Err(HdbError::Usage("hostname is missing")),
};
let addr = format!(
"{}:{}",
host,
match self.port {
Some(p) => p,
None => return Err(HdbError::Usage("port is missing")),
}
);
let dbuser = match self.dbuser {
Some(ref s) => s.clone(),
None => return Err(HdbError::Usage("dbuser is missing")),
};
let password = match self.password {
Some(ref secstr) => secstr.clone(),
None => return Err(HdbError::Usage("password is missing")),
};
Ok(ConnectParams::new(
host,
addr,
dbuser,
password,
self.clientlocale.clone(),
self.server_certs.clone(),
#[cfg(feature = "alpha_nonblocking")]
self.use_nonblocking,
))
}
/// Returns the url for this connection
///
/// # Errors
///
/// `HdbError::Usage` if the builder was not yet configured to
/// build a correct url
pub fn to_url(&self) -> HdbResult<String> {
if let Some(dbuser) = &self.dbuser {
if let Some(hostname) = &self.hostname {
if let Some(port) = &self.port {
return Ok(format!(
"{}://{}@{}:{}{}",
self.get_protocol_name(),
dbuser,
hostname,
port,
self.get_options_as_parameters()
));
}
}
}
Err(HdbError::Usage("URL requires dbuser, hostname, and port"))
}
fn get_protocol_name(&self) -> &str {
if self.server_certs.is_empty() {
"hdbsql"
} else {
"hdbsqls"
}
}
fn get_options_as_parameters(&self) -> String {
let mut result = String::with_capacity(200);
for (index, s) in self
.options
.iter()
.map(|(k, v)| {
if v.is_empty() {
k.clone()
} else {
format!("{}={}", k, v)
}
})
.chain(self.server_certs.iter().map(ToString::to_string))
.chain(
self.clientlocale
.iter()
.map(|l| format!("{}={}", super::cp_url::OPTION_CLIENT_LOCALE, l)),
)
.enumerate()
{
let prefix = if index == 0 { "?" } else { "&" };
result.push_str(&format!("{}{}", prefix, s));
}
result
}
/// Getter
pub fn get_hostname(&self) -> Option<&str> {
self.hostname.as_deref()
}
/// Getter
pub fn get_dbuser(&self) -> Option<&str> {
self.dbuser.as_deref()
}
/// Getter
pub fn get_password(&self) -> Option<&SecStr> {
self.password.as_ref()
}
/// Getter
pub fn get_port(&self) -> Option<u16> {
self.port
}
/// Getter
pub fn get_clientlocale(&self) -> Option<&str> {
self.clientlocale.as_deref()
}
/// Getter
pub fn get_server_certs(&self) -> &Vec<ServerCerts> {
&self.server_certs
}
/// Getter
pub fn get_options(&self) -> &Vec<(String, String)> {
&self.options
}
}
impl<'de> serde::de::Deserialize<'de> for ConnectParamsBuilder {
    // Deserializes a builder from its URL string representation.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        // The unit `Visitor` parses the string; no need for a binding.
        deserializer.deserialize_str(Visitor())
    }
}
// Serde visitor that accepts a URL string and parses it into a builder.
struct Visitor();
impl<'de> serde::de::Visitor<'de> for Visitor {
    type Value = ConnectParamsBuilder;
    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("a String in the form of a url")
    }
    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        // Delegate URL parsing to `IntoConnectParamsBuilder` for `&str`.
        IntoConnectParamsBuilder::into_connect_params_builder(v).map_err(E::custom)
    }
}
impl Into<String> for ConnectParamsBuilder {
fn into(mut self) -> String {
self.unset_password();
self.to_url().unwrap()
}
}
#[cfg(test)]
mod test {
    use super::super::into_connect_params_builder::IntoConnectParamsBuilder;
    use super::ConnectParamsBuilder;
    use super::ServerCerts;
    // Covers the three ways of obtaining parameters: programmatic build,
    // build with TLS/locale options, and parsing from a URL string.
    #[test]
    fn test_connect_params_builder() {
        {
            // Minimal mandatory configuration.
            let params = ConnectParamsBuilder::new()
                .hostname("abcd123")
                .port(2222)
                .dbuser("MEIER")
                .password("schLau")
                .build()
                .unwrap();
            assert_eq!("MEIER", params.dbuser());
            assert_eq!(b"schLau", params.password().unsecure());
            assert_eq!("abcd123:2222", params.addr());
            assert_eq!(None, params.clientlocale());
            assert!(params.server_certs().is_empty());
        }
        {
            // Locale plus two TLS certificate sources; insertion order of
            // `tls_with` calls must be preserved in `server_certs`.
            let mut builder = ConnectParamsBuilder::new();
            builder
                .hostname("abcd123")
                .port(2222)
                .dbuser("MEIER")
                .password("schLau")
                .clientlocale("de_DE");
            builder.tls_with(crate::ServerCerts::Directory("TCD".to_string()));
            builder.tls_with(crate::ServerCerts::RootCertificates);
            let params = builder.build().unwrap();
            assert_eq!("MEIER", params.dbuser());
            assert_eq!(b"schLau", params.password().unsecure());
            assert_eq!(Some("de_DE"), params.clientlocale());
            assert_eq!(
                ServerCerts::Directory("TCD".to_string()),
                *params.server_certs().get(0).unwrap()
            );
            assert_eq!(
                ServerCerts::RootCertificates,
                *params.server_certs().get(1).unwrap()
            );
        }
        {
            // Round-trip from a URL containing a password.
            let builder = "hdbsql://MEIER:schLau@abcd123:2222"
                .into_connect_params_builder()
                .unwrap();
            assert_eq!("MEIER", builder.get_dbuser().unwrap());
            assert_eq!(b"schLau", builder.get_password().unwrap().unsecure());
            assert_eq!("abcd123", builder.get_hostname().unwrap());
            assert_eq!(2222, builder.get_port().unwrap());
            assert_eq!(None, builder.get_clientlocale());
            assert!(builder.get_server_certs().is_empty());
        }
    }
    // Serialization must emit the URL form without the password; hence the
    // deserialized builder only equals the original after `unset_password`.
    #[test]
    fn serde_test() {
        #[derive(Serialize, Deserialize, Debug)]
        struct Data {
            x: ConnectParamsBuilder,
        }
        let mut data = Data {
            x: ConnectParamsBuilder::new(),
        };
        data.x
            .hostname("abcd123")
            .port(2222)
            .dbuser("MEIER")
            .password("schLau")
            .clientlocale("de_DE")
            .tls_with(crate::ServerCerts::Directory("TCD".to_string()))
            .tls_with(crate::ServerCerts::RootCertificates);
        let serialized = serde_json::to_string(&data).unwrap();
        assert_eq!(
            r#"{"x":"hdbsqls://MEIER@abcd123:2222?tls_certificate_dir=TCD&use_mozillas_root_certificates&client_locale=de_DE"}"#,
            serialized
        );
        let deserialized: Data = serde_json::from_str(&serialized).unwrap();
        assert_ne!(data.x, deserialized.x);
        data.x.unset_password();
        assert_eq!(data.x, deserialized.x);
    }
}
| 29.867971 | 129 | 0.53127 |
e9864d2f42ce51bfb3f66f1b4e5a5dbfd7456e0d | 19,410 | use std::arch::x86_64::*;
use std::intrinsics::transmute;
use crate::convolution::optimisations::CoefficientsI16Chunk;
use crate::convolution::{optimisations, Bound, Coefficients};
use crate::image_view::{FourRows, FourRowsMut, TypedImageView, TypedImageViewMut};
use crate::pixels::U8x4;
use crate::simd_utils;
// This code is based on C-implementation from Pillow-SIMD package for Python
// https://github.com/uploadcare/pillow-simd
#[inline]
pub(crate) fn horiz_convolution(
    src_image: TypedImageView<U8x4>,
    mut dst_image: TypedImageViewMut<U8x4>,
    offset: u32,
    coeffs: Coefficients,
) {
    // Convert the coefficient values into normalized i16 chunks sharing one
    // fixed-point precision.
    let window_size = coeffs.window_size;
    let bounds_per_pixel = coeffs.bounds;
    let normalizer_guard = optimisations::NormalizerGuard::new(coeffs.values);
    let precision = normalizer_guard.precision();
    let coefficients_chunks =
        normalizer_guard.normalized_i16_chunks(window_size, &bounds_per_pixel);
    let dst_height = dst_image.height().get();
    // Bulk of the work: four rows at a time with the wide AVX2 kernel.
    for (src_rows, dst_rows) in src_image
        .iter_4_rows(offset, dst_height + offset)
        .zip(dst_image.iter_4_rows_mut())
    {
        unsafe {
            horiz_convolution_8u4x(src_rows, dst_rows, &coefficients_chunks, precision);
        }
    }
    // Remaining 0..=3 rows, processed one by one.
    for yy in (dst_height - dst_height % 4)..dst_height {
        unsafe {
            horiz_convolution_8u(
                src_image.get_row(yy + offset).unwrap(),
                dst_image.get_row_mut(yy).unwrap(),
                &coefficients_chunks,
                precision,
            );
        }
    }
}
#[inline]
pub(crate) fn vert_convolution(
    src_image: TypedImageView<U8x4>,
    mut dst_image: TypedImageViewMut<U8x4>,
    coeffs: Coefficients,
) {
    // Convert the coefficient values into a normalized i16 representation
    // with a shared fixed-point precision.
    let window_size = coeffs.window_size;
    let bounds = coeffs.bounds;
    let normalizer_guard = optimisations::NormalizerGuard::new(coeffs.values);
    let precision = normalizer_guard.precision();
    let coeffs_i16 = normalizer_guard.normalized_i16();
    // One source-row bound and one coefficient window per destination row.
    for ((&bound, k), dst_row) in bounds
        .iter()
        .zip(coeffs_i16.chunks(window_size))
        .zip(dst_image.iter_rows_mut())
    {
        unsafe {
            vert_convolution_8u(&src_image, dst_row, k, bound, precision);
        }
    }
}
/// Horizontal convolution of four image rows at once (AVX2).
///
/// For each destination pixel, multiplies a window of source pixels by the
/// corresponding i16 coefficients, accumulates in i32, shifts right by
/// `precision` and stores the saturated result.
///
/// For safety, it is necessary to ensure the following conditions:
/// - length of all rows in src_rows must be equal
/// - length of all rows in dst_rows must be equal
/// - coefficients_chunks.len() == dst_rows.0.len()
/// - max(chunk.start + chunk.values.len() for chunk in coefficients_chunks) <= src_row.0.len()
/// - precision <= MAX_COEFS_PRECISION
#[inline]
#[target_feature(enable = "avx2")]
unsafe fn horiz_convolution_8u4x(
    src_rows: FourRows<u32>,
    dst_rows: FourRowsMut<u32>,
    coefficients_chunks: &[CoefficientsI16Chunk],
    precision: u8,
) {
    let (s_row0, s_row1, s_row2, s_row3) = src_rows;
    let (d_row0, d_row1, d_row2, d_row3) = dst_rows;
    let zero = _mm256_setzero_si256();
    // Rounding bias: 0.5 in the fixed-point scale, applied before the final
    // arithmetic right shift by `precision`.
    let initial = _mm256_set1_epi32(1 << (precision - 1));
    // sh1/sh2 zero-extend packed u8 channels to i16, interleaving the same
    // channel of two adjacent pixels so that one `_mm256_madd_epi16`
    // multiplies both pixels by their coefficients and sums the pairs.
    // sh1 covers source pixels 0/1 of each lane, sh2 pixels 2/3.
    #[rustfmt::skip]
    let sh1 = _mm256_set_epi8(
        -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0,
        -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0,
    );
    #[rustfmt::skip]
    let sh2 = _mm256_set_epi8(
        -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8,
        -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8,
    );
    for (dst_x, coeffs_chunk) in coefficients_chunks.iter().enumerate() {
        let x_start = coeffs_chunk.start as usize;
        let mut x: usize = 0;
        // sss0 accumulates rows 0/1 (one 128-bit lane each), sss1 rows 2/3.
        let mut sss0 = initial;
        let mut sss1 = initial;
        let coeffs = coeffs_chunk.values;
        // Main loop: four coefficients (four source pixels) per iteration.
        let coeffs_by_4 = coeffs.chunks_exact(4);
        let reminder1 = coeffs_by_4.remainder();
        for k in coeffs_by_4 {
            let mmk0 = simd_utils::ptr_i16_to_256set1_epi32(k, 0);
            let mmk1 = simd_utils::ptr_i16_to_256set1_epi32(k, 2);
            // Rows 0 and 1 side by side in one 256-bit register.
            let mut source = _mm256_inserti128_si256::<1>(
                _mm256_castsi128_si256(simd_utils::loadu_si128(s_row0, x + x_start)),
                simd_utils::loadu_si128(s_row1, x + x_start),
            );
            let mut pix = _mm256_shuffle_epi8(source, sh1);
            sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk0));
            pix = _mm256_shuffle_epi8(source, sh2);
            sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk1));
            // Same for rows 2 and 3.
            source = _mm256_inserti128_si256::<1>(
                _mm256_castsi128_si256(simd_utils::loadu_si128(s_row2, x + x_start)),
                simd_utils::loadu_si128(s_row3, x + x_start),
            );
            pix = _mm256_shuffle_epi8(source, sh1);
            sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk0));
            pix = _mm256_shuffle_epi8(source, sh2);
            sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk1));
            x += 4;
        }
        // Tail: two coefficients (two source pixels) per iteration.
        let coeffs_by_2 = reminder1.chunks_exact(2);
        let reminder2 = coeffs_by_2.remainder();
        for k in coeffs_by_2 {
            let mmk = simd_utils::ptr_i16_to_256set1_epi32(k, 0);
            let mut pix = _mm256_inserti128_si256::<1>(
                _mm256_castsi128_si256(simd_utils::loadl_epi64(s_row0, x + x_start)),
                simd_utils::loadl_epi64(s_row1, x + x_start),
            );
            pix = _mm256_shuffle_epi8(pix, sh1);
            sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk));
            pix = _mm256_inserti128_si256::<1>(
                _mm256_castsi128_si256(simd_utils::loadl_epi64(s_row2, x + x_start)),
                simd_utils::loadl_epi64(s_row3, x + x_start),
            );
            pix = _mm256_shuffle_epi8(pix, sh1);
            sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk));
            x += 2;
        }
        // Final tail: a single coefficient / single source pixel.
        for &k in reminder2 {
            // [16] xx k0 xx k0 xx k0 xx k0 xx k0 xx k0 xx k0 xx k0
            let mmk = _mm256_set1_epi32(k as i32);
            // [16] xx a0 xx b0 xx g0 xx r0 xx a0 xx b0 xx g0 xx r0
            let mut pix = _mm256_inserti128_si256::<1>(
                _mm256_castsi128_si256(simd_utils::mm_cvtepu8_epi32(s_row0, x + x_start)),
                simd_utils::mm_cvtepu8_epi32(s_row1, x + x_start),
            );
            sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk));
            pix = _mm256_inserti128_si256::<1>(
                _mm256_castsi128_si256(simd_utils::mm_cvtepu8_epi32(s_row2, x + x_start)),
                simd_utils::mm_cvtepu8_epi32(s_row3, x + x_start),
            );
            sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk));
            x += 1;
        }
        // `_mm256_srai_epi32` requires a const shift count; `constify_imm8!`
        // dispatches the runtime `precision` to a const instantiation.
        macro_rules! call {
            ($imm8:expr) => {{
                sss0 = _mm256_srai_epi32::<$imm8>(sss0);
                sss1 = _mm256_srai_epi32::<$imm8>(sss1);
            }};
        }
        constify_imm8!(precision, call);
        // Narrow i32 -> i16 (signed saturation) -> u8 (unsigned saturation).
        sss0 = _mm256_packs_epi32(sss0, zero);
        sss1 = _mm256_packs_epi32(sss1, zero);
        sss0 = _mm256_packus_epi16(sss0, zero);
        sss1 = _mm256_packus_epi16(sss1, zero);
        // Store one packed u32 pixel per destination row; lane 0/1 of sss0
        // hold rows 0/1, lane 0/1 of sss1 rows 2/3.
        *d_row0.get_unchecked_mut(dst_x) =
            transmute(_mm_cvtsi128_si32(_mm256_extracti128_si256::<0>(sss0)));
        *d_row1.get_unchecked_mut(dst_x) =
            transmute(_mm_cvtsi128_si32(_mm256_extracti128_si256::<1>(sss0)));
        *d_row2.get_unchecked_mut(dst_x) =
            transmute(_mm_cvtsi128_si32(_mm256_extracti128_si256::<0>(sss1)));
        *d_row3.get_unchecked_mut(dst_x) =
            transmute(_mm_cvtsi128_si32(_mm256_extracti128_si256::<1>(sss1)));
    }
}
/// Horizontal convolution of a single image row (AVX2).
///
/// For safety, it is necessary to ensure the following conditions:
/// - coefficients_chunks.len() <= dst_row.len()
/// - max(chunk.start + chunk.values.len() for chunk in coefficients_chunks) <= src_row.len()
/// - precision <= MAX_COEFS_PRECISION
// (The safety list previously referenced `bounds`/`coeffs` parameters from an
// older signature; restated here in terms of the actual parameters.)
#[inline]
#[target_feature(enable = "avx2")]
unsafe fn horiz_convolution_8u(
    src_row: &[u32],
    dst_row: &mut [u32],
    coefficients_chunks: &[CoefficientsI16Chunk],
    precision: u8,
) {
    // Shuffle masks: sh1/sh3/sh5 zero-extend u8 pixel channels to i16 pairs
    // for `madd`; sh2/sh4/sh6 broadcast the matching coefficient pairs;
    // sh7 is the 128-bit variant used in the 2-coefficient tail.
    #[rustfmt::skip]
    let sh1 = _mm256_set_epi8(
        -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0,
        -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0,
    );
    #[rustfmt::skip]
    let sh2 = _mm256_set_epi8(
        11, 10, 9, 8, 11, 10, 9, 8, 11, 10, 9, 8, 11, 10, 9, 8,
        3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0,
    );
    #[rustfmt::skip]
    let sh3 = _mm256_set_epi8(
        -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8,
        -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8,
    );
    #[rustfmt::skip]
    let sh4 = _mm256_set_epi8(
        15, 14, 13, 12, 15, 14, 13, 12, 15, 14, 13, 12, 15, 14, 13, 12,
        7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4,
    );
    #[rustfmt::skip]
    let sh5 = _mm256_set_epi8(
        -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8,
        -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0,
    );
    #[rustfmt::skip]
    let sh6 = _mm256_set_epi8(
        7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4,
        3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0,
    );
    let sh7 = _mm_set_epi8(-1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0);
    for (dst_x, &coeffs_chunk) in coefficients_chunks.iter().enumerate() {
        let x_start = coeffs_chunk.start as usize;
        let mut x: usize = 0;
        let mut coeffs = coeffs_chunk.values;
        // Wide (256-bit) accumulation is only worthwhile for >= 8 coefficients;
        // otherwise start directly with the 128-bit accumulator.
        let mut sss: __m128i = if coeffs.len() < 8 {
            _mm_set1_epi32(1 << (precision - 1))
        } else {
            // Lower part will be added to higher, use only half of the error
            let mut sss256 = _mm256_set1_epi32(1 << (precision - 2));
            // Eight coefficients / eight source pixels per iteration.
            let coeffs_by_8 = coeffs.chunks_exact(8);
            let reminder1 = coeffs_by_8.remainder();
            for k in coeffs_by_8 {
                let tmp = simd_utils::loadu_si128(k, 0);
                let ksource = _mm256_insertf128_si256::<1>(_mm256_castsi128_si256(tmp), tmp);
                let source = simd_utils::loadu_si256(src_row, x + x_start);
                let mut pix = _mm256_shuffle_epi8(source, sh1);
                let mut mmk = _mm256_shuffle_epi8(ksource, sh2);
                sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix, mmk));
                pix = _mm256_shuffle_epi8(source, sh3);
                mmk = _mm256_shuffle_epi8(ksource, sh4);
                sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix, mmk));
                x += 8;
            }
            // Four coefficients / four source pixels per iteration.
            let coeffs_by_4 = reminder1.chunks_exact(4);
            coeffs = coeffs_by_4.remainder();
            for k in coeffs_by_4 {
                let tmp = simd_utils::loadl_epi64(k, 0);
                let ksource = _mm256_insertf128_si256::<1>(_mm256_castsi128_si256(tmp), tmp);
                let tmp = simd_utils::loadu_si128(src_row, x + x_start);
                let source = _mm256_insertf128_si256::<1>(_mm256_castsi128_si256(tmp), tmp);
                let pix = _mm256_shuffle_epi8(source, sh5);
                let mmk = _mm256_shuffle_epi8(ksource, sh6);
                sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix, mmk));
                x += 4;
            }
            // Fold the two 128-bit lanes into the final accumulator.
            _mm_add_epi32(
                _mm256_extracti128_si256::<0>(sss256),
                _mm256_extracti128_si256::<1>(sss256),
            )
        };
        // Tail: two coefficients per iteration (128-bit path).
        let coeffs_by_2 = coeffs.chunks_exact(2);
        let reminder1 = coeffs_by_2.remainder();
        for k in coeffs_by_2 {
            let mmk = simd_utils::ptr_i16_to_set1_epi32(k, 0);
            let source = simd_utils::loadl_epi64(src_row, x + x_start);
            let pix = _mm_shuffle_epi8(source, sh7);
            sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
            x += 2
        }
        // Final tail: one coefficient / one source pixel.
        for &k in reminder1 {
            let pix = simd_utils::mm_cvtepu8_epi32(src_row, x + x_start);
            let mmk = _mm_set1_epi32(k as i32);
            sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
            x += 1;
        }
        // Const-dispatch the runtime `precision` for the arithmetic shift.
        macro_rules! call {
            ($imm8:expr) => {{
                sss = _mm_srai_epi32::<$imm8>(sss);
            }};
        }
        constify_imm8!(precision, call);
        // Narrow with saturation and store a single packed u32 pixel.
        sss = _mm_packs_epi32(sss, sss);
        *dst_row.get_unchecked_mut(dst_x) =
            transmute(_mm_cvtsi128_si32(_mm_packus_epi16(sss, sss)));
    }
}
/// Vertical convolution of one destination row (AVX2).
///
/// For each column `x`, accumulates `coeffs[i] * src[y_start + i][x]`
/// over the row window given by `bound`, processing 8, then 2, then 1
/// column(s) per iteration.
///
/// For safety, it is necessary to ensure the following conditions:
/// - dst_row.len() >= src_img width
/// - coeffs.len() >= bound.size
/// - bound.start + bound.size <= src_img height
/// - precision <= MAX_COEFS_PRECISION
#[inline]
#[target_feature(enable = "avx2")]
pub(crate) unsafe fn vert_convolution_8u(
    src_img: &TypedImageView<U8x4>,
    dst_row: &mut [u32],
    coeffs: &[i16],
    bound: Bound,
    precision: u8,
) {
    let src_width = src_img.width().get() as usize;
    let y_start = bound.start;
    let y_size = bound.size;
    // Rounding bias (0.5 in fixed point) for the 128-bit and 256-bit paths.
    let initial = _mm_set1_epi32(1 << (precision - 1));
    let initial_256 = _mm256_set1_epi32(1 << (precision - 1));
    let mut x: usize = 0;
    // Main path: 8 pixels (32 bytes) per iteration.
    while x < src_width.saturating_sub(7) {
        let mut sss0 = initial_256;
        let mut sss1 = initial_256;
        let mut sss2 = initial_256;
        let mut sss3 = initial_256;
        let mut y: u32 = 0;
        // Two source rows per iteration: interleave their bytes so one
        // `madd` applies both rows' coefficients at once.
        for (s_row1, s_row2) in src_img.iter_2_rows(y_start, y_start + y_size) {
            // Load two coefficients at once
            let mmk = simd_utils::ptr_i16_to_256set1_epi32(coeffs, y as usize);
            let source1 = simd_utils::loadu_si256(s_row1, x); // top line
            let source2 = simd_utils::loadu_si256(s_row2, x); // bottom line
            let mut source = _mm256_unpacklo_epi8(source1, source2);
            let mut pix = _mm256_unpacklo_epi8(source, _mm256_setzero_si256());
            sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk));
            pix = _mm256_unpackhi_epi8(source, _mm256_setzero_si256());
            sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk));
            source = _mm256_unpackhi_epi8(source1, source2);
            pix = _mm256_unpacklo_epi8(source, _mm256_setzero_si256());
            sss2 = _mm256_add_epi32(sss2, _mm256_madd_epi16(pix, mmk));
            pix = _mm256_unpackhi_epi8(source, _mm256_setzero_si256());
            sss3 = _mm256_add_epi32(sss3, _mm256_madd_epi16(pix, mmk));
            y += 2;
        }
        // Odd trailing row (if y_size is odd): pair it with a zero row.
        for s_row in src_img.iter_rows(y_start + y, y_start + y_size) {
            let mmk = _mm256_set1_epi32(coeffs[y as usize] as i32);
            let source1 = simd_utils::loadu_si256(s_row, x); // top line
            let source2 = _mm256_setzero_si256(); // bottom line is empty
            let mut source = _mm256_unpacklo_epi8(source1, source2);
            let mut pix = _mm256_unpacklo_epi8(source, _mm256_setzero_si256());
            sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk));
            pix = _mm256_unpackhi_epi8(source, _mm256_setzero_si256());
            sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk));
            source = _mm256_unpackhi_epi8(source1, _mm256_setzero_si256());
            pix = _mm256_unpacklo_epi8(source, _mm256_setzero_si256());
            sss2 = _mm256_add_epi32(sss2, _mm256_madd_epi16(pix, mmk));
            pix = _mm256_unpackhi_epi8(source, _mm256_setzero_si256());
            sss3 = _mm256_add_epi32(sss3, _mm256_madd_epi16(pix, mmk));
            y += 1;
        }
        // Const-dispatch the runtime `precision` for the arithmetic shift.
        macro_rules! call {
            ($imm8:expr) => {{
                sss0 = _mm256_srai_epi32::<$imm8>(sss0);
                sss1 = _mm256_srai_epi32::<$imm8>(sss1);
                sss2 = _mm256_srai_epi32::<$imm8>(sss2);
                sss3 = _mm256_srai_epi32::<$imm8>(sss3);
            }};
        }
        constify_imm8!(precision, call);
        // Narrow i32 -> i16 -> u8 with saturation and store 8 packed pixels.
        sss0 = _mm256_packs_epi32(sss0, sss1);
        sss2 = _mm256_packs_epi32(sss2, sss3);
        sss0 = _mm256_packus_epi16(sss0, sss2);
        let dst_ptr = dst_row.get_unchecked_mut(x..).as_mut_ptr() as *mut __m256i;
        _mm256_storeu_si256(dst_ptr, sss0);
        x += 8;
    }
    // Tail: 2 pixels (8 bytes) per iteration, 128-bit registers.
    while x < src_width.saturating_sub(1) {
        let mut sss0 = initial; // left row
        let mut sss1 = initial; // right row
        let mut y: u32 = 0;
        for (s_row1, s_row2) in src_img.iter_2_rows(y_start, y_start + y_size) {
            // Load two coefficients at once
            let mmk = simd_utils::ptr_i16_to_set1_epi32(coeffs, y as usize);
            let source1 = simd_utils::loadl_epi64(s_row1, x); // top line
            let source2 = simd_utils::loadl_epi64(s_row2, x); // bottom line
            let source = _mm_unpacklo_epi8(source1, source2);
            let mut pix = _mm_unpacklo_epi8(source, _mm_setzero_si128());
            sss0 = _mm_add_epi32(sss0, _mm_madd_epi16(pix, mmk));
            pix = _mm_unpackhi_epi8(source, _mm_setzero_si128());
            sss1 = _mm_add_epi32(sss1, _mm_madd_epi16(pix, mmk));
            y += 2;
        }
        for s_row in src_img.iter_rows(y_start + y, y_start + y_size) {
            let mmk = _mm_set1_epi32(*coeffs.get_unchecked(y as usize) as i32);
            let source1 = simd_utils::loadl_epi64(s_row, x); // top line
            let source2 = _mm_setzero_si128(); // bottom line is empty
            let source = _mm_unpacklo_epi8(source1, source2);
            let mut pix = _mm_unpacklo_epi8(source, _mm_setzero_si128());
            sss0 = _mm_add_epi32(sss0, _mm_madd_epi16(pix, mmk));
            pix = _mm_unpackhi_epi8(source, _mm_setzero_si128());
            sss1 = _mm_add_epi32(sss1, _mm_madd_epi16(pix, mmk));
            y += 1;
        }
        macro_rules! call {
            ($imm8:expr) => {{
                sss0 = _mm_srai_epi32::<$imm8>(sss0);
                sss1 = _mm_srai_epi32::<$imm8>(sss1);
            }};
        }
        constify_imm8!(precision, call);
        sss0 = _mm_packs_epi32(sss0, sss1);
        sss0 = _mm_packus_epi16(sss0, sss0);
        let dst_ptr = dst_row.get_unchecked_mut(x..).as_mut_ptr() as *mut __m128i;
        _mm_storel_epi64(dst_ptr, sss0);
        x += 2;
    }
    // Final tail: one pixel at a time.
    while x < src_width {
        let mut sss = initial;
        let mut y: u32 = 0;
        for (s_row1, s_row2) in src_img.iter_2_rows(y_start, y_start + y_size) {
            // Load two coefficients at once
            let mmk = simd_utils::ptr_i16_to_set1_epi32(coeffs, y as usize);
            let source1 = simd_utils::mm_cvtsi32_si128(s_row1, x); // top line
            let source2 = simd_utils::mm_cvtsi32_si128(s_row2, x); // bottom line
            let source = _mm_unpacklo_epi8(source1, source2);
            let pix = _mm_unpacklo_epi8(source, _mm_setzero_si128());
            sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
            y += 2;
        }
        for s_row in src_img.iter_rows(y_start + y, y_start + y_size) {
            let pix = simd_utils::mm_cvtepu8_epi32(s_row, x);
            let mmk = _mm_set1_epi32(*coeffs.get_unchecked(y as usize) as i32);
            sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
            y += 1;
        }
        macro_rules! call {
            ($imm8:expr) => {{
                sss = _mm_srai_epi32::<$imm8>(sss);
            }};
        }
        constify_imm8!(precision, call);
        sss = _mm_packs_epi32(sss, sss);
        *dst_row.get_unchecked_mut(x) = transmute(_mm_cvtsi128_si32(_mm_packus_epi16(sss, sss)));
        x += 1;
    }
}
| 37.910156 | 97 | 0.584029 |
89bb5797a828f3770d78f02ad1ba0c96dc10e62e | 77,118 | // Decoding metadata from a single crate's metadata
use crate::creader::CrateMetadataRef;
use crate::rmeta::table::{FixedSizeEncoding, Table};
use crate::rmeta::*;
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::{Lock, LockGuard, Lrc, OnceCell};
use rustc_data_structures::unhash::UnhashMap;
use rustc_errors::ErrorReported;
use rustc_expand::base::{SyntaxExtension, SyntaxExtensionKind};
use rustc_expand::proc_macro::{AttrProcMacro, BangProcMacro, ProcMacroDerive};
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, CtorOf, DefKind, Res};
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::definitions::{DefKey, DefPath, DefPathData, DefPathHash};
use rustc_hir::diagnostic_items::DiagnosticItems;
use rustc_hir::lang_items;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::hir::exports::Export;
use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportLevel};
use rustc_middle::mir::interpret::{AllocDecodingSession, AllocDecodingState};
use rustc_middle::mir::{self, Body, Promoted};
use rustc_middle::thir;
use rustc_middle::ty::codec::TyDecoder;
use rustc_middle::ty::{self, Ty, TyCtxt, Visibility};
use rustc_serialize::{opaque, Decodable, Decoder};
use rustc_session::cstore::{
CrateSource, ExternCrate, ForeignModule, LinkagePreference, NativeLib,
};
use rustc_session::Session;
use rustc_span::hygiene::{ExpnIndex, MacroKind};
use rustc_span::source_map::{respan, Spanned};
use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::{self, BytePos, ExpnId, Pos, Span, SyntaxContext, DUMMY_SP};
use proc_macro::bridge::client::ProcMacro;
use std::io;
use std::mem;
use std::num::NonZeroUsize;
use std::path::Path;
use tracing::debug;
pub use cstore_impl::{provide, provide_extern};
use rustc_span::hygiene::HygieneDecodeContext;
mod cstore_impl;
/// A reference to the raw binary version of crate metadata.
/// A `MetadataBlob` internally is just a reference counted pointer to
/// the actual data, so cloning it is cheap.
/// The bytes are not parsed eagerly; values are decoded on demand via
/// `Lazy` handles.
#[derive(Clone)]
crate struct MetadataBlob(Lrc<MetadataRef>);
// This is needed so we can create an OwningRef into the blob.
// The data behind a `MetadataBlob` has a stable address because it is
// contained within an Rc/Arc.
// SAFETY: `MetadataBlob` derefs into the allocation owned by the inner
// `Lrc`, whose address does not change when the `MetadataBlob` itself moves.
unsafe impl rustc_data_structures::owning_ref::StableAddress for MetadataBlob {}
// This is needed so we can create an OwningRef into the blob.
impl std::ops::Deref for MetadataBlob {
    type Target = [u8];
    #[inline]
    fn deref(&self) -> &[u8] {
        // Expose the raw metadata bytes directly.
        &self.0[..]
    }
}
// A map from external crate numbers (as decoded from some crate file) to
// local crate numbers (as generated during this session). Each external
// crate may refer to types in other external crates, and each has its
// own crate numbering.
crate type CrateNumMap = IndexVec<CrateNum, CrateNum>;
crate struct CrateMetadata {
    /// The primary crate data - binary metadata blob.
    blob: MetadataBlob,
    // --- Some data pre-decoded from the metadata blob, usually for performance ---
    /// Properties of the whole crate.
    /// NOTE(eddyb) we pass `'static` to a `'tcx` parameter because this
    /// lifetime is only used behind `Lazy`, and therefore acts like a
    /// universal (`for<'tcx>`), that is paired up with whichever `TyCtxt`
    /// is being used to decode those values.
    root: CrateRoot<'static>,
    /// Trait impl data.
    /// FIXME: Used only from queries and can use query cache,
    /// so pre-decoding can probably be avoided.
    trait_impls:
        FxHashMap<(u32, DefIndex), Lazy<[(DefIndex, Option<ty::fast_reject::SimplifiedType>)]>>,
    /// Proc macro descriptions for this crate, if it's a proc macro crate.
    raw_proc_macros: Option<&'static [ProcMacro]>,
    /// Source maps for code from the crate.
    source_map_import_info: OnceCell<Vec<ImportedSourceFile>>,
    /// For every definition in this crate, maps its `DefPathHash` to its `DefIndex`.
    def_path_hash_map: DefPathHashMapRef<'static>,
    /// Likewise for ExpnHash.
    expn_hash_map: OnceCell<UnhashMap<ExpnHash, ExpnIndex>>,
    /// Used for decoding interpret::AllocIds in a cached & thread-safe manner.
    alloc_decoding_state: AllocDecodingState,
    /// Caches decoded `DefKey`s.
    def_key_cache: Lock<FxHashMap<DefIndex, DefKey>>,
    /// Caches decoded `DefPathHash`es.
    def_path_hash_cache: Lock<FxHashMap<DefIndex, DefPathHash>>,
    // --- Other significant crate properties ---
    /// ID of this crate, from the current compilation session's point of view.
    cnum: CrateNum,
    /// Maps crate IDs as they were seen from this crate's compilation sessions into
    /// IDs as they are seen from the current compilation session.
    cnum_map: CrateNumMap,
    /// Same ID set as `cnum_map` plus maybe some injected crates like panic runtime.
    dependencies: Lock<Vec<CrateNum>>,
    /// How to link (or not link) this crate to the currently compiled crate.
    dep_kind: Lock<CrateDepKind>,
    /// Filesystem location of this crate.
    source: CrateSource,
    /// Whether or not this crate should be considered a private dependency
    /// for purposes of the 'exported_private_dependencies' lint
    private_dep: bool,
    /// The hash for the host proc macro. Used to support `-Z dual-proc-macro`.
    host_hash: Option<Svh>,
    /// Additional data used for decoding `HygieneData` (e.g. `SyntaxContext`
    /// and `ExpnId`).
    /// Note that we store a `HygieneDecodeContext` for each `CrateMetadata`. This is
    /// because `SyntaxContext` ids are not globally unique, so we need
    /// to track which ids we've decoded on a per-crate basis.
    hygiene_context: HygieneDecodeContext,
    // --- Data used only for improving diagnostics ---
    /// Information about the `extern crate` item or path that caused this crate to be loaded.
    /// If this is `None`, then the crate was injected (e.g., by the allocator).
    extern_crate: Lock<Option<ExternCrate>>,
}
/// Holds information about a rustc_span::SourceFile imported from another crate.
/// See `imported_source_files()` for more information.
/// The two byte offsets allow translating spans encoded relative to the
/// foreign crate's source_map into the local source_map.
struct ImportedSourceFile {
    /// This SourceFile's byte-offset within the source_map of its original crate
    original_start_pos: rustc_span::BytePos,
    /// The end of this SourceFile within the source_map of its original crate
    original_end_pos: rustc_span::BytePos,
    /// The imported SourceFile's representation within the local source_map
    translated_source_file: Lrc<rustc_span::SourceFile>,
}
// State threaded through the decoding of one metadata node.
pub(super) struct DecodeContext<'a, 'tcx> {
    // Cursor over the raw metadata bytes.
    opaque: opaque::Decoder<'a>,
    // Crate/session/tcx context; each is optional depending on which
    // `Metadata` impl created this context (see the impls below).
    cdata: Option<CrateMetadataRef<'a>>,
    blob: &'a MetadataBlob,
    sess: Option<&'tcx Session>,
    tcx: Option<TyCtxt<'tcx>>,
    // Cache the last used source_file for translating spans as an optimization.
    last_source_file_index: usize,
    // Tracks positions for decoding relative `Lazy` offsets
    // (see `read_lazy_with_meta`).
    lazy_state: LazyState,
    // Used for decoding interpret::AllocIds in a cached & thread-safe manner.
    alloc_decoding_session: Option<AllocDecodingSession<'a>>,
}
/// Abstract over the various ways one can create metadata decoders.
pub(super) trait Metadata<'a, 'tcx>: Copy {
    fn blob(self) -> &'a MetadataBlob;
    fn cdata(self) -> Option<CrateMetadataRef<'a>> {
        None
    }
    fn sess(self) -> Option<&'tcx Session> {
        None
    }
    fn tcx(self) -> Option<TyCtxt<'tcx>> {
        None
    }
    /// Creates a `DecodeContext` positioned at byte offset `pos` of the blob.
    fn decoder(self, pos: usize) -> DecodeContext<'a, 'tcx> {
        let tcx = self.tcx();
        // An explicit session wins; otherwise fall back to the tcx's session.
        let sess = match self.sess() {
            Some(sess) => Some(sess),
            None => tcx.map(|tcx| tcx.sess),
        };
        DecodeContext {
            opaque: opaque::Decoder::new(self.blob(), pos),
            cdata: self.cdata(),
            blob: self.blob(),
            sess,
            tcx,
            last_source_file_index: 0,
            lazy_state: LazyState::NoNode,
            alloc_decoding_session: self
                .cdata()
                .map(|cdata| cdata.cdata.alloc_decoding_state.new_decoding_session()),
        }
    }
}
// A bare blob provides no crate/session/tcx context (trait defaults apply).
impl<'a, 'tcx> Metadata<'a, 'tcx> for &'a MetadataBlob {
    #[inline]
    fn blob(self) -> &'a MetadataBlob {
        self
    }
}
// Blob plus session: span decoding becomes possible, but no crate context.
impl<'a, 'tcx> Metadata<'a, 'tcx> for (&'a MetadataBlob, &'tcx Session) {
    #[inline]
    fn blob(self) -> &'a MetadataBlob {
        let (blob, _) = self;
        blob
    }
    #[inline]
    fn sess(self) -> Option<&'tcx Session> {
        Some(self.1)
    }
}
// Crate metadata alone: blob and crate context, but no session/tcx.
impl<'a, 'tcx> Metadata<'a, 'tcx> for &'a CrateMetadataRef<'a> {
    #[inline]
    fn blob(self) -> &'a MetadataBlob {
        &self.blob
    }
    #[inline]
    fn cdata(self) -> Option<CrateMetadataRef<'a>> {
        Some(*self)
    }
}
// Crate metadata plus session.
impl<'a, 'tcx> Metadata<'a, 'tcx> for (&'a CrateMetadataRef<'a>, &'tcx Session) {
    #[inline]
    fn blob(self) -> &'a MetadataBlob {
        let (cdata, _) = self;
        &cdata.blob
    }
    #[inline]
    fn cdata(self) -> Option<CrateMetadataRef<'a>> {
        let (cdata, _) = self;
        Some(*cdata)
    }
    #[inline]
    fn sess(self) -> Option<&'tcx Session> {
        Some(self.1)
    }
}
// Crate metadata plus tcx (the session is then derived from the tcx).
impl<'a, 'tcx> Metadata<'a, 'tcx> for (&'a CrateMetadataRef<'a>, TyCtxt<'tcx>) {
    #[inline]
    fn blob(self) -> &'a MetadataBlob {
        let (cdata, _) = self;
        &cdata.blob
    }
    #[inline]
    fn cdata(self) -> Option<CrateMetadataRef<'a>> {
        let (cdata, _) = self;
        Some(*cdata)
    }
    #[inline]
    fn tcx(self) -> Option<TyCtxt<'tcx>> {
        Some(self.1)
    }
}
impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> Lazy<T> {
    // Decodes the single value this `Lazy` points at: positions a decoder at
    // `self.position` and marks the node start so that relative `Lazy`
    // offsets encoded inside the value resolve correctly.
    fn decode<M: Metadata<'a, 'tcx>>(self, metadata: M) -> T {
        let mut dcx = metadata.decoder(self.position.get());
        dcx.lazy_state = LazyState::NodeStart(self.position);
        T::decode(&mut dcx).unwrap()
    }
}
impl<'a: 'x, 'tcx: 'x, 'x, T: Decodable<DecodeContext<'a, 'tcx>>> Lazy<[T]> {
    // Decodes the slice lazily: `self.meta` holds the element count, and the
    // returned iterator decodes one element per `next()` from a single
    // decoder shared across the whole slice.
    fn decode<M: Metadata<'a, 'tcx>>(
        self,
        metadata: M,
    ) -> impl ExactSizeIterator<Item = T> + Captures<'a> + Captures<'tcx> + 'x {
        let mut dcx = metadata.decoder(self.position.get())
        dcx.lazy_state = LazyState::NodeStart(self.position);
        (0..self.meta).map(move |_| T::decode(&mut dcx).unwrap())
    }
}
impl<'a, 'tcx> DecodeContext<'a, 'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        debug_assert!(self.tcx.is_some(), "missing TyCtxt in DecodeContext");
        self.tcx.unwrap()
    }
    #[inline]
    pub fn blob(&self) -> &'a MetadataBlob {
        self.blob
    }
    #[inline]
    pub fn cdata(&self) -> CrateMetadataRef<'a> {
        debug_assert!(self.cdata.is_some(), "missing CrateMetadata in DecodeContext")
        self.cdata.unwrap()
    }
    // Translates a crate number as encoded in the foreign crate's metadata
    // into the current session's numbering; `LOCAL_CRATE` in the blob refers
    // to the foreign crate itself.
    fn map_encoded_cnum_to_current(&self, cnum: CrateNum) -> CrateNum {
        if cnum == LOCAL_CRATE { self.cdata().cnum } else { self.cdata().cnum_map[cnum] }
    }
    // Reads a `Lazy` handle. Positions are encoded as distances: the first
    // lazy value of a node is encoded backwards from the node's start
    // (`NodeStart`), subsequent ones forwards from the minimal end of the
    // previous lazy value (`Previous`).
    fn read_lazy_with_meta<T: ?Sized + LazyMeta>(
        &mut self,
        meta: T::Meta,
    ) -> Result<Lazy<T>, <Self as Decoder>::Error> {
        let min_size = T::min_size(meta);
        let distance = self.read_usize()?;
        let position = match self.lazy_state {
            LazyState::NoNode => bug!("read_lazy_with_meta: outside of a metadata node"),
            LazyState::NodeStart(start) => {
                let start = start.get();
                assert!(distance + min_size <= start);
                start - distance - min_size
            }
            LazyState::Previous(last_min_end) => last_min_end.get() + distance,
        };
        // Remember this value's minimal end for the next relative offset.
        self.lazy_state = LazyState::Previous(NonZeroUsize::new(position + min_size).unwrap());
        Ok(Lazy::from_position_and_meta(NonZeroUsize::new(position).unwrap(), meta))
    }
    #[inline]
    pub fn read_raw_bytes(&mut self, len: usize) -> &'a [u8] {
        self.opaque.read_raw_bytes(len)
    }
}
impl<'a, 'tcx> TyDecoder<'tcx> for DecodeContext<'a, 'tcx> {
    const CLEAR_CROSS_CRATE: bool = true;
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.expect("missing TyCtxt in DecodeContext")
    }
    #[inline]
    fn peek_byte(&self) -> u8 {
        self.opaque.data[self.opaque.position()]
    }
    #[inline]
    fn position(&self) -> usize {
        self.opaque.position()
    }
    /// Looks up (or decodes and caches) a type referenced via a shorthand
    /// position, so repeated occurrences of the same type are decoded once.
    fn cached_ty_for_shorthand<F>(
        &mut self,
        shorthand: usize,
        or_insert_with: F,
    ) -> Result<Ty<'tcx>, Self::Error>
    where
        F: FnOnce(&mut Self) -> Result<Ty<'tcx>, Self::Error>,
    {
        let tcx = self.tcx();
        let key = ty::CReaderCacheKey { cnum: Some(self.cdata().cnum), pos: shorthand };
        if let Some(&ty) = tcx.ty_rcache.borrow().get(&key) {
            return Ok(ty);
        }
        let ty = or_insert_with(self)?;
        tcx.ty_rcache.borrow_mut().insert(key, ty);
        Ok(ty)
    }
    /// Runs `f` with the decoder temporarily repositioned at `pos`,
    /// restoring both the opaque cursor and the lazy state afterwards.
    fn with_position<F, R>(&mut self, pos: usize, f: F) -> R
    where
        F: FnOnce(&mut Self) -> R,
    {
        let new_opaque = opaque::Decoder::new(self.opaque.data, pos);
        let old_opaque = mem::replace(&mut self.opaque, new_opaque);
        let old_state = mem::replace(&mut self.lazy_state, LazyState::NoNode);
        let r = f(self);
        self.opaque = old_opaque;
        self.lazy_state = old_state;
        r
    }
    fn decode_alloc_id(&mut self) -> Result<rustc_middle::mir::interpret::AllocId, Self::Error> {
        // Requires crate metadata: the decoding session is only created
        // when `cdata` is available (see the decoder constructor).
        if let Some(alloc_decoding_session) = self.alloc_decoding_session {
            alloc_decoding_session.decode_alloc_id(self)
        } else {
            bug!("Attempting to decode interpret::AllocId without CrateMetadata")
        }
    }
}
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for CrateNum {
    fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Result<CrateNum, String> {
        // Crate numbers are encoded in the metadata crate's own numbering;
        // remap into the current compilation session's numbering.
        let cnum = CrateNum::from_u32(d.read_u32()?);
        Ok(d.map_encoded_cnum_to_current(cnum))
    }
}
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for DefIndex {
    /// A `DefIndex` is stored as a plain `u32`; no remapping is required.
    fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Result<DefIndex, String> {
        let raw = d.read_u32()?;
        Ok(DefIndex::from_u32(raw))
    }
}
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnIndex {
    /// An `ExpnIndex` is stored as a plain `u32`; no remapping is required.
    fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Result<ExpnIndex, String> {
        let raw = d.read_u32()?;
        Ok(ExpnIndex::from_u32(raw))
    }
}
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for SyntaxContext {
    fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<SyntaxContext, String> {
        let cdata = decoder.cdata();
        // Hygiene decoding requires a Session; callers without one must not
        // hit this path.
        let sess = decoder.sess.unwrap();
        let cname = cdata.root.name;
        // Delegates to the shared hygiene machinery; the closure is only
        // invoked for contexts not already present in `hygiene_context`.
        rustc_span::hygiene::decode_syntax_context(decoder, &cdata.hygiene_context, |_, id| {
            debug!("SpecializedDecoder<SyntaxContext>: decoding {}", id);
            Ok(cdata
                .root
                .syntax_contexts
                .get(&cdata, id)
                .unwrap_or_else(|| panic!("Missing SyntaxContext {:?} for crate {:?}", id, cname))
                .decode((&cdata, sess)))
        })
    }
}
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnId {
    fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<ExpnId, String> {
        let local_cdata = decoder.cdata();
        // Expansion data decoding requires a Session.
        let sess = decoder.sess.unwrap();
        // An ExpnId is encoded as (owning crate, index within that crate).
        let cnum = CrateNum::decode(decoder)?;
        let index = u32::decode(decoder)?;
        let expn_id = rustc_span::hygiene::decode_expn_id(cnum, index, |expn_id| {
            let ExpnId { krate: cnum, local_id: index } = expn_id;
            // Lookup local `ExpnData`s in our own crate data. Foreign `ExpnData`s
            // are stored in the owning crate, to avoid duplication.
            debug_assert_ne!(cnum, LOCAL_CRATE);
            let crate_data = if cnum == local_cdata.cnum {
                local_cdata
            } else {
                local_cdata.cstore.get_crate_data(cnum)
            };
            let expn_data = crate_data
                .root
                .expn_data
                .get(&crate_data, index)
                .unwrap()
                .decode((&crate_data, sess));
            let expn_hash = crate_data
                .root
                .expn_hashes
                .get(&crate_data, index)
                .unwrap()
                .decode((&crate_data, sess));
            (expn_data, expn_hash)
        });
        Ok(expn_id)
    }
}
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span {
    /// Decodes a `Span`, translating the encoded byte positions (which are
    /// relative to the *owning* crate's source map) into positions in the
    /// current session's source map via `imported_source_files`.
    fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<Span, String> {
        let ctxt = SyntaxContext::decode(decoder)?;
        let tag = u8::decode(decoder)?;
        if tag == TAG_PARTIAL_SPAN {
            return Ok(DUMMY_SP.with_ctxt(ctxt));
        }
        debug_assert!(tag == TAG_VALID_SPAN_LOCAL || tag == TAG_VALID_SPAN_FOREIGN);
        let lo = BytePos::decode(decoder)?;
        // Spans are encoded as (lo, length), not (lo, hi).
        let len = BytePos::decode(decoder)?;
        let hi = lo + len;
        let sess = if let Some(sess) = decoder.sess {
            sess
        } else {
            bug!("Cannot decode Span without Session.")
        };
        // There are two possibilities here:
        // 1. This is a 'local span', which is located inside a `SourceFile`
        // that came from this crate. In this case, we use the source map data
        // encoded in this crate. This branch should be taken nearly all of the time.
        // 2. This is a 'foreign span', which is located inside a `SourceFile`
        // that came from a *different* crate (some crate upstream of the one
        // whose metadata we're looking at). For example, consider this dependency graph:
        //
        // A -> B -> C
        //
        // Suppose that we're currently compiling crate A, and start deserializing
        // metadata from crate B. When we deserialize a Span from crate B's metadata,
        // there are two possibilities:
        //
        // 1. The span references a file from crate B. This makes it a 'local' span,
        // which means that we can use crate B's serialized source map information.
        // 2. The span references a file from crate C. This makes it a 'foreign' span,
        // which means we need to use Crate *C* (not crate B) to determine the source
        // map information. We only record source map information for a file in the
        // crate that 'owns' it, so deserializing a Span may require us to look at
        // a transitive dependency.
        //
        // When we encode a foreign span, we adjust its 'lo' and 'hi' values
        // to be based on the *foreign* crate (e.g. crate C), not the crate
        // we are writing metadata for (e.g. crate B). This allows us to
        // treat the 'local' and 'foreign' cases almost identically during deserialization:
        // we can call `imported_source_files` for the proper crate, and binary search
        // through the returned slice using our span.
        let imported_source_files = if tag == TAG_VALID_SPAN_LOCAL {
            decoder.cdata().imported_source_files(sess)
        } else {
            // When we encode a proc-macro crate, all `Span`s should be encoded
            // with `TAG_VALID_SPAN_LOCAL`
            if decoder.cdata().root.is_proc_macro_crate() {
                // Decode `CrateNum` as u32 - using `CrateNum::decode` will ICE
                // since we don't have `cnum_map` populated.
                let cnum = u32::decode(decoder)?;
                panic!(
                    "Decoding of crate {:?} tried to access proc-macro dep {:?}",
                    decoder.cdata().root.name,
                    cnum
                );
            }
            // tag is TAG_VALID_SPAN_FOREIGN, checked by `debug_assert` above
            let cnum = CrateNum::decode(decoder)?;
            debug!(
                "SpecializedDecoder<Span>::specialized_decode: loading source files from cnum {:?}",
                cnum
            );
            // Decoding 'foreign' spans should be rare enough that it's
            // not worth it to maintain a per-CrateNum cache for `last_source_file_index`.
            // We just set it to 0, to ensure that we don't try to access something out
            // of bounds for our initial 'guess'
            decoder.last_source_file_index = 0;
            let foreign_data = decoder.cdata().cstore.get_crate_data(cnum);
            foreign_data.imported_source_files(sess)
        };
        let source_file = {
            // Optimize for the case that most spans within a translated item
            // originate from the same source_file.
            let last_source_file = &imported_source_files[decoder.last_source_file_index];
            if lo >= last_source_file.original_start_pos && lo <= last_source_file.original_end_pos
            {
                last_source_file
            } else {
                let index = imported_source_files
                    .binary_search_by_key(&lo, |source_file| source_file.original_start_pos)
                    .unwrap_or_else(|index| index - 1);
                // Don't try to cache the index for foreign spans,
                // as this would require a map from CrateNums to indices
                if tag == TAG_VALID_SPAN_LOCAL {
                    decoder.last_source_file_index = index;
                }
                &imported_source_files[index]
            }
        };
        // Make sure our binary search above is correct.
        debug_assert!(
            lo >= source_file.original_start_pos && lo <= source_file.original_end_pos,
            "Bad binary search: lo={:?} source_file.original_start_pos={:?} source_file.original_end_pos={:?}",
            lo,
            source_file.original_start_pos,
            source_file.original_end_pos
        );
        // Make sure we correctly filtered out invalid spans during encoding
        debug_assert!(
            hi >= source_file.original_start_pos && hi <= source_file.original_end_pos,
            "Bad binary search: hi={:?} source_file.original_start_pos={:?} source_file.original_end_pos={:?}",
            hi,
            source_file.original_start_pos,
            source_file.original_end_pos
        );
        // Rebase the positions from the owning crate's coordinates into the
        // translated file's coordinates in the current session.
        let lo =
            (lo + source_file.translated_source_file.start_pos) - source_file.original_start_pos;
        let hi =
            (hi + source_file.translated_source_file.start_pos) - source_file.original_start_pos;
        // Do not try to decode parent for foreign spans.
        Ok(Span::new(lo, hi, ctxt, None))
    }
}
// Forwards to `RefDecodable`, which arena-allocates the slice in the tcx.
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for &'tcx [thir::abstract_const::Node<'tcx>] {
    fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Result<Self, String> {
        ty::codec::RefDecodable::decode(d)
    }
}
// Forwards to `RefDecodable`, which arena-allocates the slice in the tcx.
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for &'tcx [(ty::Predicate<'tcx>, Span)] {
    fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Result<Self, String> {
        ty::codec::RefDecodable::decode(d)
    }
}
// A `Lazy<T>` has no extra metadata — only its (relative) position.
impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> Decodable<DecodeContext<'a, 'tcx>>
    for Lazy<T>
{
    fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<Self, String> {
        decoder.read_lazy_with_meta(())
    }
}
// A `Lazy<[T]>` is encoded as its length followed (for non-empty slices)
// by a relative position; the empty slice needs no position at all.
impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> Decodable<DecodeContext<'a, 'tcx>>
    for Lazy<[T]>
{
    fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<Self, String> {
        let len = decoder.read_usize()?;
        if len == 0 { Ok(Lazy::empty()) } else { decoder.read_lazy_with_meta(len) }
    }
}
// A `Lazy<Table<..>>` is encoded as its byte length followed by a relative
// position; unlike `Lazy<[T]>`, a zero-length table still carries a position.
impl<'a, 'tcx, I: Idx, T: Decodable<DecodeContext<'a, 'tcx>>> Decodable<DecodeContext<'a, 'tcx>>
    for Lazy<Table<I, T>>
where
    Option<T>: FixedSizeEncoding,
{
    fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<Self, String> {
        let len = decoder.read_usize()?;
        decoder.read_lazy_with_meta(len)
    }
}
implement_ty_decoder!(DecodeContext<'a, 'tcx>);
impl MetadataBlob {
    /// Wraps a loaded (typically memory-mapped) metadata buffer.
    crate fn new(metadata_ref: MetadataRef) -> MetadataBlob {
        MetadataBlob(Lrc::new(metadata_ref))
    }
    /// Cheap sanity check: a well-formed blob starts with `METADATA_HEADER`.
    crate fn is_compatible(&self) -> bool {
        self.blob().starts_with(METADATA_HEADER)
    }
    /// Decodes the rustc version string stored immediately after the header
    /// and the 4-byte root-position word.
    crate fn get_rustc_version(&self) -> String {
        Lazy::<String>::from_position(NonZeroUsize::new(METADATA_HEADER.len() + 4).unwrap())
            .decode(self)
    }
    /// Reads the big-endian `u32` stored right after the header — the
    /// absolute position of the `CrateRoot` — and decodes the root there.
    crate fn get_root(&self) -> CrateRoot<'tcx> {
        let slice = &self.blob()[..];
        let offset = METADATA_HEADER.len();
        // Big-endian u32 assembled by hand; the former `offset + 0` and
        // `<< 0` identity operations are dropped (clippy::identity_op).
        let pos = (((slice[offset] as u32) << 24)
            | ((slice[offset + 1] as u32) << 16)
            | ((slice[offset + 2] as u32) << 8)
            | (slice[offset + 3] as u32)) as usize;
        Lazy::<CrateRoot<'tcx>>::from_position(NonZeroUsize::new(pos).unwrap()).decode(self)
    }
    /// Writes a human-readable summary of the crate root (name, hash,
    /// proc-macro flag, dependency list) to `out`.
    crate fn list_crate_metadata(&self, out: &mut dyn io::Write) -> io::Result<()> {
        let root = self.get_root();
        writeln!(out, "Crate info:")?;
        writeln!(out, "name {}{}", root.name, root.extra_filename)?;
        writeln!(out, "hash {} stable_crate_id {:?}", root.hash, root.stable_crate_id)?;
        writeln!(out, "proc_macro {:?}", root.proc_macro_data.is_some())?;
        writeln!(out, "=External Dependencies=")?;
        for (i, dep) in root.crate_deps.decode(self).enumerate() {
            writeln!(
                out,
                "{} {}{} hash {} host_hash {:?} kind {:?}",
                i + 1,
                dep.name,
                dep.extra_filename,
                dep.hash,
                dep.host_hash,
                dep.kind
            )?;
        }
        // Idiomatic bare newline (was `write!(out, "\n")`).
        writeln!(out)?;
        Ok(())
    }
}
impl CrateRoot<'_> {
    /// Whether this crate was compiled as a proc-macro crate.
    crate fn is_proc_macro_crate(&self) -> bool {
        self.proc_macro_data.is_some()
    }
    crate fn name(&self) -> Symbol {
        self.name
    }
    crate fn hash(&self) -> Svh {
        self.hash
    }
    crate fn stable_crate_id(&self) -> StableCrateId {
        self.stable_crate_id
    }
    crate fn triple(&self) -> &TargetTriple {
        &self.triple
    }
    /// Decodes this crate's dependency list straight from the blob, without
    /// needing full `CrateMetadata`.
    crate fn decode_crate_deps(
        &self,
        metadata: &'a MetadataBlob,
    ) -> impl ExactSizeIterator<Item = CrateDep> + Captures<'a> {
        self.crate_deps.decode(metadata)
    }
}
impl<'a, 'tcx> CrateMetadataRef<'a> {
    /// Returns the raw `ProcMacro` for `id` by finding its position in the
    /// crate root's proc-macro list and indexing the parallel array.
    fn raw_proc_macro(&self, id: DefIndex) -> &ProcMacro {
        // DefIndex's in root.proc_macro_data have a one-to-one correspondence
        // with items in 'raw_proc_macros'.
        let pos = self
            .root
            .proc_macro_data
            .as_ref()
            .unwrap()
            .macros
            .decode(self)
            .position(|i| i == id)
            .unwrap();
        &self.raw_proc_macros.unwrap()[pos]
    }
    /// Builds the identifier for `item_index` from its def-key name and the
    /// `ident_span` table; errors (rather than panicking) when either the
    /// name or the span entry is missing.
    fn try_item_ident(&self, item_index: DefIndex, sess: &Session) -> Result<Ident, String> {
        let name = self
            .def_key(item_index)
            .disambiguated_data
            .data
            .get_opt_name()
            .ok_or_else(|| format!("Missing opt name for {:?}", item_index))?;
        let span = self
            .root
            .tables
            .ident_span
            .get(self, item_index)
            .ok_or_else(|| format!("Missing ident span for {:?} ({:?})", name, item_index))?
            .decode((self, sess));
        Ok(Ident::new(name, span))
    }
    /// Infallible variant of `try_item_ident`; panics on malformed metadata.
    fn item_ident(&self, item_index: DefIndex, sess: &Session) -> Ident {
        self.try_item_ident(item_index, sess).unwrap()
    }
    /// Looks up the `EntryKind` for `item_id`, returning `None` when the
    /// kind table has no entry for it.
    fn maybe_kind(&self, item_id: DefIndex) -> Option<EntryKind> {
        self.root.tables.kind.get(self, item_id).map(|k| k.decode(self))
    }
    /// Like `maybe_kind`, but a missing entry is a compiler bug.
    fn kind(&self, item_id: DefIndex) -> EntryKind {
        self.maybe_kind(item_id).unwrap_or_else(|| {
            bug!(
                "CrateMetadata::kind({:?}): id not found, in crate {:?} with number {}",
                item_id,
                self.root.name,
                self.cnum,
            )
        })
    }
    /// Looks up the `DefKind` for `item_id`; a missing entry is a compiler bug.
    fn def_kind(&self, item_id: DefIndex) -> DefKind {
        self.root.tables.def_kind.get(self, item_id).map(|k| k.decode(self)).unwrap_or_else(|| {
            bug!(
                "CrateMetadata::def_kind({:?}): id not found, in crate {:?} with number {}",
                item_id,
                self.root.name,
                self.cnum,
            )
        })
    }
    /// Decodes the span of the item at `index`; panics if none was encoded.
    fn get_span(&self, index: DefIndex, sess: &Session) -> Span {
        self.root
            .tables
            .span
            .get(self, index)
            .unwrap_or_else(|| panic!("Missing span for {:?}", index))
            .decode((self, sess))
    }
    /// Materializes a `SyntaxExtension` for the proc macro at `id`, mapping
    /// each raw proc-macro variant (derive/attr/bang) onto the matching
    /// extension kind; derive helpers are interned as symbols.
    fn load_proc_macro(&self, id: DefIndex, sess: &Session) -> SyntaxExtension {
        let (name, kind, helper_attrs) = match *self.raw_proc_macro(id) {
            ProcMacro::CustomDerive { trait_name, attributes, client } => {
                let helper_attrs =
                    attributes.iter().cloned().map(Symbol::intern).collect::<Vec<_>>();
                (
                    trait_name,
                    SyntaxExtensionKind::Derive(Box::new(ProcMacroDerive { client })),
                    helper_attrs,
                )
            }
            ProcMacro::Attr { name, client } => {
                (name, SyntaxExtensionKind::Attr(Box::new(AttrProcMacro { client })), Vec::new())
            }
            ProcMacro::Bang { name, client } => {
                (name, SyntaxExtensionKind::Bang(Box::new(BangProcMacro { client })), Vec::new())
            }
        };
        let attrs: Vec<_> = self.get_item_attrs(id, sess).collect();
        SyntaxExtension::new(
            sess,
            kind,
            self.get_span(id, sess),
            helper_attrs,
            self.root.edition,
            Symbol::intern(name),
            &attrs,
        )
    }
    /// Builds a `ty::TraitDef` for `item_id`. Trait aliases have no encoded
    /// trait data, so a default (safe, non-auto, non-marker) def is built.
    fn get_trait_def(&self, item_id: DefIndex, sess: &Session) -> ty::TraitDef {
        match self.kind(item_id) {
            EntryKind::Trait(data) => {
                let data = data.decode((self, sess));
                ty::TraitDef::new(
                    self.local_def_id(item_id),
                    data.unsafety,
                    data.paren_sugar,
                    data.has_auto_impl,
                    data.is_marker,
                    data.skip_array_during_method_dispatch,
                    data.specialization_kind,
                    self.def_path_hash(item_id),
                )
            }
            EntryKind::TraitAlias => ty::TraitDef::new(
                self.local_def_id(item_id),
                hir::Unsafety::Normal,
                false,
                false,
                false,
                false,
                ty::trait_def::TraitSpecializationKind::None,
                self.def_path_hash(item_id),
            ),
            _ => bug!("def-index does not refer to trait or trait alias"),
        }
    }
    /// Builds a `ty::VariantDef` from an already-looked-up `EntryKind`
    /// (variant, struct, or union). Only enum variants get their own
    /// `variant_did`; struct/union "variants" reuse the parent's id.
    fn get_variant(
        &self,
        kind: &EntryKind,
        index: DefIndex,
        parent_did: DefId,
        sess: &Session,
    ) -> ty::VariantDef {
        let data = match kind {
            EntryKind::Variant(data) | EntryKind::Struct(data, _) | EntryKind::Union(data, _) => {
                data.decode(self)
            }
            _ => bug!(),
        };
        let adt_kind = match kind {
            EntryKind::Variant(_) => ty::AdtKind::Enum,
            EntryKind::Struct(..) => ty::AdtKind::Struct,
            EntryKind::Union(..) => ty::AdtKind::Union,
            _ => bug!(),
        };
        let variant_did =
            if adt_kind == ty::AdtKind::Enum { Some(self.local_def_id(index)) } else { None };
        let ctor_did = data.ctor.map(|index| self.local_def_id(index));
        ty::VariantDef::new(
            self.item_ident(index, sess),
            variant_did,
            ctor_did,
            data.discr,
            // Fields are stored as the children of the variant/struct.
            self.root
                .tables
                .children
                .get(self, index)
                .unwrap_or_else(Lazy::empty)
                .decode(self)
                .map(|index| ty::FieldDef {
                    did: self.local_def_id(index),
                    ident: self.item_ident(index, sess),
                    vis: self.get_visibility(index),
                })
                .collect(),
            data.ctor_kind,
            adt_kind,
            parent_did,
            false,
            data.is_non_exhaustive,
        )
    }
    /// Builds the `AdtDef` for `item_id` and allocates it in the tcx.
    /// Enums collect one `VariantDef` per child; structs and unions have
    /// exactly one variant (themselves).
    fn get_adt_def(&self, item_id: DefIndex, tcx: TyCtxt<'tcx>) -> &'tcx ty::AdtDef {
        let kind = self.kind(item_id);
        let did = self.local_def_id(item_id);
        let (adt_kind, repr) = match kind {
            EntryKind::Enum(repr) => (ty::AdtKind::Enum, repr),
            EntryKind::Struct(_, repr) => (ty::AdtKind::Struct, repr),
            EntryKind::Union(_, repr) => (ty::AdtKind::Union, repr),
            _ => bug!("get_adt_def called on a non-ADT {:?}", did),
        };
        let variants = if let ty::AdtKind::Enum = adt_kind {
            self.root
                .tables
                .children
                .get(self, item_id)
                .unwrap_or_else(Lazy::empty)
                .decode(self)
                .map(|index| self.get_variant(&self.kind(index), index, did, tcx.sess))
                .collect()
        } else {
            std::iter::once(self.get_variant(&kind, item_id, did, tcx.sess)).collect()
        };
        tcx.alloc_adt_def(did, adt_kind, variants, repr)
    }
    /// Decodes the explicitly written predicates of `item_id`; the table
    /// entry is expected to exist for any item this is called on.
    fn get_explicit_predicates(
        &self,
        item_id: DefIndex,
        tcx: TyCtxt<'tcx>,
    ) -> ty::GenericPredicates<'tcx> {
        self.root.tables.explicit_predicates.get(self, item_id).unwrap().decode((self, tcx))
    }
    /// Decodes inferred outlives-predicates for `item_id` into a tcx-arena
    /// slice; items with no entry get an empty slice.
    fn get_inferred_outlives(
        &self,
        item_id: DefIndex,
        tcx: TyCtxt<'tcx>,
    ) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
        self.root
            .tables
            .inferred_outlives
            .get(self, item_id)
            .map(|predicates| tcx.arena.alloc_from_iter(predicates.decode((self, tcx))))
            .unwrap_or_default()
    }
    /// Decodes the supertrait predicates of the trait `item_id`; the table
    /// entry is expected to exist.
    fn get_super_predicates(
        &self,
        item_id: DefIndex,
        tcx: TyCtxt<'tcx>,
    ) -> ty::GenericPredicates<'tcx> {
        self.root.tables.super_predicates.get(self, item_id).unwrap().decode((self, tcx))
    }
    /// Decodes explicit item bounds (e.g. on associated types / impl Trait)
    /// into a tcx-arena slice; items with no entry get an empty slice.
    fn get_explicit_item_bounds(
        &self,
        item_id: DefIndex,
        tcx: TyCtxt<'tcx>,
    ) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
        self.root
            .tables
            .explicit_item_bounds
            .get(self, item_id)
            .map(|bounds| tcx.arena.alloc_from_iter(bounds.decode((self, tcx))))
            .unwrap_or_default()
    }
    /// Decodes the generics of `item_id`; the table entry must exist.
    fn get_generics(&self, item_id: DefIndex, sess: &Session) -> ty::Generics {
        self.root.tables.generics.get(self, item_id).unwrap().decode((self, sess))
    }
    /// Decodes the type of `id`; panics if the item has no type entry.
    fn get_type(&self, id: DefIndex, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        self.root
            .tables
            .ty
            .get(self, id)
            .unwrap_or_else(|| panic!("Not a type: {:?}", id))
            .decode((self, tcx))
    }
    /// Decodes the `#[stable]`/`#[unstable]` attribute of `id`, if any.
    fn get_stability(&self, id: DefIndex) -> Option<attr::Stability> {
        self.root.tables.stability.get(self, id).map(|stab| stab.decode(self))
    }
    /// Decodes the const-stability attribute of `id`, if any.
    fn get_const_stability(&self, id: DefIndex) -> Option<attr::ConstStability> {
        self.root.tables.const_stability.get(self, id).map(|stab| stab.decode(self))
    }
    /// Decodes the `#[deprecated]` attribute of `id`, if any.
    fn get_deprecation(&self, id: DefIndex) -> Option<attr::Deprecation> {
        self.root.tables.deprecation.get(self, id).map(|depr| depr.decode(self))
    }
    /// Decodes the visibility of `id`; every item has a visibility entry.
    fn get_visibility(&self, id: DefIndex) -> ty::Visibility {
        self.root.tables.visibility.get(self, id).unwrap().decode(self)
    }
fn get_impl_data(&self, id: DefIndex) -> ImplData {
match self.kind(id) {
EntryKind::Impl(data) => data.decode(self),
_ => bug!(),
}
}
    /// Parent impl of the (specializing) impl `id`, if it has one.
    fn get_parent_impl(&self, id: DefIndex) -> Option<DefId> {
        self.get_impl_data(id).parent_impl
    }
    /// Polarity (positive/negative/reservation) of the impl `id`.
    fn get_impl_polarity(&self, id: DefIndex) -> ty::ImplPolarity {
        self.get_impl_data(id).polarity
    }
    /// Defaultness (`default impl` or final) of the impl `id`.
    fn get_impl_defaultness(&self, id: DefIndex) -> hir::Defaultness {
        self.get_impl_data(id).defaultness
    }
    /// Constness (`impl const` or not) of the impl `id`.
    fn get_impl_constness(&self, id: DefIndex) -> hir::Constness {
        self.get_impl_data(id).constness
    }
    /// `CoerceUnsized` information of the impl `id`, if it was encoded.
    fn get_coerce_unsized_info(&self, id: DefIndex) -> Option<ty::adjustment::CoerceUnsizedInfo> {
        self.get_impl_data(id).coerce_unsized_info
    }
    /// Trait reference implemented by `id` (`None` for inherent impls).
    fn get_impl_trait(&self, id: DefIndex, tcx: TyCtxt<'tcx>) -> Option<ty::TraitRef<'tcx>> {
        self.root.tables.impl_trait_ref.get(self, id).map(|tr| tr.decode((self, tcx)))
    }
    /// Macro expansion in which `id` was defined; every item has an entry.
    fn get_expn_that_defined(&self, id: DefIndex, sess: &Session) -> ExpnId {
        self.root.tables.expn_that_defined.get(self, id).unwrap().decode((self, sess))
    }
    /// Decodes the default value of the const generic parameter `id`;
    /// the table entry must exist for parameters with defaults.
    fn get_const_param_default(
        &self,
        tcx: TyCtxt<'tcx>,
        id: DefIndex,
    ) -> rustc_middle::ty::Const<'tcx> {
        self.root.tables.const_defaults.get(self, id).unwrap().decode((self, tcx))
    }
    /// Iterates over all the stability attributes in the given crate,
    /// collected into a tcx-arena slice of `(feature, since)` pairs.
    fn get_lib_features(&self, tcx: TyCtxt<'tcx>) -> &'tcx [(Symbol, Option<Symbol>)] {
        // FIXME: For a proc macro crate, not sure whether we should return the "host"
        // features or an empty Vec. Both don't cause ICEs.
        tcx.arena.alloc_from_iter(self.root.lib_features.decode(self))
    }
/// Iterates over the language items in the given crate.
fn get_lang_items(&self, tcx: TyCtxt<'tcx>) -> &'tcx [(DefId, usize)] {
if self.root.is_proc_macro_crate() {
// Proc macro crates do not export any lang-items to the target.
&[]
} else {
tcx.arena.alloc_from_iter(
self.root
.lang_items
.decode(self)
.map(|(def_index, index)| (self.local_def_id(def_index), index)),
)
}
}
    /// Iterates over the diagnostic items in the given crate, building both
    /// directions of the name <-> DefId mapping in one pass.
    fn get_diagnostic_items(&self) -> DiagnosticItems {
        if self.root.is_proc_macro_crate() {
            // Proc macro crates do not export any diagnostic-items to the target.
            Default::default()
        } else {
            let mut id_to_name = FxHashMap::default();
            let name_to_id = self
                .root
                .diagnostic_items
                .decode(self)
                .map(|(name, def_index)| {
                    let id = self.local_def_id(def_index);
                    // Populate the reverse map as a side effect of building
                    // the forward one.
                    id_to_name.insert(id, name);
                    (name, id)
                })
                .collect();
            DiagnosticItems { id_to_name, name_to_id }
        }
    }
    /// Iterates over each child of the given item, invoking `callback` with
    /// an `Export` per child. Handles three shapes of crate: proc-macro
    /// crates (children of the crate root are the macros themselves),
    /// ordinary items (children table, with foreign-mod flattening and
    /// synthesized constructors), and module re-exports.
    fn each_child_of_item(&self, id: DefIndex, mut callback: impl FnMut(Export), sess: &Session) {
        if let Some(data) = &self.root.proc_macro_data {
            /* If we are loading as a proc macro, we want to return the view of this crate
             * as a proc macro crate.
             */
            if id == CRATE_DEF_INDEX {
                let macros = data.macros.decode(self);
                for def_index in macros {
                    let raw_macro = self.raw_proc_macro(def_index);
                    let res = Res::Def(
                        DefKind::Macro(macro_kind(raw_macro)),
                        self.local_def_id(def_index),
                    );
                    let ident = self.item_ident(def_index, sess);
                    callback(Export { ident, res, vis: ty::Visibility::Public, span: ident.span });
                }
            }
            return;
        }
        // Find the item.
        let kind = match self.maybe_kind(id) {
            None => return,
            Some(kind) => kind,
        };
        // Iterate over all children.
        let macros_only = self.dep_kind.lock().macros_only();
        if !macros_only {
            let children = self.root.tables.children.get(self, id).unwrap_or_else(Lazy::empty);
            for child_index in children.decode((self, sess)) {
                // Get the item.
                let child_kind = match self.maybe_kind(child_index) {
                    Some(child_kind) => child_kind,
                    None => continue,
                };
                // Hand off the item to the callback.
                match child_kind {
                    // FIXME(eddyb) Don't encode these in children.
                    EntryKind::ForeignMod => {
                        // Foreign modules are transparent: export their
                        // children directly instead of the module itself.
                        let child_children = self
                            .root
                            .tables
                            .children
                            .get(self, child_index)
                            .unwrap_or_else(Lazy::empty);
                        for child_index in child_children.decode((self, sess)) {
                            let kind = self.def_kind(child_index);
                            callback(Export {
                                res: Res::Def(kind, self.local_def_id(child_index)),
                                ident: self.item_ident(child_index, sess),
                                vis: self.get_visibility(child_index),
                                span: self
                                    .root
                                    .tables
                                    .span
                                    .get(self, child_index)
                                    .unwrap()
                                    .decode((self, sess)),
                            });
                        }
                        continue;
                    }
                    EntryKind::Impl(_) => continue,
                    _ => {}
                }
                let def_key = self.def_key(child_index);
                if def_key.disambiguated_data.data.get_opt_name().is_some() {
                    let span = self.get_span(child_index, sess);
                    let kind = self.def_kind(child_index);
                    let ident = self.item_ident(child_index, sess);
                    let vis = self.get_visibility(child_index);
                    let def_id = self.local_def_id(child_index);
                    let res = Res::Def(kind, def_id);
                    // FIXME: Macros are currently encoded twice, once as items and once as
                    // reexports. We ignore the items here and only use the reexports.
                    if !matches!(kind, DefKind::Macro(..)) {
                        callback(Export { res, ident, vis, span });
                    }
                    // For non-re-export structs and variants add their constructors to children.
                    // Re-export lists automatically contain constructors when necessary.
                    match kind {
                        DefKind::Struct => {
                            if let Some(ctor_def_id) = self.get_ctor_def_id(child_index) {
                                let ctor_kind = self.get_ctor_kind(child_index);
                                let ctor_res =
                                    Res::Def(DefKind::Ctor(CtorOf::Struct, ctor_kind), ctor_def_id);
                                let vis = self.get_visibility(ctor_def_id.index);
                                callback(Export { res: ctor_res, vis, ident, span });
                            }
                        }
                        DefKind::Variant => {
                            // Braced variants, unlike structs, generate unusable names in
                            // value namespace, they are reserved for possible future use.
                            // It's ok to use the variant's id as a ctor id since an
                            // error will be reported on any use of such resolution anyway.
                            let ctor_def_id = self.get_ctor_def_id(child_index).unwrap_or(def_id);
                            let ctor_kind = self.get_ctor_kind(child_index);
                            let ctor_res =
                                Res::Def(DefKind::Ctor(CtorOf::Variant, ctor_kind), ctor_def_id);
                            let mut vis = self.get_visibility(ctor_def_id.index);
                            if ctor_def_id == def_id && vis == ty::Visibility::Public {
                                // For non-exhaustive variants lower the constructor visibility to
                                // within the crate. We only need this for fictive constructors,
                                // for other constructors correct visibilities
                                // were already encoded in metadata.
                                let mut attrs = self.get_item_attrs(def_id.index, sess);
                                if attrs.any(|item| item.has_name(sym::non_exhaustive)) {
                                    let crate_def_id = self.local_def_id(CRATE_DEF_INDEX);
                                    vis = ty::Visibility::Restricted(crate_def_id);
                                }
                            }
                            callback(Export { res: ctor_res, ident, vis, span });
                        }
                        _ => {}
                    }
                }
            }
        }
        // Modules additionally carry an explicit re-export list; macros are
        // reported even in macros-only mode.
        if let EntryKind::Mod(data) = kind {
            for exp in data.decode((self, sess)).reexports.decode((self, sess)) {
                match exp.res {
                    Res::Def(DefKind::Macro(..), _) => {}
                    _ if macros_only => continue,
                    _ => {}
                }
                callback(exp);
            }
        }
    }
    /// Whether CTFE-usable MIR was encoded for `id`.
    fn is_ctfe_mir_available(&self, id: DefIndex) -> bool {
        self.root.tables.mir_for_ctfe.get(self, id).is_some()
    }
    /// Whether optimized MIR was encoded for `id`.
    fn is_item_mir_available(&self, id: DefIndex) -> bool {
        self.root.tables.mir.get(self, id).is_some()
    }
    /// Expansion in which the module `id` was produced; panics if `id` is
    /// not a module.
    fn module_expansion(&self, id: DefIndex, sess: &Session) -> ExpnId {
        if let EntryKind::Mod(m) = self.kind(id) {
            m.decode((self, sess)).expansion
        } else {
            panic!("Expected module, found {:?}", self.local_def_id(id))
        }
    }
    /// Decodes the optimized MIR body of `id`; missing MIR is a compiler bug
    /// (callers should check `is_item_mir_available` first).
    fn get_optimized_mir(&self, tcx: TyCtxt<'tcx>, id: DefIndex) -> Body<'tcx> {
        self.root
            .tables
            .mir
            .get(self, id)
            .unwrap_or_else(|| {
                bug!("get_optimized_mir: missing MIR for `{:?}`", self.local_def_id(id))
            })
            .decode((self, tcx))
    }
    /// Decodes the CTFE MIR body of `id`; missing MIR is a compiler bug
    /// (callers should check `is_ctfe_mir_available` first).
    fn get_mir_for_ctfe(&self, tcx: TyCtxt<'tcx>, id: DefIndex) -> Body<'tcx> {
        self.root
            .tables
            .mir_for_ctfe
            .get(self, id)
            .unwrap_or_else(|| {
                bug!("get_mir_for_ctfe: missing MIR for `{:?}`", self.local_def_id(id))
            })
            .decode((self, tcx))
    }
    /// Decodes the THIR abstract const of `id`, if one was encoded.
    /// The `Result` mirrors the query signature; this path never errors.
    fn get_thir_abstract_const(
        &self,
        tcx: TyCtxt<'tcx>,
        id: DefIndex,
    ) -> Result<Option<&'tcx [thir::abstract_const::Node<'tcx>]>, ErrorReported> {
        self.root
            .tables
            .thir_abstract_consts
            .get(self, id)
            .map_or(Ok(None), |v| Ok(Some(v.decode((self, tcx)))))
    }
    /// Decodes the polymorphization bitset of `id`; items with no entry get
    /// the default (empty) set.
    fn get_unused_generic_params(&self, id: DefIndex) -> FiniteBitSet<u32> {
        self.root
            .tables
            .unused_generic_params
            .get(self, id)
            .map(|params| params.decode(self))
            .unwrap_or_default()
    }
    /// Decodes the promoted MIR bodies of `id`; a missing entry is a
    /// compiler bug.
    fn get_promoted_mir(&self, tcx: TyCtxt<'tcx>, id: DefIndex) -> IndexVec<Promoted, Body<'tcx>> {
        self.root
            .tables
            .promoted_mir
            .get(self, id)
            .unwrap_or_else(|| {
                bug!("get_promoted_mir: missing MIR for `{:?}`", self.local_def_id(id))
            })
            .decode((self, tcx))
    }
    /// Extracts the const-qualification flags stored with const-like entry
    /// kinds (anon consts, consts, and eligible associated consts).
    fn mir_const_qualif(&self, id: DefIndex) -> mir::ConstQualifs {
        match self.kind(id) {
            EntryKind::AnonConst(qualif, _)
            | EntryKind::Const(qualif, _)
            | EntryKind::AssocConst(
                AssocContainer::ImplDefault
                | AssocContainer::ImplFinal
                | AssocContainer::TraitWithDefault,
                qualif,
                _,
            ) => qualif,
            _ => bug!("mir_const_qualif: unexpected kind"),
        }
    }
    /// Builds a `ty::AssocItem` for `id` from its entry kind (const, fn, or
    /// type); the parent is recovered from the def key.
    fn get_associated_item(&self, id: DefIndex, sess: &Session) -> ty::AssocItem {
        let def_key = self.def_key(id);
        let parent = self.local_def_id(def_key.parent.unwrap());
        let ident = self.item_ident(id, sess);
        let (kind, container, has_self) = match self.kind(id) {
            EntryKind::AssocConst(container, _, _) => (ty::AssocKind::Const, container, false),
            EntryKind::AssocFn(data) => {
                let data = data.decode(self);
                (ty::AssocKind::Fn, data.container, data.has_self)
            }
            EntryKind::AssocType(container) => (ty::AssocKind::Type, container, false),
            _ => bug!("cannot get associated-item of `{:?}`", def_key),
        };
        ty::AssocItem {
            ident,
            kind,
            vis: self.get_visibility(id),
            defaultness: container.defaultness(),
            def_id: self.local_def_id(id),
            container: container.with_def_id(parent),
            fn_has_self_parameter: has_self,
        }
    }
    /// Lazily decodes the variances of `id`; items with no entry yield an
    /// empty iterator.
    fn get_item_variances(&'a self, id: DefIndex) -> impl Iterator<Item = ty::Variance> + 'a {
        self.root.tables.variances.get(self, id).unwrap_or_else(Lazy::empty).decode(self)
    }
    /// Constructor kind (fn/const/fictive) of the struct/union/variant at
    /// `node_id`; everything else is reported as `Fictive`.
    fn get_ctor_kind(&self, node_id: DefIndex) -> CtorKind {
        match self.kind(node_id) {
            EntryKind::Struct(data, _) | EntryKind::Union(data, _) | EntryKind::Variant(data) => {
                data.decode(self).ctor_kind
            }
            _ => CtorKind::Fictive,
        }
    }
fn get_ctor_def_id(&self, node_id: DefIndex) -> Option<DefId> {
match self.kind(node_id) {
EntryKind::Struct(data, _) => {
data.decode(self).ctor.map(|index| self.local_def_id(index))
}
EntryKind::Variant(data) => {
data.decode(self).ctor.map(|index| self.local_def_id(index))
}
_ => None,
}
}
    /// Lazily decodes the attributes of `node_id`. Constructor def-indices
    /// are redirected to their parent definition, which owns the attributes.
    fn get_item_attrs(
        &'a self,
        node_id: DefIndex,
        sess: &'a Session,
    ) -> impl Iterator<Item = ast::Attribute> + 'a {
        // The attributes for a tuple struct/variant are attached to the definition, not the ctor;
        // we assume that someone passing in a tuple struct ctor is actually wanting to
        // look at the definition
        let def_key = self.def_key(node_id);
        let item_id = if def_key.disambiguated_data.data == DefPathData::Ctor {
            def_key.parent.unwrap()
        } else {
            node_id
        };
        self.root
            .tables
            .attributes
            .get(self, item_id)
            .unwrap_or_else(Lazy::empty)
            .decode((self, sess))
    }
    /// Names (with spans) of the fields of the struct/variant at `id`,
    /// taken from its children in declaration order.
    fn get_struct_field_names(&self, id: DefIndex, sess: &Session) -> Vec<Spanned<Symbol>> {
        self.root
            .tables
            .children
            .get(self, id)
            .unwrap_or_else(Lazy::empty)
            .decode(self)
            .map(|index| respan(self.get_span(index, sess), self.item_ident(index, sess).name))
            .collect()
    }
fn get_struct_field_visibilities(&self, id: DefIndex) -> Vec<Visibility> {
self.root
.tables
.children
.get(self, id)
.unwrap_or_else(Lazy::empty)
.decode(self)
.map(|field_index| self.get_visibility(field_index))
.collect()
}
    /// `DefId`s of all inherent impls of the type at `id`, as a tcx-arena
    /// slice; an absent entry yields an empty slice.
    fn get_inherent_implementations_for_type(
        &self,
        tcx: TyCtxt<'tcx>,
        id: DefIndex,
    ) -> &'tcx [DefId] {
        tcx.arena.alloc_from_iter(
            self.root
                .tables
                .inherent_impls
                .get(self, id)
                .unwrap_or_else(Lazy::empty)
                .decode(self)
                .map(|index| self.local_def_id(index)),
        )
    }
    /// Trait impls in this crate, optionally restricted to impls of the
    /// trait `filter`; results pair the impl's `DefId` with its simplified
    /// self type. Proc-macro crates export no impls.
    fn get_implementations_for_trait(
        &self,
        tcx: TyCtxt<'tcx>,
        filter: Option<DefId>,
    ) -> &'tcx [(DefId, Option<ty::fast_reject::SimplifiedType>)] {
        if self.root.is_proc_macro_crate() {
            // proc-macro crates export no trait impls.
            return &[];
        }
        if let Some(def_id) = filter {
            // Do a reverse lookup beforehand to avoid touching the crate_num
            // hash map in the loop below.
            let filter = match self.reverse_translate_def_id(def_id) {
                Some(def_id) => (def_id.krate.as_u32(), def_id.index),
                None => return &[],
            };
            if let Some(impls) = self.trait_impls.get(&filter) {
                tcx.arena.alloc_from_iter(
                    impls.decode(self).map(|(idx, simplified_self_ty)| {
                        (self.local_def_id(idx), simplified_self_ty)
                    }),
                )
            } else {
                &[]
            }
        } else {
            // No filter: flatten the impls of every trait.
            tcx.arena.alloc_from_iter(self.trait_impls.values().flat_map(|impls| {
                impls
                    .decode(self)
                    .map(|(idx, simplified_self_ty)| (self.local_def_id(idx), simplified_self_ty))
            }))
        }
    }
    /// If `id` is an associated item of a trait (or trait alias), returns
    /// the trait's `DefId`; otherwise `None`.
    fn get_trait_of_item(&self, id: DefIndex) -> Option<DefId> {
        let def_key = self.def_key(id);
        match def_key.disambiguated_data.data {
            DefPathData::TypeNs(..) | DefPathData::ValueNs(..) => (),
            // Not an associated item
            _ => return None,
        }
        def_key.parent.and_then(|parent_index| match self.kind(parent_index) {
            EntryKind::Trait(_) | EntryKind::TraitAlias => Some(self.local_def_id(parent_index)),
            _ => None,
        })
    }
fn get_native_libraries(&self, sess: &Session) -> Vec<NativeLib> {
if self.root.is_proc_macro_crate() {
// Proc macro crates do not have any *target* native libraries.
vec![]
} else {
self.root.native_libraries.decode((self, sess)).collect()
}
}
    /// Decodes the span recorded for the `index`-th proc-macro quoted span;
    /// panics if no span was encoded for that index.
    fn get_proc_macro_quoted_span(&self, index: usize, sess: &Session) -> Span {
        self.root
            .tables
            .proc_macro_quoted_spans
            .get(self, index)
            .unwrap_or_else(|| panic!("Missing proc macro quoted span: {:?}", index))
            .decode((self, sess))
    }
    /// Builds the `DefId -> ForeignModule` map for every foreign module in
    /// this crate (always empty for proc-macro crates).
    fn get_foreign_modules(&self, tcx: TyCtxt<'tcx>) -> Lrc<FxHashMap<DefId, ForeignModule>> {
        if self.root.is_proc_macro_crate() {
            // Proc macro crates do not have any *target* foreign modules.
            Lrc::new(FxHashMap::default())
        } else {
            let modules: FxHashMap<DefId, ForeignModule> =
                self.root.foreign_modules.decode((self, tcx.sess)).map(|m| (m.def_id, m)).collect();
            Lrc::new(modules)
        }
    }
    /// Decodes, per dependency, the linkage preference recorded when this
    /// crate was built as a dylib. Encoded entry `i` corresponds to
    /// `CrateNum` `i + 1`, which is then translated through `cnum_map` into
    /// the current session's crate numbering.
    fn get_dylib_dependency_formats(
        &self,
        tcx: TyCtxt<'tcx>,
    ) -> &'tcx [(CrateNum, LinkagePreference)] {
        tcx.arena.alloc_from_iter(
            self.root.dylib_dependency_formats.decode(self).enumerate().flat_map(|(i, link)| {
                let cnum = CrateNum::new(i + 1);
                // Entries with no recorded preference (`None`) are skipped.
                link.map(|link| (self.cnum_map[cnum], link))
            }),
        )
    }
fn get_missing_lang_items(&self, tcx: TyCtxt<'tcx>) -> &'tcx [lang_items::LangItem] {
if self.root.is_proc_macro_crate() {
// Proc macro crates do not depend on any target weak lang-items.
&[]
} else {
tcx.arena.alloc_from_iter(self.root.lang_items_missing.decode(self))
}
}
    /// Returns the parameter names recorded for the function `id` (free,
    /// foreign, or associated); any other entry kind yields an empty slice.
    fn get_fn_param_names(&self, tcx: TyCtxt<'tcx>, id: DefIndex) -> &'tcx [Ident] {
        let param_names = match self.kind(id) {
            EntryKind::Fn(data) | EntryKind::ForeignFn(data) => data.decode(self).param_names,
            EntryKind::AssocFn(data) => data.decode(self).fn_data.param_names,
            _ => Lazy::empty(),
        };
        tcx.arena.alloc_from_iter(param_names.decode((self, tcx)))
    }
fn exported_symbols(
&self,
tcx: TyCtxt<'tcx>,
) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportLevel)] {
if self.root.is_proc_macro_crate() {
// If this crate is a custom derive crate, then we're not even going to
// link those in so we skip those crates.
&[]
} else {
tcx.arena.alloc_from_iter(self.root.exported_symbols.decode((self, tcx)))
}
}
    /// Returns the rendered (stringified) form encoded for the constant
    /// `id`; ICEs if `id` is not a const-like entry.
    fn get_rendered_const(&self, id: DefIndex) -> String {
        match self.kind(id) {
            EntryKind::AnonConst(_, data)
            | EntryKind::Const(_, data)
            | EntryKind::AssocConst(_, _, data) => data.decode(self).0,
            _ => bug!(),
        }
    }
    /// Decodes the `MacroDef` for `id`; ICEs if `id` is not a macro.
    fn get_macro(&self, id: DefIndex, sess: &Session) -> MacroDef {
        match self.kind(id) {
            EntryKind::MacroDef(macro_def) => macro_def.decode((self, sess)),
            _ => bug!(),
        }
    }
    // This replicates some of the logic of the crate-local `is_const_fn_raw` query, because we
    // don't serialize constness for tuple variant and tuple struct constructors.
    fn is_const_fn_raw(&self, id: DefIndex) -> bool {
        let constness = match self.kind(id) {
            EntryKind::AssocFn(data) => data.decode(self).fn_data.constness,
            EntryKind::Fn(data) => data.decode(self).constness,
            EntryKind::ForeignFn(data) => data.decode(self).constness,
            // Tuple variant/struct constructors are always const-callable.
            EntryKind::Variant(..) | EntryKind::Struct(..) => hir::Constness::Const,
            _ => hir::Constness::NotConst,
        };
        constness == hir::Constness::Const
    }
    /// Returns whether the function `id` was declared `async`; ICEs if `id`
    /// is not a function-like entry.
    fn asyncness(&self, id: DefIndex) -> hir::IsAsync {
        match self.kind(id) {
            EntryKind::Fn(data) => data.decode(self).asyncness,
            EntryKind::AssocFn(data) => data.decode(self).fn_data.asyncness,
            EntryKind::ForeignFn(data) => data.decode(self).asyncness,
            _ => bug!("asyncness: expected function kind"),
        }
    }
fn is_foreign_item(&self, id: DefIndex) -> bool {
match self.kind(id) {
EntryKind::ForeignImmStatic | EntryKind::ForeignMutStatic | EntryKind::ForeignFn(_) => {
true
}
_ => false,
}
}
    /// Returns the mutability of the static `id`, or `None` if `id` is not a
    /// (foreign) static at all.
    fn static_mutability(&self, id: DefIndex) -> Option<hir::Mutability> {
        match self.kind(id) {
            EntryKind::ImmStatic | EntryKind::ForeignImmStatic => Some(hir::Mutability::Not),
            EntryKind::MutStatic | EntryKind::ForeignMutStatic => Some(hir::Mutability::Mut),
            _ => None,
        }
    }
fn generator_kind(&self, id: DefIndex) -> Option<hir::GeneratorKind> {
match self.kind(id) {
EntryKind::Generator(data) => Some(data),
_ => None,
}
}
    /// Decodes the polymorphic signature of the function `id`; panics if no
    /// signature was encoded.
    fn fn_sig(&self, id: DefIndex, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
        self.root.tables.fn_sig.get(self, id).unwrap().decode((self, tcx))
    }
    /// Returns the `DefKey` for `index`, memoizing decoded keys in
    /// `def_key_cache`.
    #[inline]
    fn def_key(&self, index: DefIndex) -> DefKey {
        *self
            .def_key_cache
            .lock()
            .entry(index)
            .or_insert_with(|| self.root.tables.def_keys.get(self, index).unwrap().decode(self))
    }
    /// Returns the `DefPath` (the chain of `DefKey`s) leading to the item
    /// with this `id`.
    fn def_path(&self, id: DefIndex) -> DefPath {
        debug!("def_path(cnum={:?}, id={:?})", self.cnum, id);
        DefPath::make(self.cnum, id, |parent| self.def_key(parent))
    }
    /// Looks up the `DefPathHash` of `index`, memoizing results in the
    /// caller-supplied cache (the caller holds the lock on
    /// `def_path_hash_cache`; see `def_path_hash`).
    fn def_path_hash_unlocked(
        &self,
        index: DefIndex,
        def_path_hashes: &mut FxHashMap<DefIndex, DefPathHash>,
    ) -> DefPathHash {
        *def_path_hashes.entry(index).or_insert_with(|| {
            self.root.tables.def_path_hashes.get(self, index).unwrap().decode(self)
        })
    }
    /// Returns the `DefPathHash` of `index`, using the shared memoization
    /// cache.
    #[inline]
    fn def_path_hash(&self, index: DefIndex) -> DefPathHash {
        let mut def_path_hashes = self.def_path_hash_cache.lock();
        self.def_path_hash_unlocked(index, &mut def_path_hashes)
    }
    /// Reverse lookup: maps a `DefPathHash` back to its `DefIndex` via the
    /// pre-decoded `def_path_hash_map`.
    #[inline]
    fn def_path_hash_to_def_index(&self, hash: DefPathHash) -> DefIndex {
        self.def_path_hash_map.def_path_hash_to_def_index(&hash)
    }
    /// Maps an `ExpnHash` recorded in a previous session to an `ExpnId` in
    /// the current one, registering the decoded expansion data with the
    /// hygiene machinery. `index_guess` is the index the expansion had last
    /// session and is tried first.
    fn expn_hash_to_expn_id(&self, sess: &Session, index_guess: u32, hash: ExpnHash) -> ExpnId {
        debug_assert_eq!(ExpnId::from_hash(hash), None);
        let index_guess = ExpnIndex::from_u32(index_guess);
        let old_hash = self.root.expn_hashes.get(self, index_guess).map(|lazy| lazy.decode(self));
        let index = if old_hash == Some(hash) {
            // Fast path: the expn and its index is unchanged from the
            // previous compilation session. There is no need to decode anything
            // else.
            index_guess
        } else {
            // Slow path: We need to find out the new `ExpnIndex` of the provided
            // `ExpnHash`, if it still exists. This requires decoding every `ExpnHash`
            // stored in this crate.
            let map = self.cdata.expn_hash_map.get_or_init(|| {
                let end_id = self.root.expn_hashes.size() as u32;
                let mut map =
                    UnhashMap::with_capacity_and_hasher(end_id as usize, Default::default());
                for i in 0..end_id {
                    let i = ExpnIndex::from_u32(i);
                    if let Some(hash) = self.root.expn_hashes.get(self, i) {
                        map.insert(hash.decode(self), i);
                    }
                }
                map
            });
            map[&hash]
        };
        let data = self.root.expn_data.get(self, index).unwrap().decode((self, sess));
        rustc_span::hygiene::register_expn_id(self.cnum, index, data, hash)
    }
    /// Imports the source_map from an external crate into the source_map of the crate
    /// currently being compiled (the "local crate").
    ///
    /// The import algorithm works analogous to how AST items are inlined from an
    /// external crate's metadata:
    /// For every SourceFile in the external source_map an 'inline' copy is created in the
    /// local source_map. The correspondence relation between external and local
    /// SourceFiles is recorded in the `ImportedSourceFile` objects returned from this
    /// function. When an item from an external crate is later inlined into this
    /// crate, this correspondence information is used to translate the span
    /// information of the inlined item so that it refers to the correct positions in
    /// the local source_map (see `<decoder::DecodeContext as SpecializedDecoder<Span>>`).
    ///
    /// The import algorithm in the function below will reuse SourceFiles already
    /// existing in the local source_map. For example, even if the SourceFile of some
    /// source file of libstd gets imported many times, there will only ever be
    /// one SourceFile object for the corresponding file in the local source_map.
    ///
    /// Note that imported SourceFiles do not actually contain the source code of the
    /// file they represent, just information about length, line breaks, and
    /// multibyte characters. This information is enough to generate valid debuginfo
    /// for items inlined from other crates.
    ///
    /// Proc macro crates don't currently export spans, so this function does not have
    /// to work for them.
    fn imported_source_files(&self, sess: &Session) -> &'a [ImportedSourceFile] {
        // Translate the virtual `/rustc/$hash` prefix back to a real directory
        // that should hold actual sources, where possible.
        //
        // NOTE: if you update this, you might need to also update bootstrap's code for generating
        // the `rust-src` component in `Src::run` in `src/bootstrap/dist.rs`.
        let virtual_rust_source_base_dir = option_env!("CFG_VIRTUAL_RUST_SOURCE_BASE_DIR")
            .map(Path::new)
            .filter(|_| {
                // Only spend time on further checks if we have what to translate *to*.
                sess.opts.real_rust_source_base_dir.is_some()
            })
            .filter(|virtual_dir| {
                // Don't translate away `/rustc/$hash` if we're still remapping to it,
                // since that means we're still building `std`/`rustc` that need it,
                // and we don't want the real path to leak into codegen/debuginfo.
                !sess.opts.remap_path_prefix.iter().any(|(_from, to)| to == virtual_dir)
            });
        // Rewrites a remapped `/rustc/$hash/...` file name in place to point at
        // the locally available `real_rust_source_base_dir` copy of the sources.
        let try_to_translate_virtual_to_real = |name: &mut rustc_span::FileName| {
            debug!(
                "try_to_translate_virtual_to_real(name={:?}): \
                 virtual_rust_source_base_dir={:?}, real_rust_source_base_dir={:?}",
                name, virtual_rust_source_base_dir, sess.opts.real_rust_source_base_dir,
            );
            if let Some(virtual_dir) = virtual_rust_source_base_dir {
                if let Some(real_dir) = &sess.opts.real_rust_source_base_dir {
                    if let rustc_span::FileName::Real(old_name) = name {
                        if let rustc_span::RealFileName::Remapped { local_path: _, virtual_name } =
                            old_name
                        {
                            if let Ok(rest) = virtual_name.strip_prefix(virtual_dir) {
                                let virtual_name = virtual_name.clone();
                                // The std library crates are in
                                // `$sysroot/lib/rustlib/src/rust/library`, whereas other crates
                                // may be in `$sysroot/lib/rustlib/src/rust/` directly. So we
                                // detect crates from the std libs and handle them specially.
                                const STD_LIBS: &[&str] = &[
                                    "core",
                                    "alloc",
                                    "std",
                                    "test",
                                    "term",
                                    "unwind",
                                    "proc_macro",
                                    "panic_abort",
                                    "panic_unwind",
                                    "profiler_builtins",
                                    "rtstartup",
                                    "rustc-std-workspace-core",
                                    "rustc-std-workspace-alloc",
                                    "rustc-std-workspace-std",
                                    "backtrace",
                                ];
                                let is_std_lib = STD_LIBS.iter().any(|l| rest.starts_with(l));
                                let new_path = if is_std_lib {
                                    real_dir.join("library").join(rest)
                                } else {
                                    real_dir.join(rest)
                                };
                                debug!(
                                    "try_to_translate_virtual_to_real: `{}` -> `{}`",
                                    virtual_name.display(),
                                    new_path.display(),
                                );
                                let new_name = rustc_span::RealFileName::Remapped {
                                    local_path: Some(new_path),
                                    virtual_name,
                                };
                                *old_name = new_name;
                            }
                        }
                    }
                }
            }
        };
        self.cdata.source_map_import_info.get_or_init(|| {
            let external_source_map = self.root.source_map.decode(self);
            external_source_map
                .map(|source_file_to_import| {
                    // We can't reuse an existing SourceFile, so allocate a new one
                    // containing the information we need.
                    let rustc_span::SourceFile {
                        mut name,
                        src_hash,
                        start_pos,
                        end_pos,
                        mut lines,
                        mut multibyte_chars,
                        mut non_narrow_chars,
                        mut normalized_pos,
                        name_hash,
                        ..
                    } = source_file_to_import;
                    // If this file is under $sysroot/lib/rustlib/src/ but has not been remapped
                    // during rust bootstrapping by `remap-debuginfo = true`, and the user
                    // wish to simulate that behaviour by -Z simulate-remapped-rust-src-base,
                    // then we change `name` to a similar state as if the rust was bootstrapped
                    // with `remap-debuginfo = true`.
                    // This is useful for testing so that tests about the effects of
                    // `try_to_translate_virtual_to_real` don't have to worry about how the
                    // compiler is bootstrapped.
                    if let Some(virtual_dir) =
                        &sess.opts.debugging_opts.simulate_remapped_rust_src_base
                    {
                        if let Some(real_dir) = &sess.opts.real_rust_source_base_dir {
                            if let rustc_span::FileName::Real(ref mut old_name) = name {
                                if let rustc_span::RealFileName::LocalPath(local) = old_name {
                                    if let Ok(rest) = local.strip_prefix(real_dir) {
                                        *old_name = rustc_span::RealFileName::Remapped {
                                            local_path: None,
                                            virtual_name: virtual_dir.join(rest),
                                        };
                                    }
                                }
                            }
                        }
                    }
                    // If this file's path has been remapped to `/rustc/$hash`,
                    // we might be able to reverse that (also see comments above,
                    // on `try_to_translate_virtual_to_real`).
                    try_to_translate_virtual_to_real(&mut name);
                    let source_length = (end_pos - start_pos).to_usize();
                    // Translate line-start positions and multibyte character
                    // position into frame of reference local to file.
                    // `SourceMap::new_imported_source_file()` will then translate those
                    // coordinates to their new global frame of reference when the
                    // offset of the SourceFile is known.
                    for pos in &mut lines {
                        *pos = *pos - start_pos;
                    }
                    for mbc in &mut multibyte_chars {
                        mbc.pos = mbc.pos - start_pos;
                    }
                    for swc in &mut non_narrow_chars {
                        *swc = *swc - start_pos;
                    }
                    for np in &mut normalized_pos {
                        np.pos = np.pos - start_pos;
                    }
                    let local_version = sess.source_map().new_imported_source_file(
                        name,
                        src_hash,
                        name_hash,
                        source_length,
                        self.cnum,
                        lines,
                        multibyte_chars,
                        non_narrow_chars,
                        normalized_pos,
                        start_pos,
                        end_pos,
                    );
                    debug!(
                        "CrateMetaData::imported_source_files alloc \
                         source_file {:?} original (start_pos {:?} end_pos {:?}) \
                         translated (start_pos {:?} end_pos {:?})",
                        local_version.name,
                        start_pos,
                        end_pos,
                        local_version.start_pos,
                        local_version.end_pos
                    );
                    ImportedSourceFile {
                        original_start_pos: start_pos,
                        original_end_pos: end_pos,
                        translated_source_file: local_version,
                    }
                })
                .collect()
        })
    }
}
impl CrateMetadata {
    /// Eagerly decodes the parts of `root` that are needed up front (trait
    /// impl lists, the interpret-allocation index, the DefPathHash table)
    /// and assembles the per-crate metadata wrapper around the raw `blob`.
    crate fn new(
        sess: &Session,
        blob: MetadataBlob,
        root: CrateRoot<'static>,
        raw_proc_macros: Option<&'static [ProcMacro]>,
        cnum: CrateNum,
        cnum_map: CrateNumMap,
        dep_kind: CrateDepKind,
        source: CrateSource,
        private_dep: bool,
        host_hash: Option<Svh>,
    ) -> CrateMetadata {
        let trait_impls = root
            .impls
            .decode((&blob, sess))
            .map(|trait_impls| (trait_impls.trait_id, trait_impls.impls))
            .collect();
        let alloc_decoding_state =
            AllocDecodingState::new(root.interpret_alloc_index.decode(&blob).collect());
        let dependencies = Lock::new(cnum_map.iter().cloned().collect());
        // Pre-decode the DefPathHash->DefIndex table. This is a cheap operation
        // that does not copy any data. It just does some data verification.
        let def_path_hash_map = root.def_path_hash_map.decode(&blob);
        CrateMetadata {
            blob,
            root,
            trait_impls,
            raw_proc_macros,
            source_map_import_info: OnceCell::new(),
            def_path_hash_map,
            expn_hash_map: Default::default(),
            alloc_decoding_state,
            cnum,
            cnum_map,
            dependencies,
            dep_kind: Lock::new(dep_kind),
            source,
            private_dep,
            host_hash,
            extern_crate: Lock::new(None),
            hygiene_context: Default::default(),
            def_key_cache: Default::default(),
            def_path_hash_cache: Default::default(),
        }
    }
    crate fn dependencies(&self) -> LockGuard<'_, Vec<CrateNum>> {
        self.dependencies.borrow()
    }
    crate fn add_dependency(&self, cnum: CrateNum) {
        self.dependencies.borrow_mut().push(cnum);
    }
    /// Replaces the stored `ExternCrate` if `new_extern_crate` outranks it
    /// (per `ExternCrate::rank`); returns whether the replacement happened.
    crate fn update_extern_crate(&self, new_extern_crate: ExternCrate) -> bool {
        let mut extern_crate = self.extern_crate.borrow_mut();
        let update = Some(new_extern_crate.rank()) > extern_crate.as_ref().map(ExternCrate::rank);
        if update {
            *extern_crate = Some(new_extern_crate);
        }
        update
    }
    crate fn source(&self) -> &CrateSource {
        &self.source
    }
    crate fn dep_kind(&self) -> CrateDepKind {
        *self.dep_kind.lock()
    }
    /// Atomically rewrites the dependency kind via `f` under the lock.
    crate fn update_dep_kind(&self, f: impl FnOnce(CrateDepKind) -> CrateDepKind) {
        self.dep_kind.with_lock(|dep_kind| *dep_kind = f(*dep_kind))
    }
    crate fn panic_strategy(&self) -> PanicStrategy {
        self.root.panic_strategy
    }
    crate fn needs_panic_runtime(&self) -> bool {
        self.root.needs_panic_runtime
    }
    crate fn is_panic_runtime(&self) -> bool {
        self.root.panic_runtime
    }
    crate fn is_profiler_runtime(&self) -> bool {
        self.root.profiler_runtime
    }
    crate fn needs_allocator(&self) -> bool {
        self.root.needs_allocator
    }
    crate fn has_global_allocator(&self) -> bool {
        self.root.has_global_allocator
    }
    crate fn has_default_lib_allocator(&self) -> bool {
        self.root.has_default_lib_allocator
    }
    crate fn is_proc_macro_crate(&self) -> bool {
        self.root.is_proc_macro_crate()
    }
    crate fn name(&self) -> Symbol {
        self.root.name
    }
    crate fn stable_crate_id(&self) -> StableCrateId {
        self.root.stable_crate_id
    }
    crate fn hash(&self) -> Svh {
        self.root.hash
    }
    fn num_def_ids(&self) -> usize {
        self.root.tables.def_keys.size()
    }
    fn local_def_id(&self, index: DefIndex) -> DefId {
        DefId { krate: self.cnum, index }
    }
    /// Translate a DefId from the current compilation environment to a DefId
    /// for an external crate: linear-scans `cnum_map` for the crate number
    /// that maps to `did.krate`, returning `None` if this crate does not
    /// know `did`'s crate.
    fn reverse_translate_def_id(&self, did: DefId) -> Option<DefId> {
        for (local, &global) in self.cnum_map.iter_enumerated() {
            if global == did.krate {
                return Some(DefId { krate: local, index: did.index });
            }
        }
        None
    }
}
// Cannot be implemented on 'ProcMacro', as libproc_macro
// does not depend on librustc_ast
/// Maps a raw `ProcMacro` handle to the kind of macro it implements.
fn macro_kind(raw: &ProcMacro) -> MacroKind {
    match *raw {
        ProcMacro::CustomDerive { .. } => MacroKind::Derive,
        ProcMacro::Attr { .. } => MacroKind::Attr,
        ProcMacro::Bang { .. } => MacroKind::Bang,
    }
}
| 37.989163 | 111 | 0.546734 |
9092169e1820067e1f7a4833e6e3b29792639626 | 28,974 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap::Span;
use ext::base::ExtCtxt;
use ext::base;
use ext::build::AstBuilder;
use parse::token::*;
use parse::token;
use ptr::P;
/// Quasiquoting works via token trees.
///
/// This is registered as a set of expression syntax extension called quote!
/// that lifts its argument token-tree to an AST representing the
/// construction of the same token tree, with token::SubstNt interpreted
/// as antiquotes (splices).
pub mod rt {
    use ast;
    use codemap::Spanned;
    use ext::base::ExtCtxt;
    use parse::token;
    use parse;
    use print::pprust;
    use ptr::P;
    use ast::{TokenTree, Generics, Expr};
    pub use parse::new_parser_from_tts;
    pub use codemap::{BytePos, Span, dummy_spanned};
    /// Conversion into the sequence of token trees that `quote!` splices in
    /// place of a `$ident` antiquote.
    pub trait ToTokens {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> ;
    }
    impl ToTokens for TokenTree {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec!(self.clone())
        }
    }
    impl<T: ToTokens> ToTokens for Vec<T> {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            self.iter().flat_map(|t| t.to_tokens(cx).into_iter()).collect()
        }
    }
    impl<T: ToTokens> ToTokens for Spanned<T> {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            // FIXME: use the span?
            self.node.to_tokens(cx)
        }
    }
    impl<T: ToTokens> ToTokens for Option<T> {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            match self {
                &Some(ref t) => t.to_tokens(cx),
                &None => Vec::new(),
            }
        }
    }
    /* Should be (when bugs in default methods are fixed):
    trait ToSource : ToTokens {
        // Takes a thing and generates a string containing rust code for it.
        pub fn to_source() -> String;
        // If you can make source, you can definitely make tokens.
        pub fn to_tokens(cx: &ExtCtxt) -> ~[TokenTree] {
            cx.parse_tts(self.to_source())
        }
    }
    */
    // FIXME: Move this trait to pprust and get rid of *_to_str?
    pub trait ToSource {
        // Takes a thing and generates a string containing rust code for it.
        fn to_source(&self) -> String;
    }
    // FIXME (Issue #16472): This should go away after ToToken impls
    // are revised to go directly to token-trees.
    trait ToSourceWithHygiene : ToSource {
        // Takes a thing and generates a string containing rust code
        // for it, encoding Idents as special byte sequences to
        // maintain hygiene across serialization and deserialization.
        fn to_source_with_hygiene(&self) -> String;
    }
    // Generates matching ToSource/ToSourceWithHygiene impls that delegate to
    // the named pprust printer (plain and hygiene-preserving variants).
    macro_rules! impl_to_source {
        (P<$t:ty>, $pp:ident) => (
            impl ToSource for P<$t> {
                fn to_source(&self) -> String {
                    pprust::$pp(&**self)
                }
            }
            impl ToSourceWithHygiene for P<$t> {
                fn to_source_with_hygiene(&self) -> String {
                    pprust::with_hygiene::$pp(&**self)
                }
            }
        );
        ($t:ty, $pp:ident) => (
            impl ToSource for $t {
                fn to_source(&self) -> String {
                    pprust::$pp(self)
                }
            }
            impl ToSourceWithHygiene for $t {
                fn to_source_with_hygiene(&self) -> String {
                    pprust::with_hygiene::$pp(self)
                }
            }
        );
    }
    fn slice_to_source<'a, T: ToSource>(sep: &'static str, xs: &'a [T]) -> String {
        xs.iter()
            .map(|i| i.to_source())
            .collect::<Vec<String>>()
            .connect(sep)
            .to_string()
    }
    fn slice_to_source_with_hygiene<'a, T: ToSourceWithHygiene>(
        sep: &'static str, xs: &'a [T]) -> String {
        xs.iter()
            .map(|i| i.to_source_with_hygiene())
            .collect::<Vec<String>>()
            .connect(sep)
            .to_string()
    }
    macro_rules! impl_to_source_slice {
        ($t:ty, $sep:expr) => (
            impl ToSource for [$t] {
                fn to_source(&self) -> String {
                    slice_to_source($sep, self)
                }
            }
            impl ToSourceWithHygiene for [$t] {
                fn to_source_with_hygiene(&self) -> String {
                    slice_to_source_with_hygiene($sep, self)
                }
            }
        )
    }
    impl ToSource for ast::Ident {
        fn to_source(&self) -> String {
            token::get_ident(*self).get().to_string()
        }
    }
    impl ToSourceWithHygiene for ast::Ident {
        fn to_source_with_hygiene(&self) -> String {
            self.encode_with_hygiene()
        }
    }
    impl_to_source! { ast::Ty, ty_to_string }
    impl_to_source! { ast::Block, block_to_string }
    impl_to_source! { ast::Arg, arg_to_string }
    impl_to_source! { Generics, generics_to_string }
    impl_to_source! { P<ast::Item>, item_to_string }
    impl_to_source! { P<ast::Method>, method_to_string }
    impl_to_source! { P<ast::Stmt>, stmt_to_string }
    impl_to_source! { P<ast::Expr>, expr_to_string }
    impl_to_source! { P<ast::Pat>, pat_to_string }
    impl_to_source! { ast::Arm, arm_to_string }
    impl_to_source_slice! { ast::Ty, ", " }
    impl_to_source_slice! { P<ast::Item>, "\n\n" }
    impl ToSource for ast::Attribute_ {
        fn to_source(&self) -> String {
            pprust::attribute_to_string(&dummy_spanned(self.clone()))
        }
    }
    impl ToSourceWithHygiene for ast::Attribute_ {
        fn to_source_with_hygiene(&self) -> String {
            self.to_source()
        }
    }
    impl ToSource for str {
        fn to_source(&self) -> String {
            let lit = dummy_spanned(ast::LitStr(
                token::intern_and_get_ident(self), ast::CookedStr));
            pprust::lit_to_string(&lit)
        }
    }
    impl ToSourceWithHygiene for str {
        fn to_source_with_hygiene(&self) -> String {
            self.to_source()
        }
    }
    impl ToSource for () {
        fn to_source(&self) -> String {
            "()".to_string()
        }
    }
    impl ToSourceWithHygiene for () {
        fn to_source_with_hygiene(&self) -> String {
            self.to_source()
        }
    }
    impl ToSource for bool {
        fn to_source(&self) -> String {
            let lit = dummy_spanned(ast::LitBool(*self));
            pprust::lit_to_string(&lit)
        }
    }
    impl ToSourceWithHygiene for bool {
        fn to_source_with_hygiene(&self) -> String {
            self.to_source()
        }
    }
    impl ToSource for char {
        fn to_source(&self) -> String {
            let lit = dummy_spanned(ast::LitChar(*self));
            pprust::lit_to_string(&lit)
        }
    }
    impl ToSourceWithHygiene for char {
        fn to_source_with_hygiene(&self) -> String {
            self.to_source()
        }
    }
    // Integers render through a literal carrying an explicit type tag, so the
    // precise integer type survives the source round-trip.
    macro_rules! impl_to_source_int {
        (signed, $t:ty, $tag:expr) => (
            impl ToSource for $t {
                fn to_source(&self) -> String {
                    let lit = ast::LitInt(*self as u64, ast::SignedIntLit($tag,
                                                                          ast::Sign::new(*self)));
                    pprust::lit_to_string(&dummy_spanned(lit))
                }
            }
            impl ToSourceWithHygiene for $t {
                fn to_source_with_hygiene(&self) -> String {
                    self.to_source()
                }
            }
        );
        (unsigned, $t:ty, $tag:expr) => (
            impl ToSource for $t {
                fn to_source(&self) -> String {
                    let lit = ast::LitInt(*self as u64, ast::UnsignedIntLit($tag));
                    pprust::lit_to_string(&dummy_spanned(lit))
                }
            }
            impl ToSourceWithHygiene for $t {
                fn to_source_with_hygiene(&self) -> String {
                    self.to_source()
                }
            }
        );
    }
    impl_to_source_int! { signed, int, ast::TyIs(false) }
    impl_to_source_int! { signed, i8, ast::TyI8 }
    impl_to_source_int! { signed, i16, ast::TyI16 }
    impl_to_source_int! { signed, i32, ast::TyI32 }
    impl_to_source_int! { signed, i64, ast::TyI64 }
    impl_to_source_int! { unsigned, uint, ast::TyUs(false) }
    impl_to_source_int! { unsigned, u8, ast::TyU8 }
    impl_to_source_int! { unsigned, u16, ast::TyU16 }
    impl_to_source_int! { unsigned, u32, ast::TyU32 }
    impl_to_source_int! { unsigned, u64, ast::TyU64 }
    // Alas ... we write these out instead. All redundant.
    macro_rules! impl_to_tokens {
        ($t:ty) => (
            impl ToTokens for $t {
                fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
                    cx.parse_tts_with_hygiene(self.to_source_with_hygiene())
                }
            }
        )
    }
    macro_rules! impl_to_tokens_lifetime {
        ($t:ty) => (
            impl<'a> ToTokens for $t {
                fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
                    cx.parse_tts_with_hygiene(self.to_source_with_hygiene())
                }
            }
        )
    }
    impl_to_tokens! { ast::Ident }
    impl_to_tokens! { P<ast::Item> }
    impl_to_tokens! { P<ast::Pat> }
    impl_to_tokens! { ast::Arm }
    impl_to_tokens! { P<ast::Method> }
    impl_to_tokens_lifetime! { &'a [P<ast::Item>] }
    impl_to_tokens! { ast::Ty }
    impl_to_tokens_lifetime! { &'a [ast::Ty] }
    impl_to_tokens! { Generics }
    impl_to_tokens! { P<ast::Stmt> }
    impl_to_tokens! { P<ast::Expr> }
    impl_to_tokens! { ast::Block }
    impl_to_tokens! { ast::Arg }
    impl_to_tokens! { ast::Attribute_ }
    impl_to_tokens_lifetime! { &'a str }
    impl_to_tokens! { () }
    impl_to_tokens! { char }
    impl_to_tokens! { bool }
    impl_to_tokens! { int }
    impl_to_tokens! { i8 }
    impl_to_tokens! { i16 }
    impl_to_tokens! { i32 }
    impl_to_tokens! { i64 }
    impl_to_tokens! { uint }
    impl_to_tokens! { u8 }
    impl_to_tokens! { u16 }
    impl_to_tokens! { u32 }
    impl_to_tokens! { u64 }
    /// Parsing entry points used by the code that `quote!` expands to.
    pub trait ExtParseUtils {
        fn parse_item(&self, s: String) -> P<ast::Item>;
        fn parse_expr(&self, s: String) -> P<ast::Expr>;
        fn parse_stmt(&self, s: String) -> P<ast::Stmt>;
        fn parse_tts(&self, s: String) -> Vec<ast::TokenTree>;
    }
    trait ExtParseUtilsWithHygiene {
        // FIXME (Issue #16472): This should go away after ToToken impls
        // are revised to go directly to token-trees.
        fn parse_tts_with_hygiene(&self, s: String) -> Vec<ast::TokenTree>;
    }
    impl<'a> ExtParseUtils for ExtCtxt<'a> {
        fn parse_item(&self, s: String) -> P<ast::Item> {
            parse::parse_item_from_source_str(
                "<quote expansion>".to_string(),
                s,
                self.cfg(),
                self.parse_sess()).expect("parse error")
        }
        fn parse_stmt(&self, s: String) -> P<ast::Stmt> {
            parse::parse_stmt_from_source_str("<quote expansion>".to_string(),
                                              s,
                                              self.cfg(),
                                              Vec::new(),
                                              self.parse_sess())
        }
        fn parse_expr(&self, s: String) -> P<ast::Expr> {
            parse::parse_expr_from_source_str("<quote expansion>".to_string(),
                                              s,
                                              self.cfg(),
                                              self.parse_sess())
        }
        fn parse_tts(&self, s: String) -> Vec<ast::TokenTree> {
            parse::parse_tts_from_source_str("<quote expansion>".to_string(),
                                             s,
                                             self.cfg(),
                                             self.parse_sess())
        }
    }
    impl<'a> ExtParseUtilsWithHygiene for ExtCtxt<'a> {
        fn parse_tts_with_hygiene(&self, s: String) -> Vec<ast::TokenTree> {
            use parse::with_hygiene::parse_tts_from_source_str;
            parse_tts_from_source_str("<quote expansion>".to_string(),
                                      s,
                                      self.cfg(),
                                      self.parse_sess())
        }
    }
}
/// `quote_tokens!` — expands to code that rebuilds the quoted token trees.
pub fn expand_quote_tokens<'cx>(cx: &'cx mut ExtCtxt,
                                sp: Span,
                                tts: &[ast::TokenTree])
                                -> Box<base::MacResult+'cx> {
    let (cx_expr, expr) = expand_tts(cx, sp, tts);
    base::MacExpr::new(expand_wrapper(cx, sp, cx_expr, expr))
}
/// `quote_expr!` — expands to code that parses the quoted tokens as an expression.
pub fn expand_quote_expr<'cx>(cx: &'cx mut ExtCtxt,
                              sp: Span,
                              tts: &[ast::TokenTree])
                              -> Box<base::MacResult+'cx> {
    base::MacExpr::new(expand_parse_call(cx, sp, "parse_expr", Vec::new(), tts))
}
/// `quote_item!` — expands to code that parses the quoted tokens as an item.
pub fn expand_quote_item<'cx>(cx: &mut ExtCtxt,
                              sp: Span,
                              tts: &[ast::TokenTree])
                              -> Box<base::MacResult+'cx> {
    base::MacExpr::new(expand_parse_call(cx, sp, "parse_item_with_outer_attributes",
                                         vec!(), tts))
}
/// `quote_pat!` — expands to code that parses the quoted tokens as a pattern.
pub fn expand_quote_pat<'cx>(cx: &'cx mut ExtCtxt,
                             sp: Span,
                             tts: &[ast::TokenTree])
                             -> Box<base::MacResult+'cx> {
    base::MacExpr::new(expand_parse_call(cx, sp, "parse_pat", vec!(), tts))
}
/// `quote_arm!` — expands to code that parses the quoted tokens as a match arm.
pub fn expand_quote_arm(cx: &mut ExtCtxt,
                        sp: Span,
                        tts: &[ast::TokenTree])
                        -> Box<base::MacResult+'static> {
    base::MacExpr::new(expand_parse_call(cx, sp, "parse_arm", vec!(), tts))
}
/// `quote_ty!` — expands to code that parses the quoted tokens as a type.
pub fn expand_quote_ty(cx: &mut ExtCtxt,
                       sp: Span,
                       tts: &[ast::TokenTree])
                       -> Box<base::MacResult+'static> {
    base::MacExpr::new(expand_parse_call(cx, sp, "parse_ty", vec!(), tts))
}
/// `quote_method!` — expands to code that parses the quoted tokens as a method.
pub fn expand_quote_method(cx: &mut ExtCtxt,
                           sp: Span,
                           tts: &[ast::TokenTree])
                           -> Box<base::MacResult+'static> {
    base::MacExpr::new(expand_parse_call(cx, sp, "parse_method_with_outer_attributes",
                                         vec!(), tts))
}
/// `quote_stmt!` — expands to code that parses the quoted tokens as a statement.
pub fn expand_quote_stmt(cx: &mut ExtCtxt,
                         sp: Span,
                         tts: &[ast::TokenTree])
                         -> Box<base::MacResult+'static> {
    // `parse_stmt` additionally takes a (here empty) vector of outer attributes.
    let attrs = cx.expr_vec_ng(sp);
    base::MacExpr::new(expand_parse_call(cx, sp, "parse_stmt", vec!(attrs), tts))
}
/// Interns each string in `strs` and returns the resulting identifiers.
fn ids_ext(strs: Vec<String> ) -> Vec<ast::Ident> {
    strs.iter().map(|str| str_to_ident(&(*str)[])).collect()
}
/// Shorthand: interns `s`, producing an `Ident` for use in generated paths
/// and method calls.
fn id_ext(s: &str) -> ast::Ident {
    str_to_ident(s)
}
/// Lifts an ident to the expression that re-creates it at the expansion
/// site: `ext_cx.ident_of("<ident>")`.
fn mk_ident(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> P<ast::Expr> {
    let name = cx.expr_str(sp, token::get_ident(ident));
    let receiver = cx.expr_ident(sp, id_ext("ext_cx"));
    cx.expr_method_call(sp, receiver, id_ext("ident_of"), vec!(name))
}
/// Lifts a name to the expression that re-creates it at the expansion site:
/// `ext_cx.name_of("<ident>")`.
fn mk_name(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> P<ast::Expr> {
    let name = cx.expr_str(sp, token::get_ident(ident));
    let receiver = cx.expr_ident(sp, id_ext("ext_cx"));
    cx.expr_method_call(sp, receiver, id_ext("name_of"), vec!(name))
}
/// Builds a global path expression `::syntax::ast::<name>`.
fn mk_ast_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
    let segments = vec!(id_ext("syntax"), id_ext("ast"), id_ext(name));
    cx.expr_path(cx.path_global(sp, segments))
}
/// Builds a global path expression `::syntax::parse::token::<name>`.
fn mk_token_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
    let segments = vec!(id_ext("syntax"), id_ext("parse"), id_ext("token"), id_ext(name));
    cx.expr_path(cx.path_global(sp, segments))
}
/// Builds an expression naming the `token::BinOpToken` variant for `bop`.
fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOpToken) -> P<ast::Expr> {
    let name = match bop {
        token::Plus => "Plus",
        token::Minus => "Minus",
        token::Star => "Star",
        token::Slash => "Slash",
        token::Percent => "Percent",
        token::Caret => "Caret",
        token::And => "And",
        token::Or => "Or",
        token::Shl => "Shl",
        token::Shr => "Shr"
    };
    mk_token_path(cx, sp, name)
}
/// Builds an expression naming the `token::DelimToken` variant for `delim`.
fn mk_delim(cx: &ExtCtxt, sp: Span, delim: token::DelimToken) -> P<ast::Expr> {
    let name = match delim {
        token::Paren => "Paren",
        token::Bracket => "Bracket",
        token::Brace => "Brace",
    };
    mk_token_path(cx, sp, name)
}
/// Lifts a concrete `Token` to an expression which, when evaluated in the
/// expanded `quote!` code, reconstructs that same token.
#[allow(non_upper_case_globals)]
fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
    // Builds `token::Literal(token::$name(<args>), <optional suffix>)`.
    macro_rules! mk_lit {
        ($name: expr, $suffix: expr, $($args: expr),*) => {{
            let inner = cx.expr_call(sp, mk_token_path(cx, sp, $name), vec![$($args),*]);
            let suffix = match $suffix {
                Some(name) => cx.expr_some(sp, mk_name(cx, sp, ast::Ident::new(name))),
                None => cx.expr_none(sp)
            };
            cx.expr_call(sp, mk_token_path(cx, sp, "Literal"), vec![inner, suffix])
        }}
    }
    // Tokens carrying a payload need an explicit constructor call.
    match *tok {
        token::BinOp(binop) => {
            return cx.expr_call(sp, mk_token_path(cx, sp, "BinOp"), vec!(mk_binop(cx, sp, binop)));
        }
        token::BinOpEq(binop) => {
            return cx.expr_call(sp, mk_token_path(cx, sp, "BinOpEq"),
                                vec!(mk_binop(cx, sp, binop)));
        }
        token::OpenDelim(delim) => {
            return cx.expr_call(sp, mk_token_path(cx, sp, "OpenDelim"),
                                vec![mk_delim(cx, sp, delim)]);
        }
        token::CloseDelim(delim) => {
            return cx.expr_call(sp, mk_token_path(cx, sp, "CloseDelim"),
                                vec![mk_delim(cx, sp, delim)]);
        }
        token::Literal(token::Byte(i), suf) => {
            let e_byte = mk_name(cx, sp, i.ident());
            return mk_lit!("Byte", suf, e_byte);
        }
        token::Literal(token::Char(i), suf) => {
            let e_char = mk_name(cx, sp, i.ident());
            return mk_lit!("Char", suf, e_char);
        }
        token::Literal(token::Integer(i), suf) => {
            let e_int = mk_name(cx, sp, i.ident());
            return mk_lit!("Integer", suf, e_int);
        }
        token::Literal(token::Float(fident), suf) => {
            let e_fident = mk_name(cx, sp, fident.ident());
            return mk_lit!("Float", suf, e_fident);
        }
        token::Literal(token::Str_(ident), suf) => {
            return mk_lit!("Str_", suf, mk_name(cx, sp, ident.ident()))
        }
        token::Literal(token::StrRaw(ident, n), suf) => {
            return mk_lit!("StrRaw", suf, mk_name(cx, sp, ident.ident()), cx.expr_usize(sp, n))
        }
        token::Ident(ident, style) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "Ident"),
                                vec![mk_ident(cx, sp, ident),
                                     match style {
                                        ModName => mk_token_path(cx, sp, "ModName"),
                                        Plain => mk_token_path(cx, sp, "Plain"),
                                     }]);
        }
        token::Lifetime(ident) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "Lifetime"),
                                vec!(mk_ident(cx, sp, ident)));
        }
        token::DocComment(ident) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "DocComment"),
                                vec!(mk_name(cx, sp, ident.ident())));
        }
        token::Interpolated(_) => panic!("quote! with interpolated token"),
        _ => ()
    }
    // The remaining tokens carry no payload: a bare path to the variant suffices.
    let name = match *tok {
        token::Eq => "Eq",
        token::Lt => "Lt",
        token::Le => "Le",
        token::EqEq => "EqEq",
        token::Ne => "Ne",
        token::Ge => "Ge",
        token::Gt => "Gt",
        token::AndAnd => "AndAnd",
        token::OrOr => "OrOr",
        token::Not => "Not",
        token::Tilde => "Tilde",
        token::At => "At",
        token::Dot => "Dot",
        token::DotDot => "DotDot",
        token::Comma => "Comma",
        token::Semi => "Semi",
        token::Colon => "Colon",
        token::ModSep => "ModSep",
        token::RArrow => "RArrow",
        token::LArrow => "LArrow",
        token::FatArrow => "FatArrow",
        token::Pound => "Pound",
        token::Dollar => "Dollar",
        token::Underscore => "Underscore",
        token::Eof => "Eof",
        _ => panic!(),
    };
    mk_token_path(cx, sp, name)
}
/// Emits the statements that, when spliced into the quote expansion, rebuild
/// the given token tree at runtime by pushing onto / extending a local `Vec`
/// named `tt` (bound, together with the span `_sp`, by `expand_tts`).
fn mk_tt(cx: &ExtCtxt, tt: &ast::TokenTree) -> Vec<P<ast::Stmt>> {
    match *tt {
        ast::TtToken(sp, SubstNt(ident, _)) => {
            // $ident substitution:
            // tt.extend($ident.to_tokens(ext_cx).into_iter())
            let e_to_toks =
                cx.expr_method_call(sp,
                                    cx.expr_ident(sp, ident),
                                    id_ext("to_tokens"),
                                    vec!(cx.expr_ident(sp, id_ext("ext_cx"))));
            let e_to_toks =
                cx.expr_method_call(sp, e_to_toks, id_ext("into_iter"), vec![]);
            let e_push =
                cx.expr_method_call(sp,
                                    cx.expr_ident(sp, id_ext("tt")),
                                    id_ext("extend"),
                                    vec!(e_to_toks));
            vec!(cx.stmt_expr(e_push))
        }
        ref tt @ ast::TtToken(_, MatchNt(..)) => {
            // A matcher non-terminal is expanded as the sequence of its
            // constituent token trees.
            let mut seq = vec![];
            for i in 0..tt.len() {
                seq.push(tt.get_tt(i));
            }
            mk_tts(cx, &seq[])
        }
        ast::TtToken(sp, ref tok) => {
            // Plain token: tt.push(ast::TtToken(_sp, <tok>))
            let e_sp = cx.expr_ident(sp, id_ext("_sp"));
            let e_tok = cx.expr_call(sp,
                                     mk_ast_path(cx, sp, "TtToken"),
                                     vec!(e_sp, mk_token(cx, sp, tok)));
            let e_push =
                cx.expr_method_call(sp,
                                    cx.expr_ident(sp, id_ext("tt")),
                                    id_ext("push"),
                                    vec!(e_tok));
            vec!(cx.stmt_expr(e_push))
        },
        ast::TtDelimited(_, ref delimed) => {
            // Delimited group: open delimiter, each inner tree, close delimiter.
            mk_tt(cx, &delimed.open_tt()).into_iter()
                .chain(delimed.tts.iter().flat_map(|tt| mk_tt(cx, tt).into_iter()))
                .chain(mk_tt(cx, &delimed.close_tt()).into_iter())
                .collect()
        },
        ast::TtSequence(..) => panic!("TtSequence in quote!"),
    }
}
/// Concatenates, in order, the statement sequences that `mk_tt` produces for
/// every token tree in `tts`.
fn mk_tts(cx: &ExtCtxt, tts: &[ast::TokenTree]) -> Vec<P<ast::Stmt>> {
    let mut stmts = Vec::new();
    for tt in tts.iter() {
        for stmt in mk_tt(cx, tt).into_iter() {
            stmts.push(stmt);
        }
    }
    stmts
}
/// Parses `tts` as `<cx_expr>, <token trees...>` and returns the extension
/// context expression together with a block expression that reconstructs the
/// quoted token trees at runtime (the block binds `_sp` and an accumulator
/// vector `tt`, then yields `tt`).
fn expand_tts(cx: &ExtCtxt, sp: Span, tts: &[ast::TokenTree])
              -> (P<ast::Expr>, P<ast::Expr>) {
    // NB: It appears that the main parser loses its mind if we consider
    // $foo as a TtNonterminal during the main parse, so we have to re-parse
    // under quote_depth > 0. This is silly and should go away; the _guess_ is
    // it has to do with transition away from supporting old-style macros, so
    // try removing it when enough of them are gone.
    let mut p = cx.new_parser_from_tts(tts);
    p.quote_depth += 1;

    // The first argument to a quote macro is the extension-context expression.
    let cx_expr = p.parse_expr();
    if !p.eat(&token::Comma) {
        p.fatal("expected token `,`");
    }

    let tts = p.parse_all_token_trees();
    p.abort_if_errors();

    // We also bind a single value, sp, to ext_cx.call_site()
    //
    // This causes every span in a token-tree quote to be attributed to the
    // call site of the extension using the quote. We can't really do much
    // better since the source of the quote may well be in a library that
    // was not even parsed by this compilation run, that the user has no
    // source code for (eg. in libsyntax, which they're just _using_).
    //
    // The old quasiquoter had an elaborate mechanism for denoting input
    // file locations from which quotes originated; unfortunately this
    // relied on feeding the source string of the quote back into the
    // compiler (which we don't really want to do) and, in any case, only
    // pushed the problem a very small step further back: an error
    // resulting from a parse of the resulting quote is still attributed to
    // the site the string literal occurred, which was in a source file
    // _other_ than the one the user has control over. For example, an
    // error in a quote from the protocol compiler, invoked in user code
    // using macro_rules! for example, will be attributed to the macro_rules.rs
    // file in libsyntax, which the user might not even have source to (unless
    // they happen to have a compiler on hand). Over all, the phase distinction
    // just makes quotes "hard to attribute". Possibly this could be fixed
    // by recreating some of the original qq machinery in the tt regime
    // (pushing fake FileMaps onto the parser to account for original sites
    // of quotes, for example) but at this point it seems not likely to be
    // worth the hassle.
    let e_sp = cx.expr_method_call(sp,
                                   cx.expr_ident(sp, id_ext("ext_cx")),
                                   id_ext("call_site"),
                                   Vec::new());

    let stmt_let_sp = cx.stmt_let(sp, false,
                                  id_ext("_sp"),
                                  e_sp);

    // Mutable binding `tt` initialised to a fresh vector; the statements from
    // mk_tts push into it.
    let stmt_let_tt = cx.stmt_let(sp, true, id_ext("tt"), cx.expr_vec_ng(sp));

    let mut vector = vec!(stmt_let_sp, stmt_let_tt);
    vector.extend(mk_tts(cx, &tts[]).into_iter());
    let block = cx.expr_block(
        cx.block_all(sp,
                     vector,
                     Some(cx.expr_ident(sp, id_ext("tt")))));

    (cx_expr, block)
}
/// Wraps `expr` in a block that glob-imports `syntax::ext::quote::rt` and
/// binds `ext_cx` to a re-borrow of `cx_expr`, so the generated quote code
/// can refer to the extension context by name.
fn expand_wrapper(cx: &ExtCtxt,
                  sp: Span,
                  cx_expr: P<ast::Expr>,
                  expr: P<ast::Expr>) -> P<ast::Expr> {
    // Explicitly borrow to avoid moving from the invoker (#16992)
    let cx_expr_borrow = cx.expr_addr_of(sp, cx.expr_deref(sp, cx_expr));
    let stmt_let_ext_cx = cx.stmt_let(sp, false, id_ext("ext_cx"), cx_expr_borrow);

    // One glob `use` statement per listed path, followed by the `ext_cx` let.
    let stmts = [
        &["syntax", "ext", "quote", "rt"],
    ].iter().map(|path| {
        let path = path.iter().map(|s| s.to_string()).collect();
        cx.stmt_item(sp, cx.item_use_glob(sp, ast::Inherited, ids_ext(path)))
    }).chain(Some(stmt_let_ext_cx).into_iter()).collect();

    cx.expr_block(cx.block_all(sp, stmts, Some(expr)))
}
/// Builds the expression
/// `new_parser_from_tts(ext_cx.parse_sess(), ext_cx.cfg(), <tts>).<parse_method>(<arg_exprs>)`
/// and wraps it via `expand_wrapper` so that `ext_cx` is in scope.
fn expand_parse_call(cx: &ExtCtxt,
                     sp: Span,
                     parse_method: &str,
                     arg_exprs: Vec<P<ast::Expr>> ,
                     tts: &[ast::TokenTree]) -> P<ast::Expr> {
    let (cx_expr, tts_expr) = expand_tts(cx, sp, tts);

    // `|&:|` is the pre-1.0 explicit closure-kind syntax (Fn by reference).
    let cfg_call = |&:| cx.expr_method_call(
        sp, cx.expr_ident(sp, id_ext("ext_cx")),
        id_ext("cfg"), Vec::new());

    let parse_sess_call = |&:| cx.expr_method_call(
        sp, cx.expr_ident(sp, id_ext("ext_cx")),
        id_ext("parse_sess"), Vec::new());

    let new_parser_call =
        cx.expr_call(sp,
                     cx.expr_ident(sp, id_ext("new_parser_from_tts")),
                     vec!(parse_sess_call(), cfg_call(), tts_expr));

    let expr = cx.expr_method_call(sp, new_parser_call, id_ext(parse_method),
                                   arg_exprs);

    expand_wrapper(cx, sp, cx_expr, expr)
}
| 35.682266 | 99 | 0.512977 |
b92bc7881609863d1799f9b5a4ac3f038b1e20f6 | 15,768 | use async_ftp::{types::Result, FtpStream};
use libunftp::options::FtpsRequired;
use pretty_assertions::assert_eq;
use std::fmt::Debug;
use std::fs;
use std::io::{BufWriter, Write};
use std::path::PathBuf;
use std::{str, time::Duration};
use unftp_sbe_fs::ServerExt;
/// Asserts that a command issued before login was rejected with a
/// "530 Please authenticate" reply.
fn ensure_login_required<T: Debug>(r: Result<T>) {
    let err = r.unwrap_err().to_string();
    assert!(
        err.contains("530 Please authenticate"),
        "Could execute command without logging in!"
    );
}
/// Asserts that a command was rejected with a 534 reply (FTPS required).
fn ensure_ftps_required<T: Debug>(r: Result<T>) {
    let err = r.unwrap_err().to_string();
    assert!(err.contains("534"), "FTPS enforcement is broken!");
}
/// Boots a server rooted at the OS temp dir, waits a second for it to bind,
/// then verifies that a plain FTP connect succeeds.
#[tokio::test]
async fn connect() {
    let bind_addr: &str = "127.0.0.1:1234";
    let home: PathBuf = std::env::temp_dir();
    tokio::spawn(libunftp::Server::with_fs(home).listen(bind_addr));
    tokio::time::sleep(Duration::new(1, 0)).await;
    async_ftp::FtpStream::connect(bind_addr).await.unwrap();
}
/// Connects and logs in; only checks that USER/PASS completes without error.
#[tokio::test]
async fn login() {
    let addr = "127.0.0.1:1235";
    let path = std::env::temp_dir();
    let username = "koen";
    let password = "hoi";

    tokio::spawn(libunftp::Server::with_fs(path).listen(addr));
    // Give the server a moment to bind before connecting.
    tokio::time::sleep(Duration::new(1, 0)).await;
    let mut ftp_stream = async_ftp::FtpStream::connect(addr).await.unwrap();
    ftp_stream.login(username, password).await.unwrap();
}
/// Exercises the `FtpsRequired` policy matrix: depending on the configured
/// mode for the control and data channels, and on whether the account is
/// anonymous, the server must (or must not) answer 534 on LOGIN / LIST.
///
/// Fix: dropped the no-op `.clone()` on `addr` (`&str` is `Copy`; cloning a
/// shared reference just copies the reference — clippy `clone_on_copy`).
#[tokio::test]
async fn ftps_require_works() {
    // One table entry: the user logging in, the FTPS requirement for each
    // channel, and whether a 534 reply is expected on control / data.
    struct Test {
        username: &'static str,
        mode_control_chan: FtpsRequired,
        mode_data_chan: FtpsRequired,
        give534: bool,
        give534_data: bool,
    }
    let tests = [
        // control channel tests
        Test {
            username: "anonymous",
            mode_control_chan: FtpsRequired::None,
            mode_data_chan: FtpsRequired::None,
            give534: false,
            give534_data: false,
        },
        Test {
            username: "the-user",
            mode_control_chan: FtpsRequired::None,
            mode_data_chan: FtpsRequired::None,
            give534: false,
            give534_data: false,
        },
        Test {
            username: "anonymous",
            mode_control_chan: FtpsRequired::All,
            mode_data_chan: FtpsRequired::None,
            give534: true,
            give534_data: false,
        },
        Test {
            username: "the-user",
            mode_control_chan: FtpsRequired::All,
            mode_data_chan: FtpsRequired::None,
            give534: true,
            give534_data: false,
        },
        Test {
            username: "AnonyMous",
            mode_control_chan: FtpsRequired::Accounts,
            mode_data_chan: FtpsRequired::None,
            give534: false,
            give534_data: false,
        },
        Test {
            username: "the-user",
            mode_control_chan: FtpsRequired::Accounts,
            mode_data_chan: FtpsRequired::None,
            give534: true,
            give534_data: false,
        },
        // Data channel tests
        Test {
            username: "anonymous",
            mode_control_chan: FtpsRequired::None,
            mode_data_chan: FtpsRequired::None,
            give534: false,
            give534_data: false,
        },
        Test {
            username: "the-user",
            mode_control_chan: FtpsRequired::None,
            mode_data_chan: FtpsRequired::None,
            give534: false,
            give534_data: false,
        },
        Test {
            username: "anonymous",
            mode_control_chan: FtpsRequired::None,
            mode_data_chan: FtpsRequired::All,
            give534: false,
            give534_data: true,
        },
        Test {
            username: "the-user",
            mode_control_chan: FtpsRequired::None,
            mode_data_chan: FtpsRequired::All,
            give534: false,
            give534_data: true,
        },
        Test {
            username: "AnonyMous",
            mode_control_chan: FtpsRequired::None,
            mode_data_chan: FtpsRequired::Accounts,
            give534: false,
            give534_data: false,
        },
        Test {
            username: "the-user",
            mode_control_chan: FtpsRequired::None,
            mode_data_chan: FtpsRequired::Accounts,
            give534: false,
            give534_data: true,
        },
    ];
    for test in tests.iter() {
        // Every iteration binds the same port, so the previous server must be
        // shut down first: dropping `tx` at the end of the loop closes the
        // broadcast channel, which the shutdown_indicator future observes.
        let addr = "127.0.0.1:1250";
        let (tx, mut rx) = tokio::sync::broadcast::channel::<()>(1);
        tokio::spawn(
            libunftp::Server::with_fs(std::env::temp_dir())
                .ftps_required(test.mode_control_chan, test.mode_data_chan)
                .shutdown_indicator(async move {
                    rx.recv().await.unwrap();
                    libunftp::options::Shutdown::default()
                })
                .listen(addr),
        );
        tokio::time::sleep(Duration::new(1, 0)).await;
        let mut ftp_stream = async_ftp::FtpStream::connect(addr).await.unwrap();
        let result = ftp_stream.login(test.username, "blah").await;
        if test.give534 {
            ensure_ftps_required(result);
        }
        if test.give534_data {
            let result = ftp_stream.list(None).await;
            ensure_ftps_required(result);
        }
        drop(tx);
    }
}
/// NOOP succeeds without a prior login. Runs on the single-threaded
/// ("current_thread") runtime flavor.
#[tokio::test(flavor = "current_thread")]
async fn noop() {
    let addr = "127.0.0.1:1236";
    let path = std::env::temp_dir();
    tokio::spawn(libunftp::Server::with_fs(path).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;
    let mut ftp_stream = async_ftp::FtpStream::connect(addr).await.unwrap();
    ftp_stream.noop().await.unwrap();
}
/// RETR round-trip: writes random bytes to a file under the FTP root,
/// fetches it over FTP, and compares the payloads byte-for-byte. Also checks
/// that RETR is rejected before login.
#[tokio::test]
async fn get() {
    use std::io::Write;

    let addr = "127.0.0.1:1237";
    let path = std::env::temp_dir();
    let mut filename = path.clone();
    tokio::spawn(libunftp::Server::with_fs(path).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;

    // Create a temporary file in the FTP root that we'll retrieve
    filename.push("bla.txt");
    let mut f = std::fs::File::create(filename.clone()).unwrap();

    // Write some random data to our file
    let mut data = vec![0; 1024];
    getrandom::getrandom(&mut data).expect("Error generating random bytes");
    f.write_all(&data).unwrap();

    // Retrieve the remote file
    let mut ftp_stream = FtpStream::connect(addr).await.unwrap();
    ensure_login_required(ftp_stream.simple_retr("bla.txt").await);

    ftp_stream.login("hoi", "jij").await.unwrap();
    let remote_file = ftp_stream.simple_retr("bla.txt").await.unwrap();
    let remote_data = remote_file.into_inner();

    assert_eq!(remote_data, data);
}
/// STOR round-trip: uploads a small file, downloads it again, and compares.
/// Also checks that STOR is rejected before login.
#[tokio::test]
async fn put() {
    use std::io::Cursor;

    let addr = "127.0.0.1:1238";
    let path = std::env::temp_dir();
    tokio::spawn(libunftp::Server::with_fs(path).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;

    let content = b"Hello from this test!\n";

    let mut ftp_stream = FtpStream::connect(addr).await.unwrap();
    let mut reader = Cursor::new(content);

    // NOTE(review): assumes the pre-login attempt is rejected before any
    // bytes are read from `reader`, leaving the cursor at 0 for the real
    // upload below — confirm against async_ftp's put implementation.
    ensure_login_required(ftp_stream.put("greeting.txt", &mut reader).await);

    ftp_stream.login("hoi", "jij").await.unwrap();
    ftp_stream.put("greeting.txt", &mut reader).await.unwrap();

    // retrieve file back again, and check if we got the same back.
    let remote_data = ftp_stream.simple_retr("greeting.txt").await.unwrap().into_inner();
    assert_eq!(remote_data, content);
}
/// LIST must include a file that exists in the FTP root, and must be
/// rejected before login.
#[tokio::test]
async fn list() {
    let addr = "127.0.0.1:1239";
    let root = std::env::temp_dir();
    tokio::spawn(libunftp::Server::with_fs(root.clone()).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;

    // Drop a marker file into the FTP root so we can look for it in the
    // LIST output; close it immediately by ending the scope.
    {
        let _f = std::fs::File::create(root.join("test.txt"));
    }

    let mut ftp_stream = FtpStream::connect(addr).await.unwrap();
    ensure_login_required(ftp_stream.list(None).await);

    ftp_stream.login("hoi", "jij").await.unwrap();
    let listing = ftp_stream.list(None).await.unwrap();
    assert!(listing.iter().any(|entry| entry.contains("test.txt")));
}
/// PWD reports "/" for a fresh session, and is rejected before login.
#[tokio::test]
async fn pwd() {
    let addr = "127.0.0.1:1240";
    let root = std::env::temp_dir();
    tokio::spawn(libunftp::Server::with_fs(root).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;

    let mut ftp_stream = FtpStream::connect(addr).await.unwrap();

    // Make sure we fail if we're not logged in
    ensure_login_required(ftp_stream.pwd().await);

    ftp_stream.login("hoi", "jij").await.unwrap();
    let pwd = ftp_stream.pwd().await.unwrap();
    assert_eq!(&pwd, "/");
}
/// CWD into a fresh subdirectory of the root; PWD must then report that
/// directory relative to "/". CWD is rejected before login.
#[tokio::test]
async fn cwd() {
    let addr = "127.0.0.1:1241";
    let root = std::env::temp_dir();
    let path = root.clone();

    tokio::spawn(libunftp::Server::with_fs(path.clone()).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;

    let mut ftp_stream = FtpStream::connect(addr).await.unwrap();
    // A uniquely named directory inside the FTP root to change into.
    let dir_in_root = tempfile::TempDir::new_in(path).unwrap();
    let basename = dir_in_root.path().file_name().unwrap();

    ensure_login_required(ftp_stream.cwd(basename.to_str().unwrap()).await);

    ftp_stream.login("hoi", "jij").await.unwrap();
    ftp_stream.cwd(basename.to_str().unwrap()).await.unwrap();
    let pwd = ftp_stream.pwd().await.unwrap();
    assert_eq!(std::path::Path::new(&pwd), std::path::Path::new("/").join(&basename));
}
/// CDUP from a subdirectory returns the session to "/". CDUP is rejected
/// before login.
#[tokio::test]
async fn cdup() {
    let addr = "127.0.0.1:1242";
    let root = std::env::temp_dir();
    let path = root.clone();

    tokio::spawn(libunftp::Server::with_fs(path.clone()).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;

    let mut ftp_stream = FtpStream::connect(addr).await.unwrap();
    // A uniquely named directory inside the FTP root to descend into first.
    let dir_in_root = tempfile::TempDir::new_in(path).unwrap();
    let basename = dir_in_root.path().file_name().unwrap();

    ensure_login_required(ftp_stream.cdup().await);

    ftp_stream.login("hoi", "jij").await.unwrap();
    ftp_stream.cwd(basename.to_str().unwrap()).await.unwrap();
    let pwd = ftp_stream.pwd().await.unwrap();
    assert_eq!(std::path::Path::new(&pwd), std::path::Path::new("/").join(&basename));

    ftp_stream.cdup().await.unwrap();
    let pwd = ftp_stream.pwd().await.unwrap();
    assert_eq!(std::path::Path::new(&pwd), std::path::Path::new("/"));
}
/// DELE must require a login and must actually remove the file on disk.
///
/// Fix: the post-delete check previously called `std::fs::metadata` with the
/// bare file name, which is resolved against the process CWD — that path is
/// NotFound regardless of whether the delete happened, so the assertion
/// could never fail. Check the temp file's full path instead.
#[tokio::test]
async fn dele() {
    let addr = "127.0.0.1:1243";
    let root = std::env::temp_dir();
    tokio::spawn(libunftp::Server::with_fs(root).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;

    let mut ftp_stream = FtpStream::connect(addr).await.unwrap();
    // NamedTempFile::new() creates the file in the OS temp dir, i.e. inside
    // the FTP root configured above.
    let file_in_root = tempfile::NamedTempFile::new().unwrap();
    let file_name = file_in_root.path().file_name().unwrap().to_str().unwrap();

    ensure_login_required(ftp_stream.rm(file_name).await);

    ftp_stream.login("hoi", "jij").await.unwrap();
    ftp_stream.rm(file_name).await.unwrap();
    assert_eq!(
        std::fs::metadata(file_in_root.path()).unwrap_err().kind(),
        std::io::ErrorKind::NotFound
    );
}
/// QUIT closes the control connection: a subsequent NOOP must fail.
#[tokio::test]
async fn quit() {
    let addr = "127.0.0.1:1244";
    let root = std::env::temp_dir();
    tokio::spawn(libunftp::Server::with_fs(root).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;

    let mut ftp_stream = FtpStream::connect(addr).await.unwrap();
    ftp_stream.quit().await.unwrap();
    // Make sure the connection is actually closed
    // This may take some time, so we'll sleep for a bit.
    tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    ftp_stream.noop().await.unwrap_err();
}
/// NLST on a root containing exactly one file returns exactly that name.
/// A fresh TempDir is used as the root so no other entries pollute the
/// listing. NLST is rejected before login.
#[tokio::test]
async fn nlst() {
    let addr = "127.0.0.1:1245";
    let root = tempfile::TempDir::new().unwrap().into_path();
    let path = root.clone();

    tokio::spawn(libunftp::Server::with_fs(path.clone()).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;

    // Create a filename that we wanna see in the `NLST` output
    let path = path.join("test.txt");
    {
        let _f = std::fs::File::create(path);
    }

    let mut ftp_stream = FtpStream::connect(addr).await.unwrap();
    ensure_login_required(ftp_stream.nlst(None).await);

    ftp_stream.login("hoi", "jij").await.unwrap();
    let list = ftp_stream.nlst(None).await.unwrap();
    assert_eq!(list, vec!["test.txt"]);
}
/// MKD creates a directory on disk under the FTP root; rejected before login.
#[tokio::test]
async fn mkdir() {
    let addr = "127.0.0.1:1246";
    let root = tempfile::TempDir::new().unwrap().into_path();
    tokio::spawn(libunftp::Server::with_fs(root.clone()).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;

    let mut ftp_stream = FtpStream::connect(addr).await.unwrap();
    let new_dir_name = "hallo";

    ensure_login_required(ftp_stream.mkdir(new_dir_name).await);

    ftp_stream.login("hoi", "jij").await.unwrap();
    ftp_stream.mkdir(new_dir_name).await.unwrap();

    // Verify on the local filesystem that the directory really exists.
    let full_path = root.join(new_dir_name);
    let metadata = std::fs::metadata(full_path).unwrap();
    assert!(metadata.is_dir());
}
/// RNFR/RNTO: renames a file and verifies the old name is gone and the new
/// one exists. Rejected before login.
///
/// Fixes: replaced the blocking `std::thread::sleep` with `tokio::time::sleep`
/// (a blocking sleep inside an async fn stalls the runtime worker thread and
/// can starve the server task spawned above), and dropped the needless `&`
/// borrows of values that are already `&str`.
#[tokio::test]
async fn rename() {
    let addr = "127.0.0.1:1247";
    let root = tempfile::TempDir::new().unwrap().into_path();
    tokio::spawn(libunftp::Server::with_fs(root.clone()).listen(addr));
    tokio::time::sleep(Duration::new(1, 0)).await;

    // Create a file that we will rename
    let full_from = root.join("ikbenhier.txt");
    let _f = std::fs::File::create(&full_from);
    let from_filename = full_from.file_name().unwrap().to_str().unwrap();

    // What we'll rename our file to
    let full_to = root.join("nu ben ik hier.txt");
    let to_filename = full_to.file_name().unwrap().to_str().unwrap();

    let mut ftp_stream = FtpStream::connect(addr).await.expect("Failed to connect");

    // Make sure we fail if we're not logged in
    ensure_login_required(ftp_stream.rename(from_filename, to_filename).await);

    // Do the renaming
    ftp_stream.login("some", "user").await.unwrap();
    ftp_stream
        .rename(from_filename, to_filename)
        .await
        .expect("Failed to rename");

    // Give the OS some time to actually rename the thingy.
    tokio::time::sleep(std::time::Duration::from_millis(100)).await;

    // Make sure the old filename is gone
    std::fs::metadata(full_from).expect_err("Renamed file still exists with old name");
    // Make sure the new filename exists
    let metadata = std::fs::metadata(full_to).expect("New filename not created");
    assert!(metadata.is_file());
}
// This test hang on the latest Rust version it seems. Disabling till we fix
// #[tokio::test]
// async fn size() {
// let addr = "127.0.0.1:1251";
// let root = std::env::temp_dir();
// tokio::spawn(libunftp::Server::with_fs(root.clone()).listen(addr));
// tokio::time::sleep(Duration::new(1, 0)).await;
//
// let mut ftp_stream = FtpStream::connect(addr).await.unwrap();
// let file_in_root = tempfile::NamedTempFile::new_in(root).unwrap();
// let file_name = file_in_root.path().file_name().unwrap().to_str().unwrap();
//
// let mut w = BufWriter::new(&file_in_root);
// w.write_all(b"Hello unftp").expect("Should be able to write to the temp file.");
// w.flush().expect("Should be able to flush the temp file.");
//
// // Make sure we fail if we're not logged in
// ensure_login_required(ftp_stream.size(file_name).await);
// ftp_stream.login("hoi", "jij").await.unwrap();
//
// // Make sure we fail if we don't supply a path
// ftp_stream.size("").await.unwrap_err();
// let size1 = ftp_stream.size(file_name).await;
// let size2 = size1.unwrap();
// let size3 = size2.unwrap();
// assert_eq!(size3, fs::metadata(&file_in_root).unwrap().len() as usize, "Wrong size returned.");
// }
| 33.40678 | 102 | 0.616819 |
1c3222d88397027e8e27c85108f48a031f6d4160 | 13,571 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// One page of workspaces plus an optional link to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceListResult {
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<QuantumWorkspace>,
    #[serde(rename = "nextLink", skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// One page of provider offerings plus an optional link to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OfferingsListResult {
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ProviderDescription>,
    #[serde(rename = "nextLink", skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A quantum workspace: a tracked resource (id/name/type/location/tags) with
/// workspace properties, an optional identity, and read-only system metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuantumWorkspace {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkspaceResourceProperties>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub identity: Option<quantum_workspace::Identity>,
    // Read-only: deserialized from `systemData`, never serialized.
    #[serde(rename = "systemData", skip_serializing)]
    pub system_data: Option<SystemData>,
}
/// Nested types for [`QuantumWorkspace`].
pub mod quantum_workspace {
    use super::*;
    /// Identity attached to the workspace.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Identity {
        #[serde(rename = "principalId", skip_serializing)]
        pub principal_id: Option<String>,
        #[serde(rename = "tenantId", skip_serializing)]
        pub tenant_id: Option<String>,
        #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
        pub type_: Option<identity::Type>,
    }
    pub mod identity {
        use super::*;
        /// Identity kind.
        #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
        pub enum Type {
            SystemAssigned,
            None,
        }
    }
}
/// Wrapper carrying only a `tags` map (used for tag-update requests).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagsObject {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
/// Properties of a quantum workspace: enabled providers, usability and
/// provisioning state, linked storage account, and service endpoint URI.
/// Fields marked plain `skip_serializing` are read-only (server-populated).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceResourceProperties {
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub providers: Vec<Provider>,
    #[serde(skip_serializing)]
    pub usable: Option<workspace_resource_properties::Usable>,
    #[serde(rename = "provisioningState", skip_serializing)]
    pub provisioning_state: Option<workspace_resource_properties::ProvisioningState>,
    #[serde(rename = "storageAccount", skip_serializing_if = "Option::is_none")]
    pub storage_account: Option<String>,
    #[serde(rename = "endpointUri", skip_serializing)]
    pub endpoint_uri: Option<String>,
}
/// Nested enums for [`WorkspaceResourceProperties`].
pub mod workspace_resource_properties {
    use super::*;
    /// Whether the workspace is currently usable.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Usable {
        Yes,
        No,
        Partial,
    }
    /// Deployment lifecycle state of the workspace.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        ProviderLaunching,
        ProviderUpdating,
        ProviderDeleting,
        ProviderProvisioning,
        Failed,
    }
}
/// Catalog entry describing an offered provider.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProviderDescription {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(skip_serializing)]
    pub name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub properties: Option<ProviderProperties>,
}
/// Detailed provider metadata: company/endpoint info, AAD and managed-app
/// registration, and the targets, SKUs, quota and pricing dimensions offered.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProviderProperties {
    #[serde(skip_serializing)]
    pub description: Option<String>,
    #[serde(rename = "providerType", skip_serializing)]
    pub provider_type: Option<String>,
    #[serde(skip_serializing)]
    pub company: Option<String>,
    #[serde(rename = "defaultEndpoint", skip_serializing)]
    pub default_endpoint: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub aad: Option<provider_properties::Aad>,
    #[serde(rename = "managedApplication", skip_serializing_if = "Option::is_none")]
    pub managed_application: Option<provider_properties::ManagedApplication>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub targets: Vec<TargetDescription>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub skus: Vec<SkuDescription>,
    #[serde(rename = "quotaDimensions", skip_serializing_if = "Vec::is_empty")]
    pub quota_dimensions: Vec<QuotaDimension>,
    #[serde(rename = "pricingDimensions", skip_serializing_if = "Vec::is_empty")]
    pub pricing_dimensions: Vec<PricingDimension>,
}
/// Nested types for [`ProviderProperties`].
pub mod provider_properties {
    use super::*;
    /// AAD application/tenant registration identifiers (read-only).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Aad {
        #[serde(rename = "applicationId", skip_serializing)]
        pub application_id: Option<String>,
        #[serde(rename = "tenantId", skip_serializing)]
        pub tenant_id: Option<String>,
    }
    /// Managed-application publisher/offer identifiers (read-only).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct ManagedApplication {
        #[serde(rename = "publisherId", skip_serializing)]
        pub publisher_id: Option<String>,
        #[serde(rename = "offerId", skip_serializing)]
        pub offer_id: Option<String>,
    }
}
/// A target offered by a provider, including the data formats and content
/// encodings it accepts.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TargetDescription {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "acceptedDataFormats", skip_serializing_if = "Vec::is_empty")]
    pub accepted_data_formats: Vec<String>,
    #[serde(rename = "acceptedContentEncodings", skip_serializing_if = "Vec::is_empty")]
    pub accepted_content_encodings: Vec<String>,
}
/// A provider SKU with its targets, quota dimensions and pricing details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SkuDescription {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "restrictedAccessUri", skip_serializing_if = "Option::is_none")]
    pub restricted_access_uri: Option<String>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub targets: Vec<String>,
    #[serde(rename = "quotaDimensions", skip_serializing_if = "Vec::is_empty")]
    pub quota_dimensions: Vec<QuotaDimension>,
    #[serde(rename = "pricingDetails", skip_serializing_if = "Vec::is_empty")]
    pub pricing_details: Vec<PricingDetail>,
}
/// One quota dimension: scope, period, limit and display metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaDimension {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub scope: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub period: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quota: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub unit: Option<String>,
    #[serde(rename = "unitPlural", skip_serializing_if = "Option::is_none")]
    pub unit_plural: Option<String>,
}
/// A single pricing id/value pair.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PricingDetail {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
/// A pricing dimension identifier and display name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PricingDimension {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}
/// A provider instance enabled on a workspace (contrast with the catalog
/// entry [`ProviderDescription`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Provider {
    #[serde(rename = "providerId", skip_serializing_if = "Option::is_none")]
    pub provider_id: Option<String>,
    #[serde(rename = "providerSku", skip_serializing_if = "Option::is_none")]
    pub provider_sku: Option<String>,
    #[serde(rename = "instanceUri", skip_serializing_if = "Option::is_none")]
    pub instance_uri: Option<String>,
    #[serde(rename = "applicationName", skip_serializing_if = "Option::is_none")]
    pub application_name: Option<String>,
    #[serde(rename = "provisioningState", skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<provider::ProvisioningState>,
    #[serde(rename = "resourceUsageId", skip_serializing_if = "Option::is_none")]
    pub resource_usage_id: Option<String>,
}
/// Nested enums for [`Provider`].
pub mod provider {
    use super::*;
    /// Lifecycle state of a provider instance.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Succeeded,
        Launching,
        Updating,
        Deleting,
        Deleted,
        Failed,
    }
}
/// A REST API operation exposed by the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "isDataAction", skip_serializing_if = "Option::is_none")]
    pub is_data_action: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub display: Option<operation::Display>,
}
/// Nested types for [`Operation`].
pub mod operation {
    use super::*;
    /// Display strings for an operation.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Display {
        #[serde(skip_serializing_if = "Option::is_none")]
        pub provider: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        pub resource: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        pub operation: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        pub description: Option<String>,
    }
}
/// Page of operations plus an optional link to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationsList {
    #[serde(rename = "nextLink", skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    pub value: Vec<Operation>,
}
/// Request body for a name-availability check.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckNameAvailabilityParameters {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}
/// Outcome of a name-availability check.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckNameAvailabilityResult {
    #[serde(rename = "nameAvailable", skip_serializing_if = "Option::is_none")]
    pub name_available: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
    #[serde(skip_serializing)]
    pub message: Option<String>,
}
/// Top-level error envelope wrapping an [`ErrorDetail`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorDetail>,
}
/// Error payload: code/message/target plus recursive details (all read-only).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetail {
    #[serde(skip_serializing)]
    pub code: Option<String>,
    #[serde(skip_serializing)]
    pub message: Option<String>,
    #[serde(skip_serializing)]
    pub target: Option<String>,
    #[serde(skip_serializing)]
    pub details: Vec<ErrorDetail>,
    #[serde(rename = "additionalInfo", skip_serializing)]
    pub additional_info: Vec<ErrorAdditionalInfo>,
}
/// Extra, typed error information (read-only).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorAdditionalInfo {
    #[serde(rename = "type", skip_serializing)]
    pub type_: Option<String>,
    #[serde(skip_serializing)]
    pub info: Option<serde_json::Value>,
}
/// Creation/last-modification audit metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemData {
    #[serde(rename = "createdBy", skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
    #[serde(rename = "createdByType", skip_serializing_if = "Option::is_none")]
    pub created_by_type: Option<system_data::CreatedByType>,
    #[serde(rename = "createdAt", skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "lastModifiedBy", skip_serializing_if = "Option::is_none")]
    pub last_modified_by: Option<String>,
    #[serde(rename = "lastModifiedByType", skip_serializing_if = "Option::is_none")]
    pub last_modified_by_type: Option<system_data::LastModifiedByType>,
    #[serde(rename = "lastModifiedAt", skip_serializing_if = "Option::is_none")]
    pub last_modified_at: Option<String>,
}
/// Nested enums for [`SystemData`].
pub mod system_data {
    use super::*;
    /// Kind of principal that created the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum CreatedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
    /// Kind of principal that last modified the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LastModifiedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
}
/// A resource with a required `location` and optional `tags`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TrackedResource {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    pub location: String,
}
/// Base resource identity: id, name and type (all read-only).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(skip_serializing)]
    pub id: Option<String>,
    #[serde(skip_serializing)]
    pub name: Option<String>,
    #[serde(rename = "type", skip_serializing)]
    pub type_: Option<String>,
}
| 39.222543 | 88 | 0.690295 |
ed95d4e9b7187072fe8c0d3357700404b5c48d83 | 5,262 | // Copyright 2015-2016 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//! ECDSA Signatures using the P-256 and P-384 curves.
use crate::{
digest,
ec::suite_b::ops::*,
limb::{self, LIMB_BYTES},
};
/// Calculate the digest of `msg` using the digest algorithm `digest_alg`. Then
/// convert the digest to a scalar in the range [0, n) as described in
/// NIST's FIPS 186-4 Section 4.2. Note that this is one of the few cases where
/// a `Scalar` is allowed to have the value zero.
///
/// NIST's FIPS 186-4 4.2 says "When the length of the output of the hash
/// function is greater than N (i.e., the bit length of q), then the leftmost N
/// bits of the hash function output block shall be used in any calculation
/// using the hash function output during the generation or verification of a
/// digital signature."
///
/// "Leftmost N bits" means "N most significant bits" because we interpret the
/// digest as a bit-endian encoded integer.
///
/// The NSA guide instead vaguely suggests that we should convert the digest
/// value to an integer and then reduce it mod `n`. However, real-world
/// implementations (e.g. `digest_to_bn` in OpenSSL and `hashToInt` in Go) do
/// what FIPS 186-4 says to do, not what the NSA guide suggests.
///
/// Why shifting the value right by at most one bit is sufficient: P-256's `n`
/// has its 256th bit set; i.e. 2**255 < n < 2**256. Once we've truncated the
/// digest to 256 bits and converted it to an integer, it will have a value
/// less than 2**256. If the value is larger than `n` then shifting it one bit
/// right will give a value less than 2**255, which is less than `n`. The
/// analogous argument applies for P-384. However, it does *not* apply in
/// general; for example, it doesn't apply to P-521.
pub fn digest_scalar(ops: &ScalarOps, msg: digest::Digest) -> Scalar {
    // Delegate to the byte-slice variant; the doc comment above explains the
    // FIPS 186-4 truncation rules it implements.
    digest_scalar_(ops, msg.as_ref())
}
/// Test-only entry point that accepts a raw digest byte slice, so tests can
/// feed crafted inputs (all zeros, values larger than `n`, ...) directly.
#[cfg(test)]
pub(crate) fn digest_bytes_scalar(ops: &ScalarOps, digest: &[u8]) -> Scalar {
    digest_scalar_(ops, digest)
}
// This is a separate function solely so that we can test specific digest
// values like all-zero values and values larger than `n`.
fn digest_scalar_(ops: &ScalarOps, digest: &[u8]) -> Scalar {
    let cops = ops.common;
    // Keep only the leftmost (most significant) `num_limbs * LIMB_BYTES`
    // bytes of the digest, as mandated by FIPS 186-4 Section 4.2.
    let max_len = cops.num_limbs * LIMB_BYTES;
    let truncated = &digest[..digest.len().min(max_len)];
    scalar_parse_big_endian_partially_reduced_variable_consttime(
        cops,
        limb::AllowZero::Yes,
        untrusted::Input::from(truncated),
    )
    .unwrap()
}
#[cfg(test)]
mod tests {
    use super::digest_bytes_scalar;
    use crate::{
        digest,
        ec::suite_b::ops::*,
        limb::{self, LIMB_BYTES},
        test,
    };
    // Data-driven test: each case in the fixture file supplies a curve, a
    // digest algorithm, a raw digest, and the expected scalar encoding.
    #[test]
    fn test() {
        test::run(
            test_file!("ecdsa_digest_scalar_tests.txt"),
            |section, test_case| {
                assert_eq!(section, "");
                let curve_name = test_case.consume_string("Curve");
                let digest_name = test_case.consume_string("Digest");
                let input = test_case.consume_bytes("Input");
                let output = test_case.consume_bytes("Output");
                let (ops, digest_alg) = match (curve_name.as_str(), digest_name.as_str()) {
                    ("P-256", "SHA256") => (&p256::PUBLIC_SCALAR_OPS, &digest::SHA256),
                    ("P-256", "SHA384") => (&p256::PUBLIC_SCALAR_OPS, &digest::SHA384),
                    ("P-384", "SHA256") => (&p384::PUBLIC_SCALAR_OPS, &digest::SHA256),
                    ("P-384", "SHA384") => (&p384::PUBLIC_SCALAR_OPS, &digest::SHA384),
                    _ => {
                        panic!("Unsupported curve+digest: {}+{}", curve_name, digest_name);
                    }
                };
                let num_limbs = ops.public_key_ops.common.num_limbs;
                // Sanity-check the fixture: the input must match the digest's
                // output size, and the expected scalar must be exactly
                // `num_limbs` limbs wide.
                assert_eq!(input.len(), digest_alg.output_len());
                assert_eq!(
                    output.len(),
                    ops.public_key_ops.common.num_limbs * LIMB_BYTES
                );
                let expected = scalar_parse_big_endian_variable(
                    ops.public_key_ops.common,
                    limb::AllowZero::Yes,
                    untrusted::Input::from(&output),
                )
                .unwrap();
                let actual = digest_bytes_scalar(ops.scalar_ops, &input);
                assert_eq!(actual.limbs[..num_limbs], expected.limbs[..num_limbs]);
                Ok(())
            },
        );
    }
}
| 39.863636 | 91 | 0.622197 |
1a543af260486ba30294450257ad6f6d5dc2f722 | 586 | use std::error::Error;
use std::fmt;
/// Terminate without printing a message, carrying only a process exit code.
#[derive(Debug)]
pub struct QuietExit(pub i32);

impl Error for QuietExit {}

impl fmt::Display for QuietExit {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Render exactly the numeric exit code, nothing else.
        fmt::Display::fmt(&self.0, f)
    }
}
/// Error type that carries nothing but a message string.
#[derive(Debug)]
pub struct ErrMsg(String);

impl Error for ErrMsg {}

impl fmt::Display for ErrMsg {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(&self.0)
    }
}

/// Convenience constructor: wrap any string-like value in a boxed [`ErrMsg`].
pub fn err_msg<S: Into<String>>(s: S) -> Box<dyn Error> {
    let message = s.into();
    Box::new(ErrMsg(message))
}
| 19.533333 | 58 | 0.602389 |
269bc9a07977cb2501e4d69d4f603a7354876658 | 1,391 | use std::sync::Arc;
use std::sync::Mutex;
use tbot::{
contexts::{Command, Text},
types::{input_file, parameters},
};
use crate::data::Database;
use crate::opml::into_opml;
use super::{check_channel_permission, update_response, MsgTarget};
/// Handles the `/export` command: sends the chat's (or a named channel's)
/// feed subscriptions back to the user as an OPML document.
pub async fn export(
    db: Arc<Mutex<Database>>,
    cmd: Arc<Command<Text>>,
) -> Result<(), tbot::errors::MethodCall> {
    let chat_id = cmd.chat.id;
    let channel = &cmd.text.value;
    let target = &mut MsgTarget::new(chat_id, cmd.message_id);

    // When a channel name is given, the caller must be permitted to manage
    // it; on failure `check_channel_permission` has already responded.
    let mut target_id = chat_id;
    if !channel.is_empty() {
        let user_id = cmd.from.as_ref().unwrap().id;
        match check_channel_permission(&cmd.bot, channel, target, user_id).await? {
            Some(channel_id) => target_id = channel_id,
            None => return Ok(()),
        }
    }

    let feeds = match db.lock().unwrap().subscribed_feeds(target_id.0) {
        Some(feeds) => feeds,
        None => {
            // Nothing subscribed: tell the user and stop.
            update_response(
                &cmd.bot,
                target,
                parameters::Text::with_plain(tr!("subscription_list_empty")),
            )
            .await?;
            return Ok(());
        }
    };

    let opml = into_opml(feeds);
    cmd.bot
        .send_document(
            chat_id,
            input_file::Document::with_bytes("feeds.opml", opml.as_bytes()),
        )
        .in_reply_to(cmd.message_id)
        .call()
        .await?;
    Ok(())
}
| 25.759259 | 93 | 0.585191 |
6adfdf6e6770cb34fd766b67c2dc13ae6b7edeae | 942 | use hal::pso;
use log::debug;
/// Dummy descriptor pool: allocation always succeeds with an empty set, and
/// freeing simply drops the sets (see the trait impl below).
#[derive(Debug)]
pub struct DescriptorPool;
impl pso::DescriptorPool<crate::Backend> for DescriptorPool {
    unsafe fn allocate_one(
        &mut self,
        _layout: &DescriptorSetLayout,
    ) -> Result<DescriptorSet, pso::AllocationError> {
        // Allocation never fails in this dummy backend; hand back an
        // unnamed set.
        let set = DescriptorSet {
            name: String::new(),
        };
        Ok(set)
    }

    unsafe fn free<I>(&mut self, descriptor_sets: I)
    where
        I: Iterator<Item = DescriptorSet>,
    {
        // Dropping each set is all the cleanup this backend needs.
        descriptor_sets.for_each(drop);
    }

    unsafe fn reset(&mut self) {
        debug!("Resetting descriptor pool");
    }
}
/// Dummy descriptor set layout; only remembers its debug name.
#[derive(Debug)]
pub struct DescriptorSetLayout {
    /// User-defined name for this descriptor set layout
    pub(crate) name: String,
}
/// Dummy descriptor set; only remembers its debug name.
#[derive(Debug)]
pub struct DescriptorSet {
    /// User-defined name for this descriptor set
    pub(crate) name: String,
}
| 21.906977 | 61 | 0.619958 |
214ffc16c37a21afa1add315cddfe97d9e584934 | 1,574 | #![allow(clippy::module_inception)]
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::large_enum_variant)]
#![allow(clippy::wrong_self_convention)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::blacklisted_name)]
//! <fullname>Security Token Service</fullname>
//! <p>Security Token Service (STS) enables you to request temporary, limited-privilege
//! credentials for Identity and Access Management (IAM) users or for users that you
//! authenticate (federated users). This guide provides descriptions of the STS API. For
//! more information about using this service, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html">Temporary Security Credentials</a>.</p>
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use error_meta::Error;
pub use config::Config;
mod aws_endpoint;
#[cfg(feature = "client")]
pub mod client;
pub mod config;
pub mod error;
mod error_meta;
pub mod input;
pub mod model;
pub mod operation;
mod operation_deser;
mod operation_ser;
pub mod output;
mod query_ser;
mod rest_xml_wrapped_errors;
mod xml_deser;
pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
pub use smithy_http::byte_stream::ByteStream;
pub use smithy_http::result::SdkError;
pub use smithy_types::Blob;
static API_METADATA: aws_http::user_agent::ApiMetadata =
aws_http::user_agent::ApiMetadata::new("sts", PKG_VERSION);
pub use aws_auth::Credentials;
pub use aws_types::region::Region;
#[cfg(feature = "client")]
pub use client::Client;
pub use smithy_http::endpoint::Endpoint;
| 35.772727 | 174 | 0.770648 |
3987c0022adf87799885a246391e8e12501cd9ea | 2,479 | use super::*;
/// Value converter for type `usize`
pub struct UsizeValue;
impl ConfigValueType for UsizeValue {
    type Value = usize;
    fn from_conf(conf: &Hocon) -> Result<Self::Value, ConfigError> {
        let signed = conf
            .as_i64()
            .ok_or_else(|| ConfigError::expected::<Self::Value>(conf))?;
        // Negative or out-of-range values surface as a conversion error.
        Ok(signed.try_into()?)
    }
    fn config_string(value: Self::Value) -> String {
        value.to_string()
    }
}
/// Value converter for type `f32`
pub struct F32Value;
impl ConfigValueType for F32Value {
    type Value = f32;
    fn from_conf(conf: &Hocon) -> Result<Self::Value, ConfigError> {
        match conf.as_f64() {
            // Narrowing f64 -> f32 never fails; it can only lose precision.
            Some(wide) => Ok(wide as f32),
            None => Err(ConfigError::expected::<Self::Value>(conf)),
        }
    }
    fn config_string(value: Self::Value) -> String {
        value.to_string()
    }
}
/// Value converter for byte units
///
/// Behaves like [BytesValue](crate::config::BytesValue), except that it
/// accepts only positive values and rounds them to the nearest integer.
pub struct WholeBytesValue;
impl ConfigValueType for WholeBytesValue {
    type Value = u64;
    fn from_conf(conf: &Hocon) -> Result<Self::Value, ConfigError> {
        let res = conf
            .as_bytes()
            .ok_or_else(|| ConfigError::expected::<f64>(conf))?;
        // Zero and negative sizes are rejected before rounding.
        config_assert!(res > 0.0, res);
        Ok(res.round() as u64)
    }
    fn config_string(value: Self::Value) -> String {
        BytesValue::config_string(value as f64)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::tests::{conf_test_roundtrip, str_conf};
    // 1.5 KiB = 1536 bytes; checks that fractional byte units round cleanly.
    #[test]
    fn test_whole_bytes() {
        let conf = str_conf("size = 1.5KiB");
        let res = WholeBytesValue::from_conf(&conf["size"]);
        assert_eq!(Ok(1536u64), res);
    }
    // Negative sizes must be rejected by the positivity assertion.
    #[test]
    fn test_whole_bytes_error() {
        let conf = str_conf("size = -1.5KiB");
        let res = WholeBytesValue::from_conf(&conf["size"]);
        assert!(res.is_err());
        println!("WholeBytesValue error message: {}", res.unwrap_err());
    }
    // Round-trips: config_string followed by from_conf yields the input.
    #[test]
    fn test_whole_bytes_roundtrip() {
        conf_test_roundtrip::<WholeBytesValue>(1536);
    }
    #[test]
    fn test_usize_roundtrip() {
        conf_test_roundtrip::<UsizeValue>(1536usize);
    }
    #[test]
    fn test_f32_roundtrip() {
        conf_test_roundtrip::<F32Value>(5.0f32);
    }
}
| 26.37234 | 74 | 0.598225 |
dda6414df20aaf27987933dd2e3242827c7c7261 | 1,807 | use crate::util::*;
use crate::{ApiResult, SchemaVersion};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
/// Response payload for the achievement-groups collection endpoint.
/// Currently an empty placeholder; fields are added as the API is modeled.
#[derive(Deserialize, Serialize)]
pub struct AchievementsGroupsData {}
impl AchievementsGroupsData {}
/// Request builder for the achievement `/groups` endpoint.
#[derive(Clone)]
pub struct Builder {
    client: Client,
    // API key and schema version are shared (Arc) across derived builders.
    key: Arc<Option<String>>,
    version: Arc<SchemaVersion>,
    url: String,
}
impl Builder {
    /// Execute the request for the whole collection. Not yet implemented.
    pub async fn build(self) -> ApiResult<AchievementsGroupsData> {
        todo!()
    }
    /// Narrow the request to a single group identified by `guid`.
    pub fn id(self, guid: String) -> IdBuilder {
        IdBuilder {
            client: self.client,
            key: self.key,
            version: self.version,
            // Single-id endpoints are addressed as `.../{guid}`.
            url: self.url + "/",
            guid,
        }
    }
    /// Narrow the request to several groups identified by `guids`.
    pub fn ids(self, guids: Vec<String>) -> MultiIdBuilder {
        MultiIdBuilder {
            client: self.client,
            key: self.key,
            version: self.version,
            // Multi-id endpoints are addressed via an `?ids=` query string.
            url: self.url + "?ids=",
            guids,
        }
    }
}
impl From<super::Builder> for Builder {
    /// Descend from the parent achievements builder by appending `/groups`
    /// to its URL; all other state is carried over unchanged.
    fn from(source: super::Builder) -> Self {
        Self {
            client: source.client,
            key: source.key,
            version: source.version,
            url: source.url + "/groups",
        }
    }
}
/// Response payload for a single achievement group. Placeholder for now.
#[derive(Deserialize, Serialize)]
pub struct IdData {}
impl IdData {}
/// Request builder for one group selected by its GUID.
pub struct IdBuilder {
    client: Client,
    key: Arc<Option<String>>,
    version: Arc<SchemaVersion>,
    url: String,
    guid: String,
}
impl IdBuilder {
    /// Execute the single-group request. Not yet implemented.
    pub async fn build(self) -> ApiResult<IdData> {
        todo!()
    }
}
/// Request builder for several groups selected by their GUIDs.
pub struct MultiIdBuilder {
    client: Client,
    key: Arc<Option<String>>,
    version: Arc<SchemaVersion>,
    url: String,
    guids: Vec<String>,
}
impl MultiIdBuilder {
    /// Execute the multi-group request. Not yet implemented.
    pub async fn build(self) -> ApiResult<Vec<IdData>> {
        todo!()
    }
}
| 20.303371 | 67 | 0.569452 |
3abd19e5a11364d942dad58988a8be938087c8ee | 890 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct Foo {
x: int,
}
pub impl Foo {
fn f(&self) {}
fn g(&const self) {}
fn h(&mut self) {}
}
fn a(x: &mut Foo) {
x.f();
x.g();
x.h();
}
fn b(x: &Foo) {
x.f();
x.g();
x.h(); //~ ERROR cannot borrow
}
fn c(x: &const Foo) {
x.f(); //~ ERROR cannot borrow
//~^ ERROR unsafe borrow
x.g();
x.h(); //~ ERROR cannot borrow
//~^ ERROR unsafe borrow
}
fn main() {
}
| 20.697674 | 68 | 0.604494 |
879b3e920ab96ba067ed9f3c8e9ee6e83b8cc974 | 3,192 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-pretty-expanded FIXME #15189
// ignore-windows FIXME #13259
#![feature(unboxed_closures)]
#![feature(unsafe_destructor)]
use std::env;
use std::old_io::process::Command;
use std::str;
use std::ops::{Drop, FnMut, FnOnce};
// Panics only when re-executed as a child process: `runtest` sets IS_TEST
// on the spawned command's environment.
#[inline(never)]
fn foo() {
    let _v = vec![1, 2, 3];
    if env::var_os("IS_TEST").is_some() {
        panic!()
    }
}
// Panics once explicitly, then panics again from `Double`'s destructor
// while unwinding — exercises the double-panic path.
#[inline(never)]
fn double() {
    struct Double;
    impl Drop for Double {
        fn drop(&mut self) { panic!("twice") }
    }
    let _d = Double;
    panic!("once");
}
// Re-runs this binary in its "fail"/"double-fail" modes and checks that a
// stack backtrace appears on stderr exactly when RUST_BACKTRACE is set.
fn runtest(me: &str) {
    let mut template = Command::new(me);
    template.env("IS_TEST", "1");
    // Make sure that the stack trace is printed
    let p = template.clone().arg("fail").env("RUST_BACKTRACE", "1").spawn().unwrap();
    let out = p.wait_with_output().unwrap();
    assert!(!out.status.success());
    let s = str::from_utf8(&out.error).unwrap();
    assert!(s.contains("stack backtrace") && s.contains("foo::h"),
            "bad output: {}", s);
    // Make sure the stack trace is *not* printed
    // (Remove RUST_BACKTRACE from our own environment, in case developer
    // is running `make check` with it on.)
    let p = template.clone().arg("fail").env_remove("RUST_BACKTRACE").spawn().unwrap();
    let out = p.wait_with_output().unwrap();
    assert!(!out.status.success());
    let s = str::from_utf8(&out.error).unwrap();
    assert!(!s.contains("stack backtrace") && !s.contains("foo::h"),
            "bad output2: {}", s);
    // Make sure a stack trace is printed
    let p = template.clone().arg("double-fail").spawn().unwrap();
    let out = p.wait_with_output().unwrap();
    assert!(!out.status.success());
    let s = str::from_utf8(&out.error).unwrap();
    // loosened the following from double::h to double:: due to
    // spurious failures on mac, 32bit, optimized
    assert!(s.contains("stack backtrace") && s.contains("double::"),
            "bad output3: {}", s);
    // Make sure a stack trace isn't printed too many times
    let p = template.clone().arg("double-fail")
                    .env("RUST_BACKTRACE", "1").spawn().unwrap();
    let out = p.wait_with_output().unwrap();
    assert!(!out.status.success());
    let s = str::from_utf8(&out.error).unwrap();
    // Skip past the two expected occurrences; any third one is a bug.
    let mut i = 0;
    for _ in 0..2 {
        i += s[i + 10..].find_str("stack backtrace").unwrap() + 10;
    }
    assert!(s[i + 10..].find_str("stack backtrace").is_none(),
            "bad output4: {}", s);
}
fn main() {
    // Dispatch: the binary re-executes itself with "fail"/"double-fail"
    // arguments so that `runtest` can observe its own panic output.
    let args: Vec<String> = env::args().collect();
    if args.len() >= 2 && args[1] == "fail" {
        foo();
    } else if args.len() >= 2 && args[1] == "double-fail" {
        double();
    } else {
        runtest(&args[0]);
    }
}
| 32.242424 | 87 | 0.602444 |
0376e9003c7149ca8dd99e93cf2d14f74e221ad6 | 3,821 | use indoc::indoc;
use introspection_engine_tests::test_api::*;
use pretty_assertions::assert_eq;
use test_macros::test_each_connector;
const TYPES: &[(&str, &str)] = &[
//fieldname, db datatype
("int", "Int"),
("smallint", "SmallInt"),
("tinyint", "TinyInt"),
("bigint", "BigInt"),
("decimal", "Decimal(5,3)"),
("decimal_2", "Decimal"),
("numeric", "Numeric(4,1)"),
("numeric_2", "Numeric"),
("money", "Money"),
("smallmoney", "SmallMoney"),
("float", "Real"),
("double", "Float(53)"),
("bit", "Bit"),
("chars", "Char(10)"),
("nchars", "NChar(10)"),
("varchars", "VarChar(500)"),
("varchars_2", "VarChar(Max)"),
("nvarchars", "NVarChar(500)"),
("nvarchars_2", "NVarChar(Max)"),
("binary", "Binary(230)"),
("varbinary", "VarBinary(150)"),
("varbinary_2", "VarBinary(Max)"),
("date", "Date"),
("time", "Time"),
("datetime", "DateTime"),
("datetime2", "DateTime2"),
("xml", "Xml"),
("image", "Image"),
("text", "Text"),
("ntext", "NText"),
];
#[test_each_connector(tags("mssql_2017", "mssql_2019"))]
async fn native_type_columns_feature_on(api: &TestApi) -> crate::TestResult {
let columns: Vec<String> = TYPES
.iter()
.map(|(name, db_type)| format!("[{}] {} NOT NULL", name, db_type))
.collect();
api.barrel()
.execute_with_schema(
move |migration| {
migration.create_table("Blog", move |t| {
t.inject_custom("id int identity(1,1) primary key");
for column in &columns {
t.inject_custom(column);
}
});
},
api.db_name(),
)
.await?;
let mut dm = String::from(indoc! {r#"
generator client {
provider = "prisma-client-js"
previewFeatures = ["nativeTypes"]
}
datasource sqlserver {
provider = "sqlserver"
url = "sqlserver://localhost:1433"
}
"#});
let types = indoc! {r#"
model Blog {
id Int @id @default(autoincrement())
int Int
smallint Int @sqlserver.SmallInt
tinyint Int @sqlserver.TinyInt
bigint BigInt
decimal Decimal @sqlserver.Decimal(5, 3)
decimal_2 Decimal @sqlserver.Decimal(18, 0)
numeric Decimal @sqlserver.Decimal(4, 1)
numeric_2 Decimal @sqlserver.Decimal(18, 0)
money Float @sqlserver.Money
smallmoney Float @sqlserver.SmallMoney
float Float @sqlserver.Real
double Float
bit Boolean
chars String @sqlserver.Char(10)
nchars String @sqlserver.NChar(10)
varchars String @sqlserver.VarChar(500)
varchars_2 String @sqlserver.VarChar(Max)
nvarchars String @sqlserver.NVarChar(500)
nvarchars_2 String @sqlserver.NVarChar(Max)
binary Bytes @sqlserver.Binary(230)
varbinary Bytes @sqlserver.VarBinary(150)
varbinary_2 Bytes
date DateTime @sqlserver.Date
time DateTime @sqlserver.Time
datetime DateTime @sqlserver.DateTime
datetime2 DateTime
xml String @sqlserver.Xml
image Bytes @sqlserver.Image
text String @sqlserver.Text
ntext String @sqlserver.NText
}
"#};
let result = api.re_introspect(&dm).await?;
dm.push_str(&types);
println!("EXPECTATION: \n {:#}", dm);
println!("RESULT: \n {:#}", result);
assert_eq!(result, dm);
Ok(())
}
| 31.319672 | 77 | 0.519498 |
089d95912410421ad998d2211326e059e6859c83 | 11,328 | use crate::CliOptions;
use clap::Clap;
use genome_graph::bigraph::implementation::node_bigraph_wrapper::NodeBigraphWrapper;
use genome_graph::bigraph::traitgraph::algo::components::{
decompose_strongly_connected_components, decompose_weakly_connected_components,
};
use genome_graph::bigraph::traitgraph::implementation::petgraph_impl::petgraph::graph::DiGraph;
use genome_graph::bigraph::traitgraph::interface::GraphBase;
use genome_graph::io::wtdbg2::build_wtdbg2_unitigs_graph;
use genome_graph::types::{PetBCalm2EdgeGraph, PetWtdbg2DotGraph, PetWtdbg2Graph};
use omnitigs::unitigs::EdgeUnitigs;
use std::fs::File;
use std::io::{BufWriter, Write};
use traitsequence::interface::Sequence;
#[derive(Clap)]
pub struct ComputeUnitigsCommand {
#[clap(
short,
long,
default_value = "bcalm2",
about = "The format of the input and output files. If bcalm2, the input file is in bcalm2 format and the output file is in fasta format. If wtdbg2, the inputs are .1.nodes and the .1.reads file and the reads file from which these were generated, and the output is the .ctg.lay file. If dot, then the input is a .dot file and the output is a list of sequences of node ids."
)]
pub file_format: String,
#[clap(short, long, about = "The input files in the specified format")]
pub input: Vec<String>,
#[clap(
short,
long,
about = "The kmer size selected when generating the input with bcalm2"
)]
pub kmer_size: Option<usize>,
#[clap(
short,
long,
about = "The file the unitigs are stored into in the specified format"
)]
pub output: String,
#[clap(
short,
long,
about = "A file to output the properties and statistics computed by this command formatted as a LaTeX table"
)]
pub latex: Option<String>,
#[clap(
long,
about = "Instead of outputting unitigs as .ctg.lay file, output them as sequences of node ids"
)]
pub output_as_wtdbg2_node_ids: bool,
#[clap(
short,
long,
about = "Compare the unitigs produced by our algorithm to wtdbg2's contigs"
)]
pub compare_with_wtdbg2_contigs: bool,
}
/// Logs min/max/median/mean unitig lengths and, when `latex_file` is given,
/// appends the same statistics as LaTeX table rows.
///
/// Returns an error only if writing to `latex_file` fails. An empty unitig
/// set is now reported and skipped instead of panicking: the previous code
/// unwrapped `min()`/`max()` and called `statistical::median`/`mean`, all of
/// which fail on empty input.
fn print_unitig_statistics<Graph: GraphBase>(
    unitigs: &EdgeUnitigs<Graph>,
    latex_file: &mut Option<BufWriter<File>>,
) -> crate::Result<()> {
    info!("");
    info!(" === Unitig Statistics === ");
    info!("");

    // Collect the lengths once instead of re-iterating for every statistic.
    let lengths: Vec<usize> = unitigs.iter().map(Sequence::len).collect();
    if lengths.is_empty() {
        info!("No unitigs found; skipping statistics");
        info!("");
        return Ok(());
    }

    let min_unitig_len = *lengths.iter().min().unwrap();
    let max_unitig_len = *lengths.iter().max().unwrap();
    let median_unitig_len = statistical::median(&lengths);
    let mean_unitig_len =
        statistical::mean(&lengths.iter().map(|&l| l as f64).collect::<Vec<_>>());

    info!("Minimum edge length: {}", min_unitig_len);
    info!("Maximum edge length: {}", max_unitig_len);
    info!("Median edge length: {}", median_unitig_len);
    info!("Mean edge length: {:.1}", mean_unitig_len);

    if let Some(latex_file) = latex_file.as_mut() {
        // These rows are hard-coded N/A for unitigs; they presumably mirror
        // the table layout of related omnitig commands — confirm before
        // changing.
        writeln!(
            latex_file,
            "min non-trivial omnitigs per macrotig & N/A \\\\"
        )?;
        writeln!(
            latex_file,
            "max non-trivial omnitigs per macrotig & N/A \\\\"
        )?;
        writeln!(
            latex_file,
            "median non-trivial omnitigs per macrotig & N/A \\\\"
        )?;
        writeln!(
            latex_file,
            "mean non-trivial omnitigs per macrotig & N/A \\\\"
        )?;
        writeln!(latex_file, "min edge length & {} \\\\", min_unitig_len)?;
        writeln!(latex_file, "max edge length & {} \\\\", max_unitig_len)?;
        writeln!(
            latex_file,
            "median edge length & {} \\\\",
            median_unitig_len
        )?;
        writeln!(latex_file, "mean edge length & {:.1} \\\\", mean_unitig_len)?;
    }
    info!("");
    Ok(())
}
pub(crate) fn compute_unitigs(
_options: &CliOptions,
subcommand: &ComputeUnitigsCommand,
) -> crate::Result<()> {
let mut latex_file = if let Some(latex_file_name) = &subcommand.latex {
info!("Creating/truncating LaTeX file '{}'", latex_file_name);
Some(std::io::BufWriter::new(std::fs::File::create(
latex_file_name,
)?))
} else {
None
};
match subcommand.file_format.as_str() {
"bcalm2" => {
if subcommand.output_as_wtdbg2_node_ids {
bail!("Output as wtdbg2 node ids not supported for bcalm2 format");
}
let input = if let Some(input) = subcommand.input.first() {
input
} else {
bail!("No input file given")
};
let kmer_size = if let Some(kmer_size) = subcommand.kmer_size {
kmer_size
} else {
bail!("No kmer size given")
};
info!(
"Reading bigraph from '{}' with kmer size {}",
input, kmer_size
);
let genome_graph: PetBCalm2EdgeGraph =
genome_graph::io::bcalm2::read_bigraph_from_bcalm2_as_edge_centric_from_file(
input, kmer_size,
)?;
info!("Computing maximal unitigs");
let mut unitigs = EdgeUnitigs::compute(&genome_graph);
info!("Removing reverse complements");
unitigs.remove_reverse_complements(&genome_graph);
print_unitig_statistics(&unitigs, &mut latex_file)?;
info!("Storing unitigs as fasta to '{}'", subcommand.output);
genome_graph::io::fasta::write_walks_as_fasta_file(
&genome_graph,
kmer_size,
unitigs.iter(),
&subcommand.output,
)?;
}
"wtdbg2" => {
let nodes_file =
if let Some(file) = subcommand.input.iter().find(|f| f.ends_with(".3.nodes")) {
file
} else {
bail!("Missing .3.nodes file")
};
let reads_file =
if let Some(file) = subcommand.input.iter().find(|f| f.ends_with(".3.reads")) {
file
} else {
bail!("Missing .3.reads file")
};
let dot_file = if let Some(file) = subcommand.input.iter().find(|f| f.ends_with(".dot"))
{
file
} else {
bail!("Missing .dot file")
};
let raw_reads_file =
if let Some(file) = subcommand.input.iter().find(|f| f.ends_with(".fa")) {
file
} else {
bail!("Missing raw reads file ending on .fa")
};
info!(
"Reading bigraph from '{}', '{}', '{}' and '{}'",
nodes_file, reads_file, dot_file, raw_reads_file
);
let genome_graph: PetWtdbg2Graph =
genome_graph::io::wtdbg2::read_graph_from_wtdbg2_from_files(
nodes_file, reads_file, dot_file,
)?;
info!("Computing maximal unitigs");
let mut unitigs = EdgeUnitigs::compute(&genome_graph);
info!("Removing reverse complements");
unitigs.remove_reverse_complements(&genome_graph);
print_unitig_statistics(&unitigs, &mut latex_file)?;
if subcommand.compare_with_wtdbg2_contigs {
info!("Investigating differences between wtdbg2 and our unitigs");
let wtdbg2_unitigs_file =
dot_file[..dot_file.len() - 6].to_owned() + ".wtdbg2.ctg.lay";
info!("Loading wtdbg2 unitigs from '{}'", wtdbg2_unitigs_file);
let mut wtdbg2_unitigs =
genome_graph::io::wtdbg2::read_wtdbg2_contigs_from_file(&wtdbg2_unitigs_file)?;
info!("Converting our unitigs to .ctg.lay format");
let mut our_unitigs =
genome_graph::io::wtdbg2::convert_walks_to_wtdbg2_contigs_with_file(
&genome_graph,
unitigs.iter(),
&raw_reads_file,
)?;
info!("Sorting unitigs");
wtdbg2_unitigs.sort_contigs_topologically();
our_unitigs.sort_contigs_topologically();
wtdbg2_unitigs.update_indices();
our_unitigs.update_indices();
info!(" =========================");
info!(" === Comparing unitigs ===");
info!(" =========================");
wtdbg2_unitigs.compare_contigs(&our_unitigs);
drop(our_unitigs);
info!(" ==============================");
info!(" === Analysing unitig graph ===");
info!(" ==============================");
let unitig_graph: NodeBigraphWrapper<DiGraph<_, _, _>> =
build_wtdbg2_unitigs_graph(&wtdbg2_unitigs);
drop(wtdbg2_unitigs);
let wccs = decompose_weakly_connected_components(&unitig_graph);
info!("Unitig graph has {} wccs", wccs.len());
let sccs = decompose_strongly_connected_components(&unitig_graph);
info!("Unitig graph has {} sccs", sccs.len());
}
if subcommand.output_as_wtdbg2_node_ids {
info!("Storing unitigs as node ids to '{}'", subcommand.output);
genome_graph::io::wtdbg2::write_contigs_as_wtdbg2_node_ids_to_file(
&genome_graph,
unitigs.iter(),
&subcommand.output,
)?;
} else {
info!("Storing unitigs as .ctg.lay to '{}'", subcommand.output);
genome_graph::io::wtdbg2::write_contigs_to_wtdbg2_to_file(
&genome_graph,
unitigs.iter(),
raw_reads_file,
&subcommand.output,
)?;
}
}
"dot" => {
let dot_file = if let Some(file) = subcommand.input.iter().find(|f| f.ends_with(".dot"))
{
file
} else {
bail!("Missing .dot file")
};
info!("Reading bigraph from '{}'", dot_file);
let genome_graph: PetWtdbg2DotGraph =
genome_graph::io::wtdbg2::dot::read_graph_from_wtdbg2_dot_from_file(dot_file)?;
info!("Computing maximal unitigs");
let mut unitigs = EdgeUnitigs::compute(&genome_graph);
info!("Removing reverse complements");
unitigs.remove_reverse_complements(&genome_graph);
print_unitig_statistics(&unitigs, &mut latex_file)?;
info!("Storing unitigs as node ids to '{}'", subcommand.output);
genome_graph::io::wtdbg2::dot::write_dot_contigs_as_wtdbg2_node_ids_to_file(
&genome_graph,
unitigs.iter(),
&subcommand.output,
)?;
}
unknown => bail!("Unknown file format: {}", unknown),
}
Ok(())
}
| 37.76 | 380 | 0.542903 |
8a2fce324e745f1712bf9afc12869662ee27d514 | 119 | pub mod completions_helpers;
pub use completions_helpers::{file, folder, match_suggestions, merge_input, new_engine};
| 29.75 | 88 | 0.823529 |
1a9248b2b69f4d1702b9db3e511fba6b4a6b983c | 1,606 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//
// @generated SignedSource<<63675561385d79edc101dcfe5737239c>>
//
// To regenerate this file, run:
// hphp/hack/src/oxidized_by_rc/regen.sh
use ocamlrep_derive::ToOcamlRep;
use serde::Serialize;
#[allow(unused_imports)]
use crate::*;
pub use crate::error_codes::Naming;
pub use crate::error_codes::NastCheck;
pub use crate::error_codes::Parsing;
pub use crate::error_codes::Typing;
pub use oxidized::errors::ErrorCode;
/// We use `Pos.t message` on the server and convert to `Pos.absolute message`
/// before sending it to the client
pub type Message<A> = (A, std::rc::Rc<String>);
pub use oxidized::errors::Phase;
pub use oxidized::errors::Severity;
pub use oxidized::errors::Format;
pub use oxidized::errors::NameContext;
/// Results of single file analysis.
pub type FileT<A> = phase_map::PhaseMap<Vec<A>>;
/// Results of multi-file analysis.
pub type FilesT<A> = relative_path::map::Map<FileT<A>>;
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, ToOcamlRep)]
pub struct Error_<A>(pub oxidized::errors::ErrorCode, pub Vec<Message<A>>);
pub type Error = Error_<std::rc::Rc<pos::Pos>>;
#[derive(
Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, ToOcamlRep
)]
pub struct AppliedFixme(pub std::rc::Rc<pos::Pos>, pub isize);
#[derive(
Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, ToOcamlRep
)]
pub struct Errors(pub FilesT<Error>, pub FilesT<AppliedFixme>);
| 28.678571 | 78 | 0.725405 |
8fdbe5c71c4f757ed8cd60c4840c8a4f8e090594 | 3,931 | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! This program launches `fuchsia.examples.services.BankAccount` service providers and consumes
//! their instances.
//!
//! This program is written as a test so that it can be easily launched with `fx test`.
use {
fidl_fuchsia_examples_services as fexamples, fidl_fuchsia_io as fio,
fidl_fuchsia_sys2 as fsys2, fuchsia_component::client as fclient, log::*,
};
const COLLECTION_NAME: &'static str = "account_providers";
const TEST_PACKAGE: &'static str = "fuchsia-pkg://fuchsia.com/service-examples";
#[fuchsia::test]
async fn read_and_write_to_multiple_service_instances() {
// Launch two BankAccount providers into the `account_providers` collection.
let realm = fuchsia_component::client::connect_to_protocol::<fsys2::RealmMarker>()
.expect("connect to Realm service");
start_provider(&realm, "a", &format!("{}#meta/provider-a.cm", TEST_PACKAGE)).await;
start_provider(&realm, "b", &format!("{}#meta/provider-b.cm", TEST_PACKAGE)).await;
let service_dir = fclient::open_service::<fexamples::BankAccountMarker>()
.expect("failed to open service dir");
let instances = files_async::readdir(&service_dir)
.await
.expect("failed to read entries from service_dir")
.into_iter()
.map(|dirent| dirent.name);
// Debit both bank accounts by $5.
for instance in instances {
let proxy = fclient::connect_to_service_instance::<fexamples::BankAccountMarker>(&instance)
.expect("failed to connect to service instance");
let read_only_account = proxy.read_only().expect("read_only protocol");
let owner = read_only_account.get_owner().await.expect("failed to get owner");
let initial_balance = read_only_account.get_balance().await.expect("failed to get_balance");
info!("retrieved account for owner '{}' with balance ${}", &owner, &initial_balance);
let read_write_account = proxy.read_write().expect("read_write protocol");
assert_eq!(read_write_account.get_owner().await.expect("failed to get_owner"), owner);
assert_eq!(
read_write_account.get_balance().await.expect("failed to get_balance"),
initial_balance
);
info!("debiting account of owner '{}'", &owner);
read_write_account.debit(5).await.expect("failed to debit");
assert_eq!(
read_write_account.get_balance().await.expect("failed to get_balance"),
initial_balance - 5
);
}
}
async fn start_provider(realm: &fsys2::RealmProxy, name: &str, url: &str) -> fio::DirectoryProxy {
info!("creating BankAccount provider \"{}\" with url={}", name, url);
realm
.create_child(
&mut fsys2::CollectionRef { name: COLLECTION_NAME.to_string() },
fsys2::ChildDecl {
name: Some(name.to_string()),
url: Some(url.to_string()),
startup: Some(fsys2::StartupMode::Lazy),
environment: None,
..fsys2::ChildDecl::EMPTY
},
)
.await
.expect("failed to make create_child FIDL call")
.expect("failed to create_child");
let (exposed_dir, exposed_dir_server_end) =
fidl::endpoints::create_proxy::<fio::DirectoryMarker>()
.expect("failed to create endpoints");
info!("binding to BankAccount provider \"{}\" with url={}", name, url);
realm
.bind_child(
&mut fsys2::ChildRef {
name: name.to_string(),
collection: Some(COLLECTION_NAME.to_string()),
},
exposed_dir_server_end,
)
.await
.expect("failed to make bind_child FIDL call")
.expect("failed to bind_child");
exposed_dir
}
| 42.728261 | 100 | 0.646909 |
8aba39b2e970ef87f0b022d9bc4f0f55980b77d6 | 3,595 | use bytes::{BufMut, Bytes, BytesMut};
use nu_errors::ShellError;
extern crate encoding_rs;
use encoding_rs::{CoderResult, Decoder, Encoding, UTF_8};
#[cfg(not(test))]
const OUTPUT_BUFFER_SIZE: usize = 8192;
#[cfg(test)]
const OUTPUT_BUFFER_SIZE: usize = 4;
#[derive(Debug, Eq, PartialEq)]
pub enum StringOrBinary {
String(String),
Binary(Vec<u8>),
}
pub struct MaybeTextCodec {
decoder: Decoder,
}
impl MaybeTextCodec {
// The constructor takes an Option<&'static Encoding>, because an absence of an encoding indicates that we want BOM sniffing enabled
pub fn new(encoding: Option<&'static Encoding>) -> Self {
let decoder = match encoding {
Some(e) => e.new_decoder_with_bom_removal(),
None => UTF_8.new_decoder(),
};
MaybeTextCodec { decoder }
}
}
impl Default for MaybeTextCodec {
fn default() -> Self {
MaybeTextCodec {
decoder: UTF_8.new_decoder(),
}
}
}
impl futures_codec::Encoder for MaybeTextCodec {
type Item = StringOrBinary;
type Error = std::io::Error;
fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
match item {
StringOrBinary::String(s) => {
dst.reserve(s.len());
dst.put(s.as_bytes());
Ok(())
}
StringOrBinary::Binary(b) => {
dst.reserve(b.len());
dst.put(Bytes::from(b));
Ok(())
}
}
}
}
impl futures_codec::Decoder for MaybeTextCodec {
type Item = StringOrBinary;
type Error = ShellError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if src.is_empty() {
return Ok(None);
}
let mut s = String::with_capacity(OUTPUT_BUFFER_SIZE);
let (res, _read, replacements) = self.decoder.decode_to_string(src, &mut s, false);
let result = if replacements {
// If we had to make replacements when converting to utf8, fall back to binary
StringOrBinary::Binary(src.to_vec())
} else {
// If original buffer size is too small, we continue to allocate new Strings and append
// them to the result until the input buffer is smaller than the allocated String
if let CoderResult::OutputFull = res {
let mut buffer = String::with_capacity(OUTPUT_BUFFER_SIZE);
loop {
let (res, _read, _replacements) =
self.decoder
.decode_to_string(&src[s.len()..], &mut buffer, false);
s.push_str(&buffer);
if let CoderResult::InputEmpty = res {
break;
}
buffer.clear();
}
}
StringOrBinary::String(s)
};
src.clear();
Ok(Some(result))
}
}
#[cfg(test)]
mod tests {
use super::{MaybeTextCodec, StringOrBinary};
use bytes::BytesMut;
use futures_codec::Decoder;
// TODO: Write some more tests
#[test]
fn should_consume_all_bytes_from_source_when_temporary_buffer_overflows() {
let mut maybe_text = MaybeTextCodec::new(None);
let mut bytes = BytesMut::from("0123456789");
let text = maybe_text.decode(&mut bytes);
assert_eq!(
Ok(Some(StringOrBinary::String("0123456789".to_string()))),
text
);
assert!(bytes.is_empty());
}
}
| 28.085938 | 136 | 0.564673 |
fc2b9caf0d64498c6f67a5a47c075245da460cb3 | 716 | // Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
// !! DO NOT EDIT !!
// To regenerate this file, run `fuzz/generate_proto_tests.py`.
#![no_main]
#![allow(non_snake_case)]
use libfuzzer_sys::fuzz_target;
use manticore::protocol::Command;
use manticore::protocol::wire::ToWire;
use manticore::protocol::borrowed::AsStatic;
use manticore::protocol::borrowed::Borrowed;
use manticore::protocol::challenge::Challenge as C;
type Req<'a> = <C as Command<'a>>::Req;
fuzz_target!(|data: AsStatic<'static, Req<'static>>| {
let mut out = [0u8; 1024];
let _ = Req::borrow(&data).to_wire(&mut &mut out[..]);
});
| 27.538462 | 75 | 0.705307 |
01d76c7f845ba0157a2abf7ca9dd4e0295aaeac9 | 15,509 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::syntax_error::SyntaxError;
use crate::token_kind::TokenKind;
use common::{Location, Named, SourceLocationKey, Span, WithLocation};
use interner::StringKey;
use std::fmt;
pub type SyntaxResult<T> = Result<T, Vec<SyntaxError>>;
pub type ParseResult<T> = Result<T, ()>;
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Document {
pub span: Span,
pub definitions: Vec<ExecutableDefinition>,
}
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum ExecutableDefinition {
Operation(OperationDefinition),
Fragment(FragmentDefinition),
}
impl ExecutableDefinition {
pub fn location(&self) -> Location {
match self {
ExecutableDefinition::Operation(node) => node.location,
ExecutableDefinition::Fragment(node) => node.location,
}
}
pub fn name(&self) -> Option<StringKey> {
match self {
ExecutableDefinition::Operation(node) => {
if let Some(name) = &node.name {
Some(name.value)
} else {
None
}
}
ExecutableDefinition::Fragment(node) => Some(node.name.value),
}
}
}
impl fmt::Debug for ExecutableDefinition {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ExecutableDefinition::Operation(node) => f.write_fmt(format_args!("{:#?}", node)),
ExecutableDefinition::Fragment(node) => f.write_fmt(format_args!("{:#?}", node)),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct OperationDefinition {
pub location: Location,
pub operation: Option<(Token, OperationKind)>,
pub name: Option<Identifier>,
pub variable_definitions: Option<List<VariableDefinition>>,
pub directives: Vec<Directive>,
pub selections: List<Selection>,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum OperationKind {
Query,
Mutation,
Subscription,
}
impl fmt::Display for OperationKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
OperationKind::Query => f.write_str("query"),
OperationKind::Mutation => f.write_str("mutation"),
OperationKind::Subscription => f.write_str("subscription"),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct FragmentDefinition {
pub location: Location,
pub fragment: Token,
pub name: Identifier,
// pub variable_definitions: Option<List<VariableDefinition>>,
pub type_condition: TypeCondition,
pub directives: Vec<Directive>,
pub selections: List<Selection>,
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct VariableDefinition {
pub span: Span,
pub name: VariableIdentifier,
pub colon: Token,
pub type_: TypeAnnotation,
pub default_value: Option<DefaultValue>,
pub directives: Vec<Directive>,
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Directive {
pub span: Span,
pub at: Token,
pub name: Identifier,
pub arguments: Option<List<Argument>>,
}
// Primitive Types
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct List<T> {
pub span: Span,
pub start: Token,
pub items: Vec<T>,
pub end: Token,
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Argument {
pub span: Span,
pub name: Identifier,
pub colon: Token,
pub value: Value,
}
impl fmt::Display for Argument {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{}: {}", self.name, self.value))
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Alias {
pub span: Span,
pub alias: Identifier,
pub colon: Token,
}
impl fmt::Display for Alias {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{}", self.alias))
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct TypeCondition {
pub span: Span,
pub on: Token,
pub type_: Identifier,
}
impl fmt::Display for TypeCondition {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("on {}", self.type_))
}
}
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Token {
pub span: Span,
pub kind: TokenKind,
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct VariableIdentifier {
pub span: Span,
pub token: Token,
pub name: StringKey,
}
impl fmt::Display for VariableIdentifier {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("${}", self.name))
}
}
impl VariableIdentifier {
pub fn name_with_location(&self, file: SourceLocationKey) -> WithLocation<StringKey> {
WithLocation::from_span(file, self.span, self.name)
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Identifier {
pub span: Span,
pub token: Token,
pub value: StringKey,
}
impl fmt::Display for Identifier {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{}", self.value))
}
}
impl Identifier {
pub fn name_with_location(&self, file: SourceLocationKey) -> WithLocation<StringKey> {
WithLocation::from_span(file, self.span, self.value)
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct DefaultValue {
pub span: Span,
pub equals: Token,
pub value: ConstantValue,
}
impl fmt::Display for DefaultValue {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{}", self.value))
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum ConstantValue {
Int(IntNode),
Float(FloatNode),
String(StringNode),
Boolean(BooleanNode),
Null(Token),
Enum(EnumNode),
List(List<ConstantValue>),
Object(List<ConstantArgument>),
}
impl ConstantValue {
pub fn span(&self) -> Span {
match self {
ConstantValue::Int(value) => value.token.span,
ConstantValue::Float(value) => value.token.span,
ConstantValue::String(value) => value.token.span,
ConstantValue::Boolean(value) => value.token.span,
ConstantValue::Null(value) => value.span,
ConstantValue::Enum(value) => value.token.span,
ConstantValue::List(value) => value.span,
ConstantValue::Object(value) => value.span,
}
}
}
impl fmt::Display for ConstantValue {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ConstantValue::Int(value) => f.write_fmt(format_args!("{}", value)),
ConstantValue::Float(value) => f.write_fmt(format_args!("{}", value)),
ConstantValue::String(value) => f.write_fmt(format_args!("\"{}\"", value)),
ConstantValue::Boolean(value) => f.write_fmt(format_args!("{}", value)),
ConstantValue::Null(_) => f.write_str("null"),
ConstantValue::Enum(value) => f.write_fmt(format_args!("{}", value)),
ConstantValue::List(value) => f.write_fmt(format_args!(
"[{}]",
value
.items
.iter()
.map(|item| item.to_string())
.collect::<Vec<String>>()
.join(", ")
)),
ConstantValue::Object(value) => f.write_fmt(format_args!(
"{{{}}}",
value
.items
.iter()
.map(|item| item.to_string())
.collect::<Vec<String>>()
.join(", ")
)),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ConstantArgument {
pub span: Span,
pub name: Identifier,
pub colon: Token,
pub value: ConstantValue,
}
impl fmt::Display for ConstantArgument {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{}: {}", self.name, self.value))
}
}
impl Named for ConstantArgument {
fn name(&self) -> StringKey {
self.name.value
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct IntNode {
pub token: Token,
pub value: i64,
}
impl fmt::Display for IntNode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{}", self.value))
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct FloatNode {
pub token: Token,
pub value: FloatValue,
/// Preserve a value, as it was represented in the source
/// TODO: We may remove this, as we migrate from JS
pub source_value: StringKey,
}
impl fmt::Display for FloatNode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{}", self.source_value))
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct StringNode {
pub token: Token,
pub value: StringKey,
}
impl fmt::Display for StringNode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{}", self.value))
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct EnumNode {
pub token: Token,
pub value: StringKey,
}
impl fmt::Display for EnumNode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{}", self.value))
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BooleanNode {
pub token: Token,
pub value: bool,
}
impl fmt::Display for BooleanNode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!(
"{}",
if self.value { "true" } else { "false" }
))
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum Value {
Constant(ConstantValue),
Variable(VariableIdentifier),
List(List<Value>),
Object(List<Argument>),
}
impl fmt::Display for Value {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Value::Constant(value) => f.write_fmt(format_args!("{}", value)),
Value::Variable(value) => f.write_fmt(format_args!("{}", value)),
Value::List(value) => f.write_fmt(format_args!(
"[{}]",
value
.items
.iter()
.map(|item| item.to_string())
.collect::<Vec<String>>()
.join(", ")
)),
Value::Object(value) => f.write_fmt(format_args!(
"{{{}}}",
value
.items
.iter()
.map(|item| item.to_string())
.collect::<Vec<String>>()
.join(", ")
)),
}
}
}
impl Value {
pub fn is_constant(&self) -> bool {
match self {
Value::Constant(..) => true,
_ => false,
}
}
pub fn span(&self) -> Span {
match self {
Value::Constant(value) => value.span(),
Value::Variable(value) => value.span,
Value::List(value) => value.span,
Value::Object(value) => value.span,
}
}
}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct FloatValue(u64);
impl FloatValue {
pub fn new(v: f64) -> Self {
Self(v.to_bits())
}
pub fn as_float(self) -> f64 {
f64::from_bits(self.0)
}
}
impl fmt::Debug for FloatValue {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{}", self.as_float()))
}
}
impl fmt::Display for FloatValue {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{}", self.as_float()))
}
}
impl std::convert::From<i64> for FloatValue {
fn from(value: i64) -> Self {
FloatValue::new(value as f64)
}
}
// Types
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum TypeAnnotation {
Named(Identifier),
List(Box<ListTypeAnnotation>),
NonNull(Box<NonNullTypeAnnotation>),
}
impl fmt::Display for TypeAnnotation {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TypeAnnotation::Named(named) => f.write_fmt(format_args!("{}", named)),
TypeAnnotation::List(list) => f.write_fmt(format_args!("[{}]", list.type_)),
TypeAnnotation::NonNull(non_null) => f.write_fmt(format_args!("{}!", non_null.type_)),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ListTypeAnnotation {
pub span: Span,
pub open: Token,
pub type_: TypeAnnotation,
pub close: Token,
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct NonNullTypeAnnotation {
pub span: Span,
pub type_: TypeAnnotation,
pub exclamation: Token,
}
// Selections
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum Selection {
FragmentSpread(FragmentSpread),
InlineFragment(InlineFragment),
LinkedField(LinkedField),
ScalarField(ScalarField),
}
impl Selection {
pub fn span(&self) -> Span {
match self {
Selection::FragmentSpread(node) => node.span,
Selection::InlineFragment(node) => node.span,
Selection::LinkedField(node) => node.span,
Selection::ScalarField(node) => node.span,
}
}
}
impl fmt::Debug for Selection {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Selection::FragmentSpread(node) => f.write_fmt(format_args!("{:#?}", node)),
Selection::InlineFragment(node) => f.write_fmt(format_args!("{:#?}", node)),
Selection::LinkedField(node) => f.write_fmt(format_args!("{:#?}", node)),
Selection::ScalarField(node) => f.write_fmt(format_args!("{:#?}", node)),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct FragmentSpread {
pub span: Span,
pub spread: Token,
pub name: Identifier,
pub directives: Vec<Directive>,
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct InlineFragment {
pub span: Span,
pub spread: Token,
pub type_condition: Option<TypeCondition>,
pub directives: Vec<Directive>,
pub selections: List<Selection>,
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct LinkedField {
pub span: Span,
pub alias: Option<Alias>,
pub name: Identifier,
pub arguments: Option<List<Argument>>,
pub directives: Vec<Directive>,
pub selections: List<Selection>,
}
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ScalarField {
pub span: Span,
pub alias: Option<Alias>,
pub name: Identifier,
pub arguments: Option<List<Argument>>,
pub directives: Vec<Directive>,
}
| 28.096014 | 98 | 0.595912 |
2815e126f36999c76bc5fbdaaf4e08b41a09f48e | 602 | //! Basic cleaner module for Maven projects.
use super::Cleaner;
use std::io;
/// Cleaner implementation for Maven projects.
pub struct MavenCleaner;
impl Cleaner for MavenCleaner {
/// Returns the name of this cleaner.
fn name(&self) -> &str {
"Maven"
}
/// Returns the triggers associated with this cleaner.
fn triggers(&self) -> &[&str] {
&["pom.xml"]
}
/// Cleans the provided directory based on a Git structure.
fn clean(&self, dir: &str) -> io::Result<()> {
super::cmd(dir, "mvn", &["clean"])?;
super::del(dir, "target")
}
}
| 25.083333 | 63 | 0.596346 |
d9d6071ca050412145632e81ddb6c06d22bb20f4 | 1,574 | #![allow(unused_imports)]
use super::*;
use wasm_bindgen::prelude::*;
#[cfg(web_sys_unstable_apis)]
#[wasm_bindgen]
extern "C" {
# [wasm_bindgen (extends = :: js_sys :: Object , js_name = GPUExternalTextureBindingLayout)]
#[derive(Debug, Clone, PartialEq, Eq)]
#[doc = "The `GpuExternalTextureBindingLayout` dictionary."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `GpuExternalTextureBindingLayout`*"]
#[doc = ""]
#[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"]
#[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"]
pub type GpuExternalTextureBindingLayout;
}
#[cfg(web_sys_unstable_apis)]
impl GpuExternalTextureBindingLayout {
#[doc = "Construct a new `GpuExternalTextureBindingLayout`."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `GpuExternalTextureBindingLayout`*"]
#[doc = ""]
#[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"]
#[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"]
pub fn new() -> Self {
#[allow(unused_mut)]
let mut ret: Self = ::wasm_bindgen::JsCast::unchecked_into(::js_sys::Object::new());
ret
}
}
#[cfg(web_sys_unstable_apis)]
impl Default for GpuExternalTextureBindingLayout {
fn default() -> Self {
Self::new()
}
}
| 42.540541 | 128 | 0.677255 |
3a8042e96f3d6fa295757e8f37f2856a5570379f | 11,017 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use eq_modulo_pos::EqModuloPos;
use intern::string::BytesId;
use oxidized::file_pos_small::FilePosSmall;
use oxidized::pos_span_raw::PosSpanRaw;
use oxidized::pos_span_tiny::PosSpanTiny;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::fmt;
use std::hash::Hash;
mod relative_path;
mod symbol;
mod to_oxidized;
pub use oxidized::file_pos_large::FilePosLarge;
pub use relative_path::*;
pub use symbol::*;
pub use to_oxidized::ToOxidized;
pub trait Pos:
Eq
+ Hash
+ Clone
+ std::fmt::Debug
+ Serialize
+ DeserializeOwned
+ for<'a> From<&'a oxidized::pos::Pos>
+ for<'a> From<&'a oxidized_by_ref::pos::Pos<'a>>
+ for<'a> ToOxidized<'a, Output = &'a oxidized_by_ref::pos::Pos<'a>>
+ EqModuloPos
+ 'static
{
/// Make a new instance. If the implementing Pos is stateful,
/// it will call cons() to obtain interned values to construct the instance.
fn mk(cons: impl FnOnce() -> (RelativePath, FilePosLarge, FilePosLarge)) -> Self;
fn none() -> Self;
fn from_ast(pos: &oxidized::pos::Pos) -> Self {
Self::mk(|| {
let PosSpanRaw { start, end } = pos.to_raw_span();
(pos.filename().into(), start, end)
})
}
fn from_decl(pos: &oxidized_by_ref::pos::Pos<'_>) -> Self {
Self::mk(|| {
let PosSpanRaw { start, end } = pos.to_raw_span();
(pos.filename().into(), start, end)
})
}
fn is_hhi(&self) -> bool;
}
/// Represents a closed-ended range [start, end] in a file.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
enum PosImpl {
Small {
prefix: Prefix,
suffix: BytesId,
span: Box<(FilePosSmall, FilePosSmall)>,
},
Large {
prefix: Prefix,
suffix: BytesId,
span: Box<(FilePosLarge, FilePosLarge)>,
},
Tiny {
prefix: Prefix,
suffix: BytesId,
span: PosSpanTiny,
},
}
static_assertions::assert_eq_size!(PosImpl, u128);
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct BPos(PosImpl);
impl Pos for BPos {
fn mk(cons: impl FnOnce() -> (RelativePath, FilePosLarge, FilePosLarge)) -> Self {
let (file, start, end) = cons();
Self::new(file, start, end)
}
fn none() -> Self {
BPos::none()
}
fn is_hhi(&self) -> bool {
let BPos(pos_impl) = self;
let prefix = match *pos_impl {
PosImpl::Small { prefix, .. } => prefix,
PosImpl::Large { prefix, .. } => prefix,
PosImpl::Tiny { prefix, .. } => prefix,
};
prefix == Prefix::Hhi
}
}
impl BPos {
pub fn new(file: RelativePath, start: FilePosLarge, end: FilePosLarge) -> Self {
let prefix = file.prefix();
let suffix = file.suffix();
if let Some(span) = PosSpanTiny::make(&start, &end) {
return BPos(PosImpl::Tiny {
prefix,
suffix,
span,
});
}
let (lnum, bol, offset) = start.line_beg_offset();
if let Some(start) = FilePosSmall::from_lnum_bol_offset(lnum, bol, offset) {
let (lnum, bol, offset) = end.line_beg_offset();
if let Some(end) = FilePosSmall::from_lnum_bol_offset(lnum, bol, offset) {
let span = Box::new((start, end));
return BPos(PosImpl::Small {
prefix,
suffix,
span,
});
}
}
let span = Box::new((start, end));
BPos(PosImpl::Large {
prefix,
suffix,
span,
})
}
pub const fn none() -> Self {
let file = RelativePath::empty();
Self(PosImpl::Tiny {
prefix: file.prefix(),
suffix: file.suffix(),
span: PosSpanTiny::make_dummy(),
})
}
pub fn is_none(&self) -> bool {
match self {
BPos(PosImpl::Tiny { span, .. }) => span.is_dummy() && self.file().is_empty(),
_ => false,
}
}
pub const fn file(&self) -> RelativePath {
match self.0 {
PosImpl::Small { prefix, suffix, .. }
| PosImpl::Large { prefix, suffix, .. }
| PosImpl::Tiny { prefix, suffix, .. } => RelativePath::from_bytes_id(prefix, suffix),
}
}
}
impl fmt::Debug for BPos {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut do_fmt = |start_line, start_col, end_line, end_col| {
if start_line == end_line {
write!(
f,
"Pos({:?}, {}:{}-{})",
&self.file(),
&start_line,
&(start_col + 1),
&(end_col + 1),
)
} else {
write!(
f,
"Pos({:?}, {}:{}-{}:{})",
&self.file(),
&start_line,
&(start_col + 1),
&end_line,
&(end_col + 1),
)
}
};
if self.is_none() {
return write!(f, "Pos(None)");
}
match &self.0 {
PosImpl::Small { span, .. } => {
let (start, end) = &**span;
do_fmt(start.line(), start.column(), end.line(), end.column())
}
PosImpl::Large { span, .. } => {
let (start, end) = &**span;
do_fmt(start.line(), start.column(), end.line(), end.column())
}
PosImpl::Tiny { span, .. } => {
let span = span.to_raw_span();
do_fmt(
span.start.line(),
span.start.column(),
span.end.line(),
span.end.column(),
)
}
}
}
}
impl EqModuloPos for BPos {
fn eq_modulo_pos(&self, _rhs: &Self) -> bool {
true
}
}
impl<'a> From<&'a oxidized::pos::Pos> for BPos {
fn from(pos: &'a oxidized::pos::Pos) -> Self {
Self::from_ast(pos)
}
}
impl<'a> From<&'a oxidized_by_ref::pos::Pos<'a>> for BPos {
fn from(pos: &'a oxidized_by_ref::pos::Pos<'a>) -> Self {
Self::from_decl(pos)
}
}
impl<'a> ToOxidized<'a> for BPos {
type Output = &'a oxidized_by_ref::pos::Pos<'a>;
fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
let file = self.file().to_oxidized(arena);
arena.alloc(match &self.0 {
PosImpl::Small { span, .. } => {
let (start, end) = **span;
oxidized_by_ref::pos::Pos::from_raw_span(
arena,
file,
PosSpanRaw {
start: start.into(),
end: end.into(),
},
)
}
PosImpl::Large { span, .. } => {
let (start, end) = **span;
oxidized_by_ref::pos::Pos::from_raw_span(arena, file, PosSpanRaw { start, end })
}
PosImpl::Tiny { span, .. } => {
let span = span.to_raw_span();
oxidized_by_ref::pos::Pos::from_raw_span(arena, file, span)
}
})
}
}
/// A stateless sentinel Pos.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct NPos;
impl Pos for NPos {
fn mk(_cons: impl FnOnce() -> (RelativePath, FilePosLarge, FilePosLarge)) -> Self {
NPos
}
fn none() -> Self {
NPos
}
fn is_hhi(&self) -> bool {
false // See T81321312.
// Note(SF, 2022-03-23): Jake advises "This definition will lead to a
// small behavior difference between `NPos` and `BPos`: when
// typechecking in posisition-free mode we'll register depedencies on
// hhi files but in positioned mode we won't. If this turns out to be
// problematic, one solution is to make `NPos` store a `u8` rather than
// being zero-sized and in that we can store a bit for whether the
// position is in a hhi file."
}
}
impl EqModuloPos for NPos {
fn eq_modulo_pos(&self, _rhs: &Self) -> bool {
true
}
}
impl<'a> From<&'a oxidized::pos::Pos> for NPos {
fn from(pos: &'a oxidized::pos::Pos) -> Self {
Self::from_ast(pos)
}
}
impl<'a> From<&'a oxidized_by_ref::pos::Pos<'a>> for NPos {
fn from(pos: &'a oxidized_by_ref::pos::Pos<'a>) -> Self {
Self::from_decl(pos)
}
}
impl<'a> ToOxidized<'a> for NPos {
type Output = &'a oxidized_by_ref::pos::Pos<'a>;
fn to_oxidized(&self, _arena: &'a bumpalo::Bump) -> Self::Output {
oxidized_by_ref::pos::Pos::none()
}
}
#[derive(Clone, PartialEq, Eq, EqModuloPos, Hash, Serialize, Deserialize)]
pub struct Positioned<S, P> {
// Caution: field order will matter if we ever derive
// `ToOcamlRep`/`FromOcamlRep` for this type.
pos: P,
id: S,
}
impl<S: fmt::Debug, P: fmt::Debug> fmt::Debug for Positioned<S, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if std::mem::size_of::<P>() == 0 {
write!(f, "{:?}", &self.id)
} else {
f.debug_tuple("").field(&self.pos).field(&self.id).finish()
}
}
}
impl<S, P> Positioned<S, P> {
pub fn new(pos: P, id: S) -> Self {
Self { pos, id }
}
pub fn pos(&self) -> &P {
&self.pos
}
pub fn id_ref(&self) -> &S {
&self.id
}
}
impl<S: Copy, P> Positioned<S, P> {
pub fn id(&self) -> S {
self.id
}
}
impl<'a, S: From<&'a str>, P: Pos> From<&'a oxidized::ast_defs::Id> for Positioned<S, P> {
fn from(pos_id: &'a oxidized::ast_defs::Id) -> Self {
let oxidized::ast_defs::Id(pos, id) = pos_id;
Self::new(Pos::from_ast(pos), S::from(id))
}
}
impl<'a, S: From<&'a str>, P: Pos> From<oxidized_by_ref::ast_defs::Id<'a>> for Positioned<S, P> {
fn from(pos_id: oxidized_by_ref::ast_defs::Id<'a>) -> Self {
let oxidized_by_ref::ast_defs::Id(pos, id) = pos_id;
Self::new(Pos::from_decl(pos), S::from(id))
}
}
impl<'a, S: From<&'a str>, P: Pos> From<oxidized_by_ref::typing_defs::PosId<'a>>
for Positioned<S, P>
{
fn from(pos_id: oxidized_by_ref::typing_defs::PosId<'a>) -> Self {
let (pos, id) = pos_id;
Self::new(Pos::from_decl(pos), S::from(id))
}
}
impl<'a, S: ToOxidized<'a, Output = &'a str>, P: Pos> ToOxidized<'a> for Positioned<S, P> {
type Output = oxidized_by_ref::typing_reason::PosId<'a>;
fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
(self.pos.to_oxidized(arena), self.id.to_oxidized(arena))
}
}
| 29.300532 | 98 | 0.515839 |
e8db1c01bbff13ca360aceed77cbf39ae7ee56d6 | 2,689 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub struct Config {
pub(crate) endpoint_resolver: ::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>,
pub(crate) region: Option<aws_types::region::Region>,
pub(crate) credentials_provider:
std::sync::Arc<dyn aws_auth::provider::AsyncProvideCredentials>,
}
impl std::fmt::Debug for Config {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut config = f.debug_struct("Config");
config.finish()
}
}
impl Config {
pub fn builder() -> Builder {
Builder::default()
}
/// The signature version 4 service signing name to use in the credential scope when signing requests.
///
/// The signing service may be overidden by the `Endpoint`, or by specifying a custom [`SigningService`](aws_types::SigningService) during
/// operation construction
pub fn signing_service(&self) -> &'static str {
"workmailmessageflow"
}
}
#[derive(Default)]
pub struct Builder {
endpoint_resolver: Option<::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>>,
region: Option<aws_types::region::Region>,
credentials_provider: Option<std::sync::Arc<dyn aws_auth::provider::AsyncProvideCredentials>>,
}
impl Builder {
pub fn new() -> Self {
Self::default()
}
pub fn endpoint_resolver(
mut self,
endpoint_resolver: impl aws_endpoint::ResolveAwsEndpoint + 'static,
) -> Self {
self.endpoint_resolver = Some(::std::sync::Arc::new(endpoint_resolver));
self
}
pub fn region(mut self, region_provider: impl aws_types::region::ProvideRegion) -> Self {
self.region = region_provider.region();
self
}
/// Set the credentials provider for this service
pub fn credentials_provider(
mut self,
credentials_provider: impl aws_auth::provider::AsyncProvideCredentials + 'static,
) -> Self {
self.credentials_provider = Some(std::sync::Arc::new(credentials_provider));
self
}
pub fn build(self) -> Config {
Config {
endpoint_resolver: self
.endpoint_resolver
.unwrap_or_else(|| ::std::sync::Arc::new(crate::aws_endpoint::endpoint_resolver())),
region: {
use aws_types::region::ProvideRegion;
self.region
.or_else(|| aws_types::region::default_provider().region())
},
credentials_provider: self
.credentials_provider
.unwrap_or_else(|| std::sync::Arc::new(aws_auth::provider::default_provider())),
}
}
}
| 37.873239 | 142 | 0.632949 |
e6e8b911852b6b1d21ae505f7eb448d646615cd6 | 10,755 | #![feature(duration_float)]
#[macro_use]
extern crate clap;
use noria::{Builder, DataType, LocalAuthority, ReuseConfigType, SyncHandle};
use std::collections::HashMap;
use std::fs::File;
use std::io::Write;
use std::{thread, time};
#[macro_use]
mod populate;
use crate::populate::Populate;
pub struct Backend {
g: SyncHandle<LocalAuthority>,
}
#[derive(PartialEq)]
enum PopulateType {
Before,
After,
NoPopulate,
}
impl Backend {
    /// Builds and starts a local Noria worker configured by the CLI flags.
    ///
    /// `partial` toggles partial materialization and `reuse` selects the query
    /// reuse algorithm (panics on an unknown name). `_shard` is currently
    /// ignored.
    pub fn new(partial: bool, _shard: bool, reuse: &str) -> Backend {
        let mut cb = Builder::default();
        let log = noria::logger_pls();
        if !partial {
            cb.disable_partial();
        }
        match reuse {
            "finkelstein" => cb.set_reuse(ReuseConfigType::Finkelstein),
            "full" => cb.set_reuse(ReuseConfigType::Full),
            "noreuse" => cb.set_reuse(ReuseConfigType::NoReuse),
            "relaxed" => cb.set_reuse(ReuseConfigType::Relaxed),
            _ => panic!("reuse configuration not supported"),
        }
        // The original cloned the logger into a second binding before handing
        // it over; the clone was redundant since `log` was never used again.
        cb.log_with(log);
        let g = cb.start_simple().unwrap();
        Backend { g }
    }
    /// Inserts `records` into base table `name` and returns how many rows were
    /// written, printing the measured PUT throughput.
    pub fn populate(&mut self, name: &'static str, records: Vec<Vec<DataType>>) -> usize {
        let mut mutator = self.g.table(name).unwrap().into_sync();
        let start = time::Instant::now();
        let n = records.len();
        // Consume the vector directly; no need to `drain` a buffer we own.
        for r in records {
            mutator.insert(r).unwrap();
        }
        let dur = start.elapsed().as_secs_f64();
        println!(
            "Inserted {} {} in {:.2}s ({:.2} PUTs/sec)!",
            n,
            name,
            dur,
            n as f64 / dur
        );
        n
    }
    /// Creates a security universe for the given user context on the worker.
    fn login(&mut self, user_context: HashMap<String, DataType>) -> Result<(), String> {
        self.g
            .on_worker(|w| w.create_universe(user_context.clone()))
            .unwrap();
        Ok(())
    }
    /// Loads the security-policy JSON from `config_file` into the worker.
    fn set_security_config(&mut self, config_file: &str) {
        use std::io::Read;
        let mut config = String::new();
        let mut cf = File::open(config_file).unwrap();
        cf.read_to_string(&mut config).unwrap();
        // Install recipe with policies
        self.g.on_worker(|w| w.set_security_config(config)).unwrap();
    }
    /// Installs the schema — plus the queries, when given — as a Noria recipe.
    fn migrate(&mut self, schema_file: &str, query_file: Option<&str>) -> Result<(), String> {
        use std::io::Read;
        // Read the schema straight into the recipe buffer (the original read
        // into a scratch string, cloned it, cleared it, and reused it).
        let mut recipe = String::new();
        File::open(schema_file)
            .unwrap()
            .read_to_string(&mut recipe)
            .unwrap();
        // Append the query file, if one was given.
        if let Some(qf) = query_file {
            let mut queries = String::new();
            File::open(qf)
                .unwrap()
                .read_to_string(&mut queries)
                .unwrap();
            recipe.push_str("\n");
            recipe.push_str(&queries);
        }
        self.g.install_recipe(&recipe).unwrap();
        Ok(())
    }
}
/// Builds the one-entry user context (just the user's `id`) used for login.
fn make_user(id: i32) -> HashMap<String, DataType> {
    let mut ctx = HashMap::new();
    ctx.insert("id".to_string(), id.into());
    ctx
}
/// Entry point: parses CLI flags, builds the Piazza-like schema/queries with
/// security policies on a local Noria instance, optionally populates it with
/// generated data, logs users in (one migration each), and reports timings.
fn main() {
    use clap::{App, Arg};
    let args = App::new("piazza")
        .version("0.1")
        .about("Benchmarks Piazza-like application with security policies.")
        .arg(
            Arg::with_name("schema")
                .short("s")
                .required(true)
                .default_value("benchmarks/piazza/schema.sql")
                .help("Schema file for Piazza application"),
        )
        .arg(
            Arg::with_name("queries")
                .short("q")
                .required(true)
                .default_value("benchmarks/piazza/post-queries.sql")
                .help("Query file for Piazza application"),
        )
        .arg(
            Arg::with_name("policies")
                .long("policies")
                .required(true)
                .default_value("benchmarks/piazza/complex-policies.json")
                .help("Security policies file for Piazza application"),
        )
        .arg(
            Arg::with_name("graph")
                .short("g")
                .default_value("pgraph.gv")
                .help("File to dump application's soup graph, if set"),
        )
        .arg(
            Arg::with_name("info")
                .short("i")
                .takes_value(true)
                .help("Directory to dump runtime process info (doesn't work on OSX)"),
        )
        .arg(
            Arg::with_name("reuse")
                .long("reuse")
                .default_value("full")
                .possible_values(&["noreuse", "finkelstein", "relaxed", "full"])
                .help("Query reuse algorithm"),
        )
        .arg(
            Arg::with_name("shard")
                .long("shard")
                .help("Enable sharding"),
        )
        .arg(
            Arg::with_name("partial")
                .long("partial")
                .help("Enable partial materialization"),
        )
        .arg(
            Arg::with_name("populate")
                .long("populate")
                .default_value("nopopulate")
                .possible_values(&["after", "before", "nopopulate"])
                .help("Populate app with randomly generated data"),
        )
        .arg(
            Arg::with_name("nusers")
                .short("u")
                .default_value("1000")
                .help("Number of users in the db"),
        )
        .arg(
            Arg::with_name("nlogged")
                .short("l")
                .default_value("1000")
                .help(
                    "Number of logged users. Must be less or equal than the number of users in the db",
                ),
        )
        .arg(
            Arg::with_name("nclasses")
                .short("c")
                .default_value("100")
                .help("Number of classes in the db"),
        )
        .arg(
            Arg::with_name("nposts")
                .short("p")
                .default_value("100000")
                .help("Number of posts in the db"),
        )
        .arg(
            Arg::with_name("private")
                .long("private")
                .default_value("0.1")
                .help("Percentage of private posts"),
        )
        .get_matches();
    println!("Starting benchmark...");
    // Read arguments
    let sloc = args.value_of("schema").unwrap();
    let qloc = args.value_of("queries").unwrap();
    let ploc = args.value_of("policies").unwrap();
    let gloc = args.value_of("graph");
    let iloc = args.value_of("info");
    let partial = args.is_present("partial");
    let shard = args.is_present("shard");
    let reuse = args.value_of("reuse").unwrap();
    let populate = args.value_of("populate").unwrap_or("nopopulate");
    let nusers = value_t_or_exit!(args, "nusers", i32);
    let nlogged = value_t_or_exit!(args, "nlogged", i32);
    let nclasses = value_t_or_exit!(args, "nclasses", i32);
    let nposts = value_t_or_exit!(args, "nposts", i32);
    let private = value_t_or_exit!(args, "private", f32);
    assert!(
        nlogged <= nusers,
        "nusers must be greater or equal to nlogged"
    );
    assert!(
        nusers >= populate::TAS_PER_CLASS as i32,
        "nusers must be greater or equal to TAS_PER_CLASS"
    );
    // Initialize backend application with some queries and policies
    println!("Initiliazing database schema...");
    let mut backend = Backend::new(partial, shard, reuse);
    // Schema first, then policies, then schema + queries (queries need the
    // policy config to already be installed).
    backend.migrate(sloc, None).unwrap();
    backend.set_security_config(ploc);
    backend.migrate(sloc, Some(qloc)).unwrap();
    let populate = match populate {
        "before" => PopulateType::Before,
        "after" => PopulateType::After,
        _ => PopulateType::NoPopulate,
    };
    // Generate synthetic users/classes/posts and write the base tables.
    let mut p = Populate::new(nposts, nusers, nclasses, private);
    p.enroll_students();
    let roles = p.get_roles();
    let users = p.get_users();
    let posts = p.get_posts();
    let classes = p.get_classes();
    backend.populate("Role", roles);
    println!("Waiting for groups to be constructed...");
    thread::sleep(time::Duration::from_millis(120 * (nclasses as u64)));
    backend.populate("User", users);
    backend.populate("Class", classes);
    if populate == PopulateType::Before {
        backend.populate("Post", posts.clone());
        println!("Waiting for posts to propagate...");
        thread::sleep(time::Duration::from_millis((nposts / 10) as u64));
    }
    println!("Finished writing! Sleeping for 2 seconds...");
    thread::sleep(time::Duration::from_millis(2000));
    // if partial, read 25% of the keys
    if partial {
        let leaf = "posts".to_string();
        let mut getter = backend.g.view(&leaf).unwrap().into_sync();
        for author in 0..nusers / 4 {
            getter.lookup(&[author.into()], false).unwrap();
        }
    }
    // Login a user
    println!("Login in users...");
    for i in 0..nlogged {
        let start = time::Instant::now();
        // NOTE(review): the login result is deliberately discarded via
        // `is_ok()` — a failed universe creation does not abort the run.
        backend.login(make_user(i)).is_ok();
        let dur = start.elapsed().as_secs_f64();
        println!("Migration {} took {:.2}s!", i, dur,);
        // if partial, read 25% of the keys
        if partial {
            let leaf = format!("posts_u{}", i);
            let mut getter = backend.g.view(&leaf).unwrap().into_sync();
            for author in 0..nusers / 4 {
                getter.lookup(&[author.into()], false).unwrap();
            }
        }
        // Periodically snapshot process stats (Linux procfs only).
        if iloc.is_some() && i % 50 == 0 {
            use std::fs;
            let fname = format!("{}-{}", iloc.unwrap(), i);
            fs::copy("/proc/self/status", fname).unwrap();
        }
    }
    if populate == PopulateType::After {
        backend.populate("Post", posts);
    }
    // Without partial materialization, do a full blocking read of every key
    // for every logged-in user and report aggregate GET throughput.
    if !partial {
        let mut dur = time::Duration::from_millis(0);
        for uid in 0..nlogged {
            let leaf = format!("posts_u{}", uid);
            let mut getter = backend.g.view(&leaf).unwrap().into_sync();
            let start = time::Instant::now();
            for author in 0..nusers {
                getter.lookup(&[author.into()], true).unwrap();
            }
            dur += start.elapsed();
        }
        let dur = dur.as_secs_f64();
        println!(
            "Read {} keys in {:.2}s ({:.2} GETs/sec)!",
            nlogged * nusers,
            dur,
            f64::from(nlogged * nusers) / dur,
        );
    }
    println!("Done with benchmark.");
    if gloc.is_some() {
        let graph_fname = gloc.unwrap();
        let mut gf = File::create(graph_fname).unwrap();
        assert!(write!(gf, "{}", backend.g.graphviz().unwrap()).is_ok());
    }
}
| 30.12605 | 99 | 0.521897 |
feaf120523c75f2b2ebd5efeef554dccd25e9235 | 416 | #[doc = "Reader of register EVENTS_LASTTX"]
pub type R = crate::R<u32, super::EVENTS_LASTTX>;
#[doc = "Writer for register EVENTS_LASTTX"]
pub type W = crate::W<u32, super::EVENTS_LASTTX>;
#[doc = "Register EVENTS_LASTTX `reset()`'s with value 0"]
impl crate::ResetValue for super::EVENTS_LASTTX {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        // Hardware reset value: the event register starts cleared.
        0
    }
}
// This event register exposes no individual fields, so the reader and writer
// only carry the raw bits; the marker impls below are intentionally empty.
impl R {}
impl W {}
| 27.733333 | 58 | 0.649038 |
8fa99bc64dba3f2801c9d39ae8aa3145524c36bc | 4,926 | //! Benchmark comparing the current GCD implemtation against an older one.
#![feature(test)]
extern crate num_integer;
extern crate num_traits;
extern crate test;
use num_integer::Integer;
use num_traits::{AsPrimitive, Bounded, Signed};
use test::{black_box, Bencher};
/// Baseline trait exposing the pre-optimization GCD implementation, so it can
/// be benchmarked against the current `Integer::gcd`.
trait GcdOld: Integer {
    fn gcd_old(&self, other: &Self) -> Self;
}
// Implements the baseline `GcdOld` for a signed integer type using Stein's
// binary GCD, including the `min_value()` edge cases.
macro_rules! impl_gcd_old_for_isize {
    ($T:ty) => {
        impl GcdOld for $T {
            /// Calculates the Greatest Common Divisor (GCD) of the number and
            /// `other`. The result is always positive.
            #[inline]
            fn gcd_old(&self, other: &Self) -> Self {
                // Use Stein's algorithm
                let mut m = *self;
                let mut n = *other;
                if m == 0 || n == 0 {
                    return (m | n).abs();
                }
                // find common factors of 2
                let shift = (m | n).trailing_zeros();
                // The algorithm needs positive numbers, but the minimum value
                // can't be represented as a positive one.
                // It's also a power of two, so the gcd can be
                // calculated by bitshifting in that case
                // Assuming two's complement, the number created by the shift
                // is positive for all numbers except gcd = abs(min value)
                // The call to .abs() causes a panic in debug mode
                if m == Self::min_value() || n == Self::min_value() {
                    return (1 << shift).abs();
                }
                // guaranteed to be positive now, rest like unsigned algorithm
                m = m.abs();
                n = n.abs();
                // divide n and m by 2 until odd
                // m inside loop
                n >>= n.trailing_zeros();
                while m != 0 {
                    m >>= m.trailing_zeros();
                    if n > m {
                        std::mem::swap(&mut n, &mut m)
                    }
                    m -= n;
                }
                n << shift
            }
        }
    };
}
// Instantiate the baseline for every signed width under test.
impl_gcd_old_for_isize!(i8);
impl_gcd_old_for_isize!(i16);
impl_gcd_old_for_isize!(i32);
impl_gcd_old_for_isize!(i64);
impl_gcd_old_for_isize!(isize);
impl_gcd_old_for_isize!(i128);
// Implements the baseline `GcdOld` for an unsigned integer type using Stein's
// binary GCD (no sign handling needed).
macro_rules! impl_gcd_old_for_usize {
    ($T:ty) => {
        impl GcdOld for $T {
            /// Calculates the Greatest Common Divisor (GCD) of the number and
            /// `other`. The result is always positive.
            #[inline]
            fn gcd_old(&self, other: &Self) -> Self {
                // Use Stein's algorithm
                let mut m = *self;
                let mut n = *other;
                if m == 0 || n == 0 {
                    return m | n;
                }
                // find common factors of 2
                let shift = (m | n).trailing_zeros();
                // divide n and m by 2 until odd
                // m inside loop
                n >>= n.trailing_zeros();
                while m != 0 {
                    m >>= m.trailing_zeros();
                    if n > m {
                        std::mem::swap(&mut n, &mut m)
                    }
                    m -= n;
                }
                n << shift
            }
        }
    };
}
// Instantiate the baseline for every unsigned width under test.
impl_gcd_old_for_usize!(u8);
impl_gcd_old_for_usize!(u16);
impl_gcd_old_for_usize!(u32);
impl_gcd_old_for_usize!(u64);
impl_gcd_old_for_usize!(usize);
impl_gcd_old_for_usize!(u128);
/// Return an iterator that yields all Fibonacci numbers fitting into a u128.
fn fibonacci() -> impl Iterator<Item = u128> {
    // Walk (F(k), F(k+1)) pairs starting from (0, 1), yielding each sum.
    // 185 terms is exactly how many fit in a u128.
    let mut pair: (u128, u128) = (0, 1);
    std::iter::from_fn(move || {
        let next = pair.0 + pair.1;
        pair = (pair.1, next);
        Some(next)
    })
    .take(185)
}
/// Benchmarks `gcd` over every consecutive Fibonacci pair that fits in `T`
/// (consecutive Fibonacci numbers are the classic GCD worst case).
fn run_bench<T: Integer + Bounded + Copy + 'static>(b: &mut Bencher, gcd: fn(&T, &T) -> T)
where
    T: AsPrimitive<u128>,
    u128: AsPrimitive<T>,
{
    let limit: u128 = T::max_value().as_();
    let fibs: Vec<u128> = fibonacci().collect();
    let mut pairs: Vec<(T, T)> = Vec::new();
    for w in fibs.windows(2) {
        // Keep only the pairs whose members both fit in T.
        if w[0] <= limit && w[1] <= limit {
            pairs.push((w[0].as_(), w[1].as_()));
        }
    }
    b.iter(|| {
        for (m, n) in &pairs {
            black_box(gcd(m, n));
        }
    });
}
// Generates a benchmark module per integer type, comparing the current `gcd`
// against the old baseline on the same Fibonacci-pair workload.
macro_rules! bench_gcd {
    ($T:ident) => {
        mod $T {
            use crate::{run_bench, GcdOld};
            use num_integer::Integer;
            use test::Bencher;
            #[bench]
            fn bench_gcd(b: &mut Bencher) {
                run_bench(b, $T::gcd);
            }
            #[bench]
            fn bench_gcd_old(b: &mut Bencher) {
                run_bench(b, $T::gcd_old);
            }
        }
    };
}
// One benchmark module per integer width, signed and unsigned.
bench_gcd!(u8);
bench_gcd!(u16);
bench_gcd!(u32);
bench_gcd!(u64);
bench_gcd!(u128);
bench_gcd!(i8);
bench_gcd!(i16);
bench_gcd!(i32);
bench_gcd!(i64);
bench_gcd!(i128);
| 27.830508 | 90 | 0.480715 |
6169187f560203a1bd87ce7b1b7f9d3502628135 | 1,713 | /*
* Copyright 2018-2020 TON DEV SOLUTIONS LTD.
*
* Licensed under the SOFTWARE EVALUATION License (the "License"); you may not use
* this file except in compliance with the License.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific TON DEV software governing permissions and
* limitations under the License.
*/
use crate::error::ClientError;
use std::fmt::Display;
const DEBOT: isize = ClientError::DEBOT; // 800
/// Debot error codes; discriminants start at `DEBOT + 1` (801) and increase
/// by one per variant.
pub enum ErrorCode {
    DebotStartFailed = DEBOT + 1,
    DebotFetchFailed,
    DebotExecutionFailed,
    DebotInvalidHandle,
}
/// Namespace type grouping the debot error constructors below.
pub struct Error;
/// Builds a `ClientError` carrying the given debot error `code` and `message`.
pub fn error(code: ErrorCode, message: String) -> ClientError {
    ClientError::with_code_message(code as u32, message)
}
impl Error {
pub fn start_failed(err: impl Display) -> ClientError {
error(
ErrorCode::DebotStartFailed,
format!("Debot start failed: {}", err),
)
}
pub fn fetch_failed(err: impl Display) -> ClientError {
error(
ErrorCode::DebotFetchFailed,
format!("Debot fetch failed: {}", err),
)
}
pub fn execute_failed(err: impl Display) -> ClientError {
error(
ErrorCode::DebotExecutionFailed,
format!("Debot execution failed: {}", err),
)
}
pub fn invalid_handle(handle: u32) -> ClientError {
error(
ErrorCode::DebotInvalidHandle,
format!("Invalid debot handle: {}", handle),
)
}
}
| 29.033898 | 82 | 0.629305 |
330206034c7960115b0c7aeb43f23fddfbffc648 | 11,491 | //! Iterators for encoding and decoding slices of string data.
use crate::{
decode_utf16_surrogate_pair,
error::{DecodeUtf16Error, DecodeUtf32Error},
is_utf16_high_surrogate, is_utf16_low_surrogate, is_utf16_surrogate,
};
use core::{
char,
iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator},
};
/// An iterator that decodes UTF-16 encoded code points from an iterator of [`u16`]s.
///
/// This struct is created by [`decode_utf16`][crate::decode_utf16]. See its documentation for more.
///
/// This struct is identical to [`char::DecodeUtf16`] except it is a [`DoubleEndedIterator`] if
/// `I` is.
#[derive(Debug, Clone)]
pub struct DecodeUtf16<I>
where
    I: Iterator<Item = u16>,
{
    iter: I,
    // A code unit rewound by forward decoding (the non-trailing unit read
    // after an unpaired lead surrogate); re-decoded on the next `next()`.
    forward_buf: Option<u16>,
    // Same as `forward_buf`, but for decoding from the back (`next_back`).
    back_buf: Option<u16>,
}
impl<I> DecodeUtf16<I>
where
    I: Iterator<Item = u16>,
{
    // Wraps the unit iterator with both rewind buffers empty.
    pub(crate) fn new(iter: I) -> Self {
        Self {
            iter,
            forward_buf: None,
            back_buf: None,
        }
    }
}
impl<I> Iterator for DecodeUtf16<I>
where
    I: Iterator<Item = u16>,
{
    type Item = Result<char, DecodeUtf16Error>;
    fn next(&mut self) -> Option<Self::Item> {
        // Copied from char::DecodeUtf16
        // Prefer a unit rewound by a previous failed pair decode; once the
        // underlying iterator is exhausted, drain whatever the backward
        // direction buffered so no unit is lost.
        let u = match self.forward_buf.take() {
            Some(buf) => buf,
            None => self.iter.next().or_else(|| self.back_buf.take())?,
        };
        if !is_utf16_surrogate(u) {
            // SAFETY: not a surrogate
            Some(Ok(unsafe { char::from_u32_unchecked(u as u32) }))
        } else if is_utf16_low_surrogate(u) {
            // a trailing surrogate
            Some(Err(DecodeUtf16Error::new(u)))
        } else {
            let u2 = match self.iter.next().or_else(|| self.back_buf.take()) {
                Some(u2) => u2,
                // eof
                None => return Some(Err(DecodeUtf16Error::new(u))),
            };
            if !is_utf16_low_surrogate(u2) {
                // not a trailing surrogate so we're not a valid
                // surrogate pair, so rewind to redecode u2 next time.
                self.forward_buf = Some(u2);
                return Some(Err(DecodeUtf16Error::new(u)));
            }
            // all ok, so lets decode it.
            // SAFETY: verified the surrogate pair
            unsafe { Some(Ok(decode_utf16_surrogate_pair(u, u2))) }
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (low, high) = self.iter.size_hint();
        // we could be entirely valid surrogates (2 elements per
        // char), or entirely non-surrogates (1 element per char)
        (low / 2, high)
    }
}
impl<I> DoubleEndedIterator for DecodeUtf16<I>
where
    I: Iterator<Item = u16> + DoubleEndedIterator,
{
    fn next_back(&mut self) -> Option<Self::Item> {
        // Mirror of `next`: scan from the back, so the trailing unit is read
        // first and an unpaired one rewinds into `back_buf`; once exhausted,
        // drain anything the forward direction buffered.
        let u2 = match self.back_buf.take() {
            Some(buf) => buf,
            None => self.iter.next_back().or_else(|| self.forward_buf.take())?,
        };
        if !is_utf16_surrogate(u2) {
            // SAFETY: not a surrogate
            Some(Ok(unsafe { char::from_u32_unchecked(u2 as u32) }))
        } else if is_utf16_high_surrogate(u2) {
            // a leading surrogate
            Some(Err(DecodeUtf16Error::new(u2)))
        } else {
            let u = match self.iter.next_back().or_else(|| self.forward_buf.take()) {
                Some(u) => u,
                // eof
                None => return Some(Err(DecodeUtf16Error::new(u2))),
            };
            if !is_utf16_high_surrogate(u) {
                // not a leading surrogate so we're not a valid
                // surrogate pair, so rewind to redecode u next time.
                self.back_buf = Some(u);
                return Some(Err(DecodeUtf16Error::new(u2)));
            }
            // all ok, so lets decode it.
            // SAFETY: verified the surrogate pair
            unsafe { Some(Ok(decode_utf16_surrogate_pair(u, u2))) }
        }
    }
}
impl<I> FusedIterator for DecodeUtf16<I> where I: Iterator<Item = u16> + FusedIterator {}
/// An iterator that lossily decodes possibly ill-formed UTF-16 encoded code points from an iterator
/// of [`u16`]s.
///
/// Any unpaired UTF-16 surrogate values are replaced by
/// [`U+FFFD REPLACEMENT_CHARACTER`][char::REPLACEMENT_CHARACTER] (�).
#[derive(Debug, Clone)]
pub struct DecodeUtf16Lossy<I>
where
    I: Iterator<Item = u16>,
{
    pub(crate) iter: DecodeUtf16<I>,
}
impl<I> Iterator for DecodeUtf16Lossy<I>
where
    I: Iterator<Item = u16>,
{
    type Item = char;
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Decode strictly, then paper over any error with U+FFFD.
        match self.iter.next()? {
            Ok(c) => Some(c),
            Err(_) => Some(char::REPLACEMENT_CHARACTER),
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // One char out per underlying result, so the hint passes through.
        self.iter.size_hint()
    }
}
impl<I> DoubleEndedIterator for DecodeUtf16Lossy<I>
where
    I: Iterator<Item = u16> + DoubleEndedIterator,
{
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        match self.iter.next_back()? {
            Ok(c) => Some(c),
            Err(_) => Some(char::REPLACEMENT_CHARACTER),
        }
    }
}
impl<I> FusedIterator for DecodeUtf16Lossy<I> where I: Iterator<Item = u16> + FusedIterator {}
/// An iterator that decodes UTF-32 encoded code points from an iterator of `u32`s.
#[derive(Debug, Clone)]
pub struct DecodeUtf32<I>
where
    I: Iterator<Item = u32>,
{
    pub(crate) iter: I,
}
impl<I> Iterator for DecodeUtf32<I>
where
    I: Iterator<Item = u32>,
{
    type Item = Result<char, DecodeUtf32Error>;
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        let u = self.iter.next()?;
        // Values that are not Unicode scalar values (surrogates, > U+10FFFF)
        // fail `char::from_u32` and become decode errors.
        Some(char::from_u32(u).ok_or_else(|| DecodeUtf32Error::new(u)))
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exactly one result per input word.
        self.iter.size_hint()
    }
}
impl<I> DoubleEndedIterator for DecodeUtf32<I>
where
    I: Iterator<Item = u32> + DoubleEndedIterator,
{
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        let u = self.iter.next_back()?;
        Some(char::from_u32(u).ok_or_else(|| DecodeUtf32Error::new(u)))
    }
}
impl<I> FusedIterator for DecodeUtf32<I> where I: Iterator<Item = u32> + FusedIterator {}
impl<I> ExactSizeIterator for DecodeUtf32<I>
where
    I: Iterator<Item = u32> + ExactSizeIterator,
{
    #[inline]
    fn len(&self) -> usize {
        self.iter.len()
    }
}
/// An iterator that lossily decodes possibly ill-formed UTF-32 encoded code points from an iterator
/// of `u32`s.
///
/// Any invalid UTF-32 values are replaced by
/// [`U+FFFD REPLACEMENT_CHARACTER`][core::char::REPLACEMENT_CHARACTER] (�).
#[derive(Debug, Clone)]
pub struct DecodeUtf32Lossy<I>
where
    I: Iterator<Item = u32>,
{
    pub(crate) iter: DecodeUtf32<I>,
}
impl<I> Iterator for DecodeUtf32Lossy<I>
where
    I: Iterator<Item = u32>,
{
    type Item = char;
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Decode strictly, then paper over any error with U+FFFD.
        match self.iter.next()? {
            Ok(c) => Some(c),
            Err(_) => Some(core::char::REPLACEMENT_CHARACTER),
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // One char out per underlying result, so the hint passes through.
        self.iter.size_hint()
    }
}
impl<I> DoubleEndedIterator for DecodeUtf32Lossy<I>
where
    I: Iterator<Item = u32> + DoubleEndedIterator,
{
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        match self.iter.next_back()? {
            Ok(c) => Some(c),
            Err(_) => Some(core::char::REPLACEMENT_CHARACTER),
        }
    }
}
impl<I> FusedIterator for DecodeUtf32Lossy<I> where I: Iterator<Item = u32> + FusedIterator {}
impl<I> ExactSizeIterator for DecodeUtf32Lossy<I>
where
    I: Iterator<Item = u32> + ExactSizeIterator,
{
    #[inline]
    fn len(&self) -> usize {
        self.iter.len()
    }
}
/// An iterator that encodes an iterator of [`char`][prim@char]s into UTF-8 bytes.
///
/// This struct is created by [`encode_utf8`][crate::encode_utf8]. See its documentation for more.
#[derive(Debug, Clone)]
pub struct EncodeUtf8<I>
where
    I: Iterator<Item = char>,
{
    iter: I,
    // Encoded bytes of the current char (at most four), with a read cursor
    // (`idx`) and the number of valid bytes (`len`).
    buf: [u8; 4],
    idx: u8,
    len: u8,
}
impl<I> EncodeUtf8<I>
where
    I: Iterator<Item = char>,
{
    pub(crate) fn new(iter: I) -> Self {
        Self {
            iter,
            buf: [0; 4],
            idx: 0,
            len: 0,
        }
    }
}
impl<I> Iterator for EncodeUtf8<I>
where
    I: Iterator<Item = char>,
{
    type Item = u8;
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Refill the byte buffer once the previous char is fully drained.
        if self.idx >= self.len {
            let c = self.iter.next()?;
            self.len = c.encode_utf8(&mut self.buf).len() as u8;
            self.idx = 0;
        }
        let byte = self.buf[self.idx as usize];
        self.idx += 1;
        Some(byte)
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (lower, upper) = self.iter.size_hint();
        // At most four UTF-8 bytes per char; saturate to None on overflow.
        let upper = match upper {
            Some(chars) => chars.checked_mul(4),
            None => None,
        };
        (lower, upper)
    }
}
impl<I> FusedIterator for EncodeUtf8<I> where I: Iterator<Item = char> + FusedIterator {}
/// An iterator that encodes an iterator of [`char`][prim@char]s into UTF-16 [`u16`] code units.
///
/// This struct is created by [`encode_utf16`][crate::encode_utf16]. See its documentation for more.
#[derive(Debug, Clone)]
pub struct EncodeUtf16<I>
where
    I: Iterator<Item = char>,
{
    iter: I,
    // Pending trailing code unit from a previously encoded surrogate pair.
    buf: Option<u16>,
}
impl<I> EncodeUtf16<I>
where
    I: Iterator<Item = char>,
{
    pub(crate) fn new(iter: I) -> Self {
        Self { iter, buf: None }
    }
}
impl<I> Iterator for EncodeUtf16<I>
where
    I: Iterator<Item = char>,
{
    type Item = u16;
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Flush a buffered trailing unit before encoding the next char.
        if let Some(unit) = self.buf.take() {
            return Some(unit);
        }
        let c = self.iter.next()?;
        let mut units = [0u16; 2];
        let encoded = c.encode_utf16(&mut units);
        if encoded.len() == 2 {
            self.buf = Some(encoded[1]);
        }
        Some(encoded[0])
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (lower, upper) = self.iter.size_hint();
        // At most two UTF-16 units per char; saturate to None on overflow.
        let upper = match upper {
            Some(chars) => chars.checked_mul(2),
            None => None,
        };
        (lower, upper)
    }
}
impl<I> FusedIterator for EncodeUtf16<I> where I: Iterator<Item = char> + FusedIterator {}
/// An iterator that encodes an iterator of [`char`][prim@char]s into UTF-32 [`u32`] values.
///
/// This struct is created by [`encode_utf32`][crate::encode_utf32]. See its documentation for more.
#[derive(Debug, Clone)]
pub struct EncodeUtf32<I>
where
I: Iterator<Item = char>,
{
iter: I,
}
impl<I> EncodeUtf32<I>
where
I: Iterator<Item = char>,
{
pub(crate) fn new(iter: I) -> Self {
Self { iter }
}
}
impl<I> Iterator for EncodeUtf32<I>
where
I: Iterator<Item = char>,
{
type Item = u32;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|c| c as u32)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<I> FusedIterator for EncodeUtf32<I> where I: Iterator<Item = char> + FusedIterator {}
impl<I> ExactSizeIterator for EncodeUtf32<I>
where
I: Iterator<Item = char> + ExactSizeIterator,
{
#[inline]
fn len(&self) -> usize {
self.iter.len()
}
}
impl<I> DoubleEndedIterator for EncodeUtf32<I>
where
I: Iterator<Item = char> + DoubleEndedIterator,
{
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
self.iter.next_back().map(|c| c as u32)
}
}
| 25.939052 | 100 | 0.585241 |
bbb3757e2f41f2a7cb31f22a0fdab46d6a8e7f24 | 15,440 | // (c) Facebook, Inc. and its affiliates. Confidential and proprietary.
use cxx::CxxString;
use facts_rust::facts;
use oxidized::relative_path::RelativePath;
use rust_facts_ffi::{extract_facts_as_json_ffi0, extract_facts_ffi0, facts_to_json_ffi};
use std::collections::{BTreeMap, BTreeSet};
#[cxx::bridge]
mod ffi {
    // Kind of Hack type declaration a fact describes.
    #[derive(Debug)]
    enum TypeKind {
        Class,
        Record,
        Interface,
        Enum,
        Trait,
        TypeAlias,
        Unknown,
        Mixed,
    }
    // A user attribute and its literal arguments.
    #[derive(Debug, PartialEq)]
    struct Attribute {
        name: String,
        args: Vec<String>,
    }
    // Facts recorded for a single method (currently just its attributes).
    #[derive(Debug, PartialEq)]
    struct MethodFacts {
        attributes: Vec<Attribute>,
    }
    // Named method entry; the Vec<Method> form of the Rust-side name->facts map.
    #[derive(Debug, PartialEq)]
    struct Method {
        name: String,
        methfacts: MethodFacts,
    }
    // Facts about one type declaration.
    #[derive(Debug, PartialEq)]
    pub struct TypeFacts {
        pub base_types: Vec<String>,
        pub kind: TypeKind,
        pub attributes: Vec<Attribute>,
        pub flags: isize,
        pub require_extends: Vec<String>,
        pub require_implements: Vec<String>,
        pub methods: Vec<Method>,
    }
    // Named type entry; the Vec form of the Rust-side name->TypeFacts map.
    #[derive(Debug, PartialEq)]
    struct TypeFactsByName {
        name: String,
        typefacts: TypeFacts,
    }
    // All facts extracted from one source file.
    #[derive(Debug, Default, PartialEq)]
    struct Facts {
        pub types: Vec<TypeFactsByName>,
        pub functions: Vec<String>,
        pub constants: Vec<String>,
        pub type_aliases: Vec<String>,
        pub file_attributes: Vec<Attribute>,
    }
    // Facts plus digests of the source text they were extracted from.
    #[derive(Debug, Default)]
    struct FactsResult {
        facts: Facts,
        md5sum: String,
        sha1sum: String,
    }
    // Rust entry points exposed to C++.
    extern "Rust" {
        pub fn hackc_extract_facts_as_json_cpp_ffi(
            flags: i32,
            filename: &CxxString,
            source_text: &CxxString,
        ) -> String;
    }
    extern "Rust" {
        pub fn hackc_extract_facts_cpp_ffi(
            flags: i32,
            filename: &CxxString,
            source_text: &CxxString,
        ) -> FactsResult;
    }
    extern "Rust" {
        pub fn hackc_facts_to_json_cpp_ffi(facts: FactsResult, source_text: &CxxString) -> String;
    }
}
/// Parses `source_text` and returns the extracted facts serialized as JSON,
/// or an empty string when extraction fails. `flags` packs the boolean parser
/// options bit-by-bit, as annotated on each argument below.
pub fn hackc_extract_facts_as_json_cpp_ffi(
    flags: i32,
    filename: &CxxString,
    source_text: &CxxString,
) -> String {
    use std::os::unix::ffi::OsStrExt;
    let filepath = RelativePath::make(
        oxidized::relative_path::Prefix::Dummy,
        std::path::PathBuf::from(std::ffi::OsStr::from_bytes(filename.as_bytes())),
    );
    // `None` (extraction failure) maps to the empty string expected by C++;
    // `unwrap_or_default` replaces the original's explicit Some/None match.
    extract_facts_as_json_ffi0(
        ((1 << 0) & flags) != 0, // php5_compat_mode
        ((1 << 1) & flags) != 0, // hhvm_compat_mode
        ((1 << 2) & flags) != 0, // allow_new_attribute_syntax
        ((1 << 3) & flags) != 0, // enable_xhp_class_modifier
        ((1 << 4) & flags) != 0, // disable_xhp_element_mangling
        ((1 << 5) & flags) != 0, // disallow_hash_comments
        filepath,
        source_text.as_bytes(),
        true, // mangle_xhp
    )
    .unwrap_or_default()
}
/// Parses `source_text` and returns the extracted facts together with MD5 and
/// SHA1 digests of the source bytes. On extraction failure, returns a default
/// (empty) `FactsResult`. `flags` packs the boolean parser options bit-by-bit,
/// as annotated on each argument below.
pub fn hackc_extract_facts_cpp_ffi(
    flags: i32,
    filename: &CxxString,
    source_text: &CxxString,
) -> ffi::FactsResult {
    use std::os::unix::ffi::OsStrExt;
    let filepath = RelativePath::make(
        oxidized::relative_path::Prefix::Dummy,
        std::path::PathBuf::from(std::ffi::OsStr::from_bytes(filename.as_bytes())),
    );
    let text = source_text.as_bytes();
    match extract_facts_ffi0(
        ((1 << 0) & flags) != 0, // php5_compat_mode
        ((1 << 1) & flags) != 0, // hhvm_compat_mode
        ((1 << 2) & flags) != 0, // allow_new_attribute_syntax
        ((1 << 3) & flags) != 0, // enable_xhp_class_modifier
        ((1 << 4) & flags) != 0, // disable_xhp_element_mangling
        ((1 << 5) & flags) != 0, // disallow_hash_comments
        filepath,
        text,
        true, // mangle_xhp
    ) {
        Some(facts) => {
            // Digests are computed over the same bytes the parser consumed.
            let (md5sum, sha1sum) = facts::md5_and_sha1(text);
            ffi::FactsResult {
                facts: facts.into(),
                md5sum,
                sha1sum,
            }
        }
        None => Default::default(),
    }
}
/// Converts previously extracted facts back into their JSON representation,
/// handing the original source bytes to the serializer alongside the facts.
pub fn hackc_facts_to_json_cpp_ffi(facts: ffi::FactsResult, source_text: &CxxString) -> String {
    facts_to_json_ffi(facts::Facts::from(facts.facts), source_text.as_bytes())
}
/// Converts a vector of entries convertible to `(K, V)` pairs into an ordered
/// map. A later duplicate of a key overwrites an earlier one, as with `collect`.
fn vec_to_map<K, V, T>(v: Vec<T>) -> BTreeMap<K, V>
where
    K: std::cmp::Ord,
    T: Into<(K, V)>,
{
    let mut map = BTreeMap::new();
    for entry in v {
        let (key, value) = entry.into();
        map.insert(key, value);
    }
    map
}
/// Converts an ordered map into a vector of entries built from its `(K, V)`
/// pairs, preserving ascending key order.
fn map_to_vec<K, V, T>(m: BTreeMap<K, V>) -> Vec<T>
where
    T: From<(K, V)>,
{
    m.into_iter().map(T::from).collect()
}
/// Collects a vector into an ordered, deduplicated set.
fn vec_to_set<T: std::cmp::Ord>(v: Vec<T>) -> BTreeSet<T> {
    // The original piped through a no-op `map(|item| item)`; collect directly.
    v.into_iter().collect()
}
/// Converts an ordered set into a vector, preserving ascending order.
fn set_to_vec<T>(s: BTreeSet<T>) -> Vec<T> {
    // The original piped through a no-op `map(|item| item)`; collect directly.
    s.into_iter().collect()
}
// ffi::TypeKind -> facts::TypeKind.
// NOTE(review): the catch-all arm exists because cxx shared enums are not
// closed Rust enums — they can carry values outside the named variants.
impl From<ffi::TypeKind> for facts::TypeKind {
    fn from(type_kind: ffi::TypeKind) -> facts::TypeKind {
        match type_kind {
            ffi::TypeKind::Class => facts::TypeKind::Class,
            ffi::TypeKind::Record => facts::TypeKind::Record,
            ffi::TypeKind::Interface => facts::TypeKind::Interface,
            ffi::TypeKind::Enum => facts::TypeKind::Enum,
            ffi::TypeKind::Trait => facts::TypeKind::Trait,
            ffi::TypeKind::TypeAlias => facts::TypeKind::TypeAlias,
            ffi::TypeKind::Unknown => facts::TypeKind::Unknown,
            ffi::TypeKind::Mixed => facts::TypeKind::Mixed,
            _ => panic!("impossible"),
        }
    }
}
// facts::TypeKind -> ffi::TypeKind (closed Rust enum, so the match is
// exhaustive without a catch-all).
impl From<facts::TypeKind> for ffi::TypeKind {
    fn from(typekind: facts::TypeKind) -> ffi::TypeKind {
        match typekind {
            facts::TypeKind::Class => ffi::TypeKind::Class,
            facts::TypeKind::Record => ffi::TypeKind::Record,
            facts::TypeKind::Interface => ffi::TypeKind::Interface,
            facts::TypeKind::Enum => ffi::TypeKind::Enum,
            facts::TypeKind::Trait => ffi::TypeKind::Trait,
            facts::TypeKind::TypeAlias => ffi::TypeKind::TypeAlias,
            facts::TypeKind::Unknown => ffi::TypeKind::Unknown,
            facts::TypeKind::Mixed => ffi::TypeKind::Mixed,
        }
    }
}
// `Attribute` <-> (name, args) pair — the map-entry form used on the Rust side.
impl From<ffi::Attribute> for (String, Vec<String>) {
    fn from(attr: ffi::Attribute) -> (String, Vec<String>) {
        (attr.name, attr.args)
    }
}
impl From<(String, Vec<String>)> for ffi::Attribute {
    fn from(attr: (String, Vec<String>)) -> ffi::Attribute {
        ffi::Attribute {
            name: attr.0,
            args: attr.1,
        }
    }
}
// `Method` <-> (name, MethodFacts) map entry.
impl From<ffi::Method> for (String, facts::MethodFacts) {
    fn from(meth: ffi::Method) -> (String, facts::MethodFacts) {
        let ffi::Method { name, methfacts } = meth;
        (name, methfacts.into())
    }
}
impl From<(String, facts::MethodFacts)> for ffi::Method {
    fn from(methodfacts: (String, facts::MethodFacts)) -> ffi::Method {
        ffi::Method {
            name: methodfacts.0.into(),
            methfacts: methodfacts.1.into(),
        }
    }
}
// `MethodFacts`: attribute list <-> attribute map.
impl From<ffi::MethodFacts> for facts::MethodFacts {
    fn from(methodfacts: ffi::MethodFacts) -> facts::MethodFacts {
        facts::MethodFacts {
            attributes: vec_to_map(methodfacts.attributes),
        }
    }
}
impl From<facts::MethodFacts> for ffi::MethodFacts {
    fn from(method_facts: facts::MethodFacts) -> ffi::MethodFacts {
        ffi::MethodFacts {
            attributes: map_to_vec(method_facts.attributes),
        }
    }
}
// `TypeFacts`: Vec-based FFI collections <-> the Rust side's sets and maps.
impl From<ffi::TypeFacts> for facts::TypeFacts {
    fn from(facts: ffi::TypeFacts) -> facts::TypeFacts {
        facts::TypeFacts {
            base_types: vec_to_set(facts.base_types),
            kind: facts.kind.into(),
            attributes: vec_to_map(facts.attributes),
            flags: facts.flags,
            require_extends: vec_to_set(facts.require_extends),
            require_implements: vec_to_set(facts.require_implements),
            methods: vec_to_map(facts.methods),
        }
    }
}
impl From<facts::TypeFacts> for ffi::TypeFacts {
    fn from(facts: facts::TypeFacts) -> ffi::TypeFacts {
        ffi::TypeFacts {
            base_types: set_to_vec(facts.base_types),
            kind: facts.kind.into(),
            attributes: map_to_vec(facts.attributes),
            flags: facts.flags,
            require_extends: set_to_vec(facts.require_extends),
            require_implements: set_to_vec(facts.require_implements),
            methods: map_to_vec(facts.methods),
        }
    }
}
// `TypeFactsByName` <-> (name, TypeFacts) map entry.
impl From<ffi::TypeFactsByName> for (String, facts::TypeFacts) {
    fn from(typefacts_by_name: ffi::TypeFactsByName) -> (String, facts::TypeFacts) {
        let ffi::TypeFactsByName { name, typefacts } = typefacts_by_name;
        (name, typefacts.into())
    }
}
impl From<(String, facts::TypeFacts)> for ffi::TypeFactsByName {
    fn from((name, typefacts): (String, facts::TypeFacts)) -> ffi::TypeFactsByName {
        ffi::TypeFactsByName {
            name,
            typefacts: typefacts.into(),
        }
    }
}
// Whole-file `Facts`: Vec-based FFI entries <-> name-keyed maps.
impl From<ffi::Facts> for facts::Facts {
    fn from(facts: ffi::Facts) -> facts::Facts {
        facts::Facts {
            types: vec_to_map(facts.types),
            functions: facts.functions,
            constants: facts.constants,
            type_aliases: facts.type_aliases,
            file_attributes: vec_to_map(facts.file_attributes),
        }
    }
}
impl From<facts::Facts> for ffi::Facts {
    fn from(facts: facts::Facts) -> ffi::Facts {
        ffi::Facts {
            types: map_to_vec(facts.types),
            functions: facts.functions,
            constants: facts.constants,
            type_aliases: facts.type_aliases,
            file_attributes: map_to_vec(facts.file_attributes),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Round-trip: converting the FFI method facts must equal the
    // hand-built Rust value.
    #[test]
    fn test_method_facts_1() {
        let (ffi_method_facts, rust_method_facts) = create_method_facts();
        assert_eq!(
            facts::MethodFacts::from(ffi_method_facts),
            rust_method_facts
        )
    }
    // Mutating the Rust value before converting back must make the
    // FFI comparison fail.
    #[test]
    fn test_method_facts_2() {
        let (ffi_method_facts, mut rust_method_facts) = create_method_facts();
        rust_method_facts.attributes.remove_entry("MyAttribute2");
        assert_ne!(ffi::MethodFacts::from(rust_method_facts), ffi_method_facts)
    }
    // Map -> vec conversion of methods must match the FFI fixture.
    #[test]
    fn test_methods_1() {
        let (ffi_methods, rust_methods) = create_methods();
        assert_eq!(
            map_to_vec::<String, facts::MethodFacts, ffi::Method>(rust_methods),
            ffi_methods
        )
    }
    // An emptied Rust map must no longer equal the converted FFI vec.
    #[test]
    fn test_methods_2() {
        let (ffi_methods, mut rust_methods) = create_methods();
        rust_methods.clear();
        assert_ne!(vec_to_map(ffi_methods), rust_methods)
    }
    // Rust -> FFI conversion of a full TypeFacts fixture.
    #[test]
    fn test_type_facts() {
        let (ffi_type_facts, rust_type_facts) = create_type_facts();
        assert_eq!(ffi::TypeFacts::from(rust_type_facts), ffi_type_facts)
    }
    // Map -> vec conversion of the name->TypeFacts map.
    #[test]
    fn test_type_facts_by_name() {
        let (ffi_type_facts_by_name, rust_type_facts_by_name) = create_type_facts_by_name();
        assert_eq!(
            map_to_vec::<String, facts::TypeFacts, ffi::TypeFactsByName>(rust_type_facts_by_name),
            ffi_type_facts_by_name
        )
    }
    // End-to-end: a complete ffi::Facts converts into the matching
    // facts::Facts built from the same fixtures.
    #[test]
    fn test_facts() {
        let (ffi_type_facts_by_name, rust_type_facts_by_name) = create_type_facts_by_name();
        let (ffi_attributes, rust_attributes) = create_attributes();
        let ffi_facts = ffi::Facts {
            types: ffi_type_facts_by_name,
            functions: vec!["f1".to_string(), "f2".to_string()],
            constants: vec!["C".to_string()],
            type_aliases: vec!["foo".to_string(), "bar".to_string()],
            file_attributes: ffi_attributes,
        };
        let rust_facts = facts::Facts {
            types: rust_type_facts_by_name,
            functions: vec!["f1".to_string(), "f2".to_string()],
            constants: vec!["C".to_string()],
            type_aliases: vec!["foo".to_string(), "bar".to_string()],
            file_attributes: rust_attributes,
        };
        assert_eq!(facts::Facts::from(ffi_facts), rust_facts)
    }
    // Fixture: a matching (FFI vec, Rust map) pair of attributes.
    fn create_attributes() -> (Vec<ffi::Attribute>, facts::Attributes) {
        let ffi_attributes = vec![
            ffi::Attribute {
                name: "MyAttribute1".to_string(),
                args: vec!["arg1".to_string(), "arg2".to_string(), "arg3".to_string()],
            },
            ffi::Attribute {
                name: "MyAttribute2".to_string(),
                args: vec![],
            },
        ];
        let mut rust_attributes = BTreeMap::new();
        rust_attributes.insert(
            "MyAttribute1".to_string(),
            vec!["arg1".to_string(), "arg2".to_string(), "arg3".to_string()],
        );
        rust_attributes.insert("MyAttribute2".to_string(), vec![]);
        (ffi_attributes, rust_attributes)
    }
    // Fixture: matching method facts built on top of the attribute fixture.
    fn create_method_facts() -> (ffi::MethodFacts, facts::MethodFacts) {
        let (ffi_attributes, rust_attributes) = create_attributes();
        let ffi_method_facts = ffi::MethodFacts {
            attributes: ffi_attributes,
        };
        let rust_method_facts = facts::MethodFacts {
            attributes: rust_attributes,
        };
        (ffi_method_facts, rust_method_facts)
    }
    // Fixture: one method named "m" in both representations.
    fn create_methods() -> (Vec<ffi::Method>, facts::Methods) {
        let (ffi_method_facts, rust_method_facts) = create_method_facts();
        let ffi_methods = vec![ffi::Method {
            name: "m".to_string(),
            methfacts: ffi_method_facts,
        }];
        let mut rust_methods = BTreeMap::new();
        rust_methods.insert("m".to_string(), rust_method_facts);
        (ffi_methods, rust_methods)
    }
    // Fixture: a full TypeFacts for a class in both representations.
    fn create_type_facts() -> (ffi::TypeFacts, facts::TypeFacts) {
        let (ffi_attributes, rust_attributes) = create_attributes();
        let (ffi_methods, rust_methods) = create_methods();
        let base_types = vec!["int".to_string(), "string".to_string()];
        let require_extends = vec!["A".to_string()];
        let require_implements = vec!["B".to_string()];
        let rust_type_facts = facts::TypeFacts {
            base_types: vec_to_set(base_types.clone()),
            kind: facts::TypeKind::Class,
            attributes: rust_attributes,
            flags: 0,
            require_extends: vec_to_set(require_extends.clone()),
            require_implements: vec_to_set(require_implements.clone()),
            methods: rust_methods,
        };
        let ffi_type_facts = ffi::TypeFacts {
            base_types,
            kind: ffi::TypeKind::Class,
            attributes: ffi_attributes,
            flags: 0,
            require_extends,
            require_implements,
            methods: ffi_methods,
        };
        (ffi_type_facts, rust_type_facts)
    }
    // Fixture: one type named "C" mapped to the TypeFacts fixture.
    fn create_type_facts_by_name() -> (Vec<ffi::TypeFactsByName>, facts::TypeFactsByName) {
        let (ffi_type_facts, rust_type_facts) = create_type_facts();
        let ffi_type_facts_by_name = vec![ffi::TypeFactsByName {
            name: "C".to_string(),
            typefacts: ffi_type_facts,
        }];
        let mut rust_type_facts_by_name = BTreeMap::new();
        rust_type_facts_by_name.insert("C".to_string(), rust_type_facts);
        (ffi_type_facts_by_name, rust_type_facts_by_name)
    }
}
| 31.704312 | 98 | 0.587111 |
d5e48f92e73244ecc069e426ea82a8dc765e2596 | 9,643 | use swc_atoms::js_word;
use super::*;
use crate::token::Keyword;
impl Context {
    /// Returns `true` if `word` may not be used as an identifier in the
    /// current parsing context: always-reserved keywords, plus contextually
    /// reserved ones (`let`, `await`, `yield`) and the strict-mode-only set.
    ///
    /// NOTE(review): this duplicates the table in `is_reserved_word` below
    /// (tokens here vs. raw atoms there) — the two lists must stay in sync.
    pub(crate) fn is_reserved(self, word: &Word) -> bool {
        match *word {
            // Contextually reserved, depending on strict / async / generator.
            Word::Keyword(Keyword::Let) => self.strict,
            Word::Keyword(Keyword::Await) => self.in_async || self.strict,
            Word::Keyword(Keyword::Yield) => self.in_generator || self.strict,
            // Always-reserved keywords and literals.
            Word::Null
            | Word::True
            | Word::False
            | Word::Keyword(Keyword::Break)
            | Word::Keyword(Keyword::Case)
            | Word::Keyword(Keyword::Catch)
            | Word::Keyword(Keyword::Continue)
            | Word::Keyword(Keyword::Debugger)
            | Word::Keyword(Keyword::Default_)
            | Word::Keyword(Keyword::Do)
            | Word::Keyword(Keyword::Export)
            | Word::Keyword(Keyword::Else)
            | Word::Keyword(Keyword::Finally)
            | Word::Keyword(Keyword::For)
            | Word::Keyword(Keyword::Function)
            | Word::Keyword(Keyword::If)
            | Word::Keyword(Keyword::Return)
            | Word::Keyword(Keyword::Switch)
            | Word::Keyword(Keyword::Throw)
            | Word::Keyword(Keyword::Try)
            | Word::Keyword(Keyword::Var)
            | Word::Keyword(Keyword::Const)
            | Word::Keyword(Keyword::While)
            | Word::Keyword(Keyword::With)
            | Word::Keyword(Keyword::New)
            | Word::Keyword(Keyword::This)
            | Word::Keyword(Keyword::Super)
            | Word::Keyword(Keyword::Class)
            | Word::Keyword(Keyword::Extends)
            | Word::Keyword(Keyword::Import)
            | Word::Keyword(Keyword::In)
            | Word::Keyword(Keyword::InstanceOf)
            | Word::Keyword(Keyword::TypeOf)
            | Word::Keyword(Keyword::Void)
            | Word::Keyword(Keyword::Delete) => true,
            // Future reserved word
            Word::Ident(js_word!("enum")) => true,
            // Future reserved words that are only reserved in strict mode.
            Word::Ident(js_word!("implements"))
            | Word::Ident(js_word!("package"))
            | Word::Ident(js_word!("protected"))
            | Word::Ident(js_word!("interface"))
            | Word::Ident(js_word!("private"))
            | Word::Ident(js_word!("public"))
                if self.strict =>
            {
                true
            }
            _ => false,
        }
    }
    /// Same policy as [`Context::is_reserved`], but operating on a raw
    /// atom (`JsWord`) instead of a lexer token. Keep in sync with the
    /// keyword table above.
    pub fn is_reserved_word(self, word: &JsWord) -> bool {
        match *word {
            js_word!("let") => self.strict,
            js_word!("await") => self.in_async || self.strict,
            js_word!("yield") => self.in_generator || self.strict,
            js_word!("null")
            | js_word!("true")
            | js_word!("false")
            | js_word!("break")
            | js_word!("case")
            | js_word!("catch")
            | js_word!("continue")
            | js_word!("debugger")
            | js_word!("default")
            | js_word!("do")
            | js_word!("export")
            | js_word!("else")
            | js_word!("finally")
            | js_word!("for")
            | js_word!("function")
            | js_word!("if")
            | js_word!("return")
            | js_word!("switch")
            | js_word!("throw")
            | js_word!("try")
            | js_word!("var")
            | js_word!("const")
            | js_word!("while")
            | js_word!("with")
            | js_word!("new")
            | js_word!("this")
            | js_word!("super")
            | js_word!("class")
            | js_word!("extends")
            | js_word!("import")
            | js_word!("in")
            | js_word!("instanceof")
            | js_word!("typeof")
            | js_word!("void")
            | js_word!("delete") => true,
            // Future reserved word
            js_word!("enum") => true,
            // Future reserved words that are only reserved in strict mode.
            js_word!("implements")
            | js_word!("package")
            | js_word!("protected")
            | js_word!("interface")
            | js_word!("private")
            | js_word!("public")
                if self.strict =>
            {
                true
            }
            _ => false,
        }
    }
}
impl<'a, I: Tokens> Parser<I> {
    /// Swap in `ctx`; the original context is restored when the returned
    /// guard is dropped.
    pub(super) fn with_ctx(&mut self, ctx: Context) -> WithCtx<I> {
        let orig_ctx = self.ctx();
        self.set_ctx(ctx);
        WithCtx {
            inner: self,
            orig_ctx,
        }
    }
    /// Swap in `state`; the original state is restored when the returned
    /// guard is dropped.
    pub(super) fn with_state(&mut self, state: State) -> WithState<I> {
        let orig_state = std::mem::replace(&mut self.state, state);
        WithState {
            inner: self,
            orig_state,
        }
    }
    pub(super) fn set_ctx(&mut self, ctx: Context) {
        self.input.set_ctx(ctx);
    }
    /// Enter strict mode; restored when the returned guard is dropped.
    pub(super) fn strict_mode(&mut self) -> WithCtx<I> {
        self.with_ctx(Context {
            strict: true,
            ..self.ctx()
        })
    }
    /// Enter type-parsing mode; restored when the returned guard is dropped.
    pub(super) fn in_type(&mut self) -> WithCtx<I> {
        self.with_ctx(Context {
            in_type: true,
            ..self.ctx()
        })
    }
    /// Toggle whether `in` is allowed in expressions; restored when the
    /// returned guard is dropped.
    pub(super) fn include_in_expr(&mut self, include_in_expr: bool) -> WithCtx<I> {
        self.with_ctx(Context {
            include_in_expr,
            ..self.ctx()
        })
    }
    /// Run the given closure against this parser.
    #[inline(always)]
    pub(super) fn parse_with<F, Ret>(&mut self, f: F) -> PResult<Ret>
    where
        F: FnOnce(&mut Self) -> PResult<Ret>,
    {
        f(self)
    }
    pub(super) fn syntax(&self) -> Syntax {
        self.input.syntax()
    }
}
/// Shared interface for parsing object-like productions (object literals
/// and object patterns) into a result type `Obj`.
pub trait ParseObject<Obj> {
    /// The type of a single property within the object.
    type Prop;
    /// Assemble the final object from its parsed properties.
    fn make_object(
        &mut self,
        span: Span,
        props: Vec<Self::Prop>,
        trailing_comma: Option<Span>,
    ) -> PResult<Obj>;
    /// Parse one property of the object.
    fn parse_object_prop(&mut self) -> PResult<Self::Prop>;
}
/// Guard returned by `Parser::with_state`; restores the parser's previous
/// `State` when dropped.
pub struct WithState<'w, I: 'w + Tokens> {
    inner: &'w mut Parser<I>,
    // State to swap back in on drop.
    orig_state: State,
}
impl<'w, I: Tokens> Deref for WithState<'w, I> {
    type Target = Parser<I>;
    fn deref(&self) -> &Parser<I> {
        self.inner
    }
}
impl<'w, I: Tokens> DerefMut for WithState<'w, I> {
    fn deref_mut(&mut self) -> &mut Parser<I> {
        self.inner
    }
}
impl<'w, I: Tokens> Drop for WithState<'w, I> {
    fn drop(&mut self) {
        // Swap the saved state back into the parser.
        std::mem::swap(&mut self.inner.state, &mut self.orig_state);
    }
}
/// Guard returned by `Parser::with_ctx` (and helpers such as
/// `strict_mode`/`in_type`); restores the previous `Context` when dropped.
pub struct WithCtx<'w, I: 'w + Tokens> {
    inner: &'w mut Parser<I>,
    // Context to restore on drop.
    orig_ctx: Context,
}
impl<'w, I: Tokens> Deref for WithCtx<'w, I> {
    type Target = Parser<I>;
    fn deref(&self) -> &Parser<I> {
        self.inner
    }
}
impl<'w, I: Tokens> DerefMut for WithCtx<'w, I> {
    fn deref_mut(&mut self) -> &mut Parser<I> {
        self.inner
    }
}
impl<'w, I: Tokens> Drop for WithCtx<'w, I> {
    fn drop(&mut self) {
        // Restore the context that was active before `with_ctx`.
        self.inner.set_ctx(self.orig_ctx);
    }
}
/// Extension helpers available on expression types.
pub(super) trait ExprExt {
    /// Borrow the underlying `Expr`.
    fn as_expr(&self) -> &Expr;
    /// "IsValidSimpleAssignmentTarget" from spec.
    ///
    /// Determines whether the expression may appear on the left-hand side
    /// of a simple assignment. In strict mode, `arguments` and `eval` are
    /// additionally rejected as identifiers.
    fn is_valid_simple_assignment_target(&self, strict: bool) -> bool {
        match *self.as_expr() {
            Expr::Ident(Ident { ref sym, .. }) => {
                // Strict mode forbids assigning to `arguments` / `eval`.
                if strict && (&*sym == "arguments" || &*sym == "eval") {
                    return false;
                }
                true
            }
            Expr::This(..)
            | Expr::Lit(..)
            | Expr::Array(..)
            | Expr::Object(..)
            | Expr::Fn(..)
            | Expr::Class(..)
            | Expr::Tpl(..)
            | Expr::TaggedTpl(..) => false,
            // Parentheses are transparent: check the inner expression.
            Expr::Paren(ParenExpr { ref expr, .. }) => {
                expr.is_valid_simple_assignment_target(strict)
            }
            // Member expressions are valid targets, except when the object
            // chain contains optional chaining.
            Expr::Member(MemberExpr { ref obj, .. }) => match obj.as_ref() {
                Expr::Member(..) => obj.is_valid_simple_assignment_target(strict),
                Expr::OptChain(..) => false,
                _ => true,
            },
            Expr::SuperProp(..) => true,
            Expr::New(..) | Expr::Call(..) => false,
            // TODO: Spec only mentions `new.target`
            Expr::MetaProp(..) => false,
            Expr::Update(..) => false,
            Expr::Unary(..) | Expr::Await(..) => false,
            Expr::Bin(..) => false,
            Expr::Cond(..) => false,
            Expr::Yield(..) | Expr::Arrow(..) | Expr::Assign(..) => false,
            Expr::Seq(..) => false,
            Expr::OptChain(..) => false,
            // MemberExpression is valid assignment target
            Expr::PrivateName(..) => false,
            // jsx
            Expr::JSXMember(..)
            | Expr::JSXNamespacedName(..)
            | Expr::JSXEmpty(..)
            | Expr::JSXElement(..)
            | Expr::JSXFragment(..) => false,
            // typescript: these wrappers are transparent — check the inner
            // expression.
            Expr::TsNonNull(TsNonNullExpr { ref expr, .. })
            | Expr::TsTypeAssertion(TsTypeAssertion { ref expr, .. })
            | Expr::TsAs(TsAsExpr { ref expr, .. })
            | Expr::TsInstantiation(TsInstantiation { ref expr, .. }) => {
                expr.is_valid_simple_assignment_target(strict)
            }
            Expr::TsConstAssertion(..) => false,
            Expr::Invalid(..) => false,
        }
    }
}
impl ExprExt for Box<Expr> {
    /// Borrow straight through the box.
    fn as_expr(&self) -> &Expr {
        &**self
    }
}
impl ExprExt for Expr {
    /// An `Expr` is already the expression itself.
    fn as_expr(&self) -> &Expr {
        self
    }
}
| 28.957958 | 83 | 0.483148 |
e8ae65292a3ec2d97e9f1b1e91835d577b4471c0 | 15 | pub mod queue;
| 7.5 | 14 | 0.733333 |
e5df9da4cd5901c060807a39eee539025b720f35 | 17,387 | //! This file contains interactions with `web_sys`.
use super::Namespace;
use crate::virtual_dom::{At, AtValue, Attrs, El, Mailbox, Node, Style, Text};
use wasm_bindgen::JsCast;
use web_sys::Document;
/// Write the merged vdom `Style` onto the element's `style` attribute.
/// Panics if the node is not an `Element` or the attribute can't be set.
fn set_style(el_ws: &web_sys::Node, style: &Style) {
    let element = el_ws
        .dyn_ref::<web_sys::Element>()
        .expect("Problem casting Node as Element while setting style");
    element
        .set_attribute("style", &style.to_string())
        .expect("Problem setting style");
}
pub(crate) fn assign_ws_nodes_to_el<Ms>(document: &Document, el: &mut El<Ms>) {
    // Create the backing web_sys node, hand a clone to each element ref,
    // then store it on the vdom element itself.
    let ws_node = make_websys_el(el, document);
    el.refs.iter_mut().for_each(|r| r.set(ws_node.clone()));
    el.node_ws = Some(ws_node);
    // Recurse so the entire subtree receives web_sys nodes.
    el.children
        .iter_mut()
        .for_each(|child| assign_ws_nodes(document, child));
}
pub(crate) fn assign_ws_nodes_to_text(document: &Document, text: &mut Text) {
    // Create the DOM text node and store it on the vdom text node.
    let ws_text = document
        .create_text_node(&text.text)
        .dyn_into::<web_sys::Node>()
        .expect("Problem casting Text as Node.");
    text.node_ws = Some(ws_text);
}
/// Recursively create `web_sys::Node`s, and place them in the vdom Nodes' fields.
pub(crate) fn assign_ws_nodes<Ms>(document: &Document, node: &mut Node<Ms>) {
    // Dispatch on node kind; `Empty` nodes have no DOM counterpart.
    if let Node::Element(el) = node {
        assign_ws_nodes_to_el(document, el);
    } else if let Node::Text(text) = node {
        assign_ws_nodes_to_text(document, text);
    }
}
/// Downcast a DOM node to an `Element`, erroring for any other node type.
fn node_to_element(el_ws: &web_sys::Node) -> Result<&web_sys::Element, &'static str> {
    if el_ws.node_type() != web_sys::Node::ELEMENT_NODE {
        return Err("Node isn't Element!");
    }
    el_ws
        .dyn_ref::<web_sys::Element>()
        .ok_or("Problem casting Node as Element")
}
/// Apply one vdom attribute value to a real DOM node.
///
/// `Some(v)` sets the attribute to `v`; `None` sets it present-but-empty;
/// `Ignored` removes it. Failures are logged via `crate::error` rather
/// than panicking.
fn set_attr_value(el_ws: &web_sys::Node, at: &At, at_value: &AtValue) {
    let result = match at_value {
        AtValue::Some(value) => node_to_element(el_ws).and_then(|element| {
            element
                .set_attribute(at.as_str(), value)
                .map_err(|_| "Problem setting an attribute.")
        }),
        // `None` means "attribute present, no value" — set it to "".
        AtValue::None => node_to_element(el_ws).and_then(|element| {
            element
                .set_attribute(at.as_str(), "")
                .map_err(|_| "Problem setting an attribute.")
        }),
        AtValue::Ignored => node_to_element(el_ws).and_then(|element| {
            element
                .remove_attribute(at.as_str())
                .map_err(|_| "Problem removing an attribute.")
        }),
    };
    if let Err(err) = result {
        crate::error(err);
    }
}
/// Create and return a `web_sys` Element from our virtual-dom `El`. The `web_sys`
/// Element is a close analog to JS/DOM elements.
///
/// # References
/// * [`web_sys` Element](https://rustwasm.github.io/wasm-bindgen/api/web_sys/struct.Element.html)
/// * [MDN docs](https://developer.mozilla.org/en-US/docs/Web/HTML/Element\)
/// * See also: [`web_sys` Node](https://rustwasm.github.io/wasm-bindgen/api/web_sys/struct.Node.html)
pub(crate) fn make_websys_el<Ms>(el: &mut El<Ms>, document: &web_sys::Document) -> web_sys::Node {
    let tag = el.tag.as_str();
    // Namespaced elements (e.g. SVG) must be created via `create_element_ns`.
    let el_ws = match el.namespace {
        Some(ref ns) => document
            .create_element_ns(Some(ns.as_str()), tag)
            .expect("Problem creating web-sys element with namespace"),
        None => document
            .create_element(tag)
            .expect("Problem creating web-sys element"),
    };
    // Copy every vdom attribute onto the freshly created element.
    for (at, attr_value) in &el.attrs.vals {
        set_attr_value(&el_ws, at, attr_value);
    }
    // Mirror the namespace in an explicit `xmlns` attribute as well.
    if let Some(ns) = &el.namespace {
        el_ws
            .dyn_ref::<web_sys::Element>()
            .expect("Problem casting Node as Element while setting an attribute")
            .set_attribute("xmlns", ns.as_str())
            .expect("Problem setting xlmns attribute");
    }
    // Style is just an attribute in the actual Dom, but is handled specially in our vdom;
    // merge the different parts of style here.
    if el.style.vals.keys().len() > 0 {
        set_style(&el_ws, &el.style)
    }
    el_ws.into()
}
/// Similar to `attach_el_and_children`, but for text nodes.
pub fn attach_text_node(text: &mut Text, parent: &web_sys::Node) {
    // Temporarily take ownership of the websys node so it can be appended,
    // then put it back.
    let ws_node = text.node_ws.take().expect("Missing websys node for Text");
    parent
        .append_child(&ws_node)
        .expect("Problem appending text node");
    text.node_ws = Some(ws_node);
}
/// Similar to `attach_el_and_children`, but without attaching the element
/// itself. Useful for patching, where we want to insert the element at a
/// specific place.
pub fn attach_children<Ms>(el: &mut El<Ms>, mailbox: &Mailbox<Ms>) {
    let el_ws = el
        .node_ws
        .as_ref()
        .expect("Missing websys el in attach_children");
    // Attach each child under this element's websys node.
    for child in el.children.iter_mut() {
        match child {
            Node::Element(child_el) => attach_el_and_children(child_el, el_ws, mailbox),
            Node::Text(child_text) => attach_text_node(child_text, el_ws),
            Node::Empty => (),
        }
    }
}
/// Attaches the element, and all children, recursively. Only run this when creating a fresh vdom node, since
/// it performs a rerender of the el and all children; eg a potentially-expensive op.
/// This is where rendering occurs.
pub fn attach_el_and_children<Ms>(el: &mut El<Ms>, parent: &web_sys::Node, mailbox: &Mailbox<Ms>) {
    // No parent means we're operating on the top-level element; append it to the main div.
    // This is how we call this function externally, ie not through recursion.
    let el_ws = el
        .node_ws
        .as_ref()
        .expect("Missing websys el in attach_el_and_children");
    // Append the element
    // todo: This error can occur with raw html elements, but am unsure of the cause.
    if parent.append_child(el_ws).is_err() {
        crate::error("Minor problem with html element (append)");
    }
    // Wire up this element's event listeners.
    el.event_handler_manager
        .attach_listeners(el_ws.clone(), None, mailbox);
    // Recursively attach all children beneath this element's websys node.
    for child in &mut el.children {
        match child {
            // Raise the active level once per recursion.
            Node::Element(child_el) => attach_el_and_children(child_el, el_ws, mailbox),
            Node::Text(child_text) => attach_text_node(child_text, el_ws),
            Node::Empty => (),
        }
    }
    // Note: Call `set_default_element_state` after child appending,
    // otherwise it breaks autofocus in Firefox
    set_default_element_state(el_ws, el);
}
/// Apply element state that must be set imperatively rather than through
/// attributes: focus (for `autofocus`) and textarea initial value.
fn set_default_element_state<Ms>(el_ws: &web_sys::Node, el: &El<Ms>) {
    // @TODO handle also other Auto* attributes?
    // Set focus because of attribute "autofocus"
    if let Some(at_value) = el.attrs.vals.get(&At::AutoFocus) {
        match at_value {
            // Both `Some(..)` and bare `None` count as "autofocus present".
            AtValue::Some(_) | AtValue::None => el_ws
                .dyn_ref::<web_sys::HtmlElement>()
                .expect("Problem casting Node as HtmlElement while focusing")
                .focus()
                .expect("Problem focusing to an element."),
            AtValue::Ignored => (),
        }
    }
    // We set Textarea's initial value through non-standard attribute "value", so we have to simulate
    // the standard way (i.e. `<textarea>A Value</textarea>`)
    if let Some(textarea) = el_ws.dyn_ref::<web_sys::HtmlTextAreaElement>() {
        if let Some(AtValue::Some(value)) = el.attrs.vals.get(&At::Value) {
            textarea.set_value(value);
        }
    }
}
/// Recursively remove all children.
pub fn _remove_children(el: &web_sys::Node) {
    // Pop children off the end until none remain.
    loop {
        match el.last_child() {
            Some(child) => {
                el.remove_child(&child).expect("Problem removing child");
            }
            None => break,
        }
    }
}
/// Update the attributes, style, text, and events of an element. Does not
/// process children, and assumes the tag is the same. Assume we've identified
/// the most-correct pairing between new and old.
pub fn patch_el_details<Ms>(
    old: &mut El<Ms>,
    new: &mut El<Ms>,
    old_el_ws: &web_sys::Node,
    mailbox: &Mailbox<Ms>,
) {
    // Write attributes that are new or whose value changed.
    for (key, new_val) in &new.attrs.vals {
        match old.attrs.vals.get(key) {
            Some(old_val) => {
                // The value's different
                if old_val != new_val {
                    set_attr_value(old_el_ws, key, new_val);
                }
            }
            None => {
                set_attr_value(old_el_ws, key, new_val);
            }
        }
        // We handle value in the vdom using attributes, but the DOM needs
        // to use set_value or set_checked.
        match key {
            At::Value => match new_val {
                AtValue::Some(new_val) => crate::util::set_value(old_el_ws, new_val),
                AtValue::None | AtValue::Ignored => crate::util::set_value(old_el_ws, ""),
            },
            At::Checked => match new_val {
                AtValue::Some(_) | AtValue::None => crate::util::set_checked(old_el_ws, true),
                AtValue::Ignored => crate::util::set_checked(old_el_ws, false),
            },
            _ => Ok(()),
        }
        .unwrap_or_else(|err| {
            crate::error(err);
        })
    }
    // Remove attributes that aren't in the new vdom.
    for (key, old_val) in &old.attrs.vals {
        if new.attrs.vals.get(key).is_none() {
            // todo get to the bottom of this
            match old_el_ws.dyn_ref::<web_sys::Element>() {
                Some(el) => {
                    el.remove_attribute(key.as_str())
                        .expect("Removing an attribute");
                    // We handle value in the vdom using attributes, but the DOM needs
                    // to use set_value or set_checked.
                    match key {
                        At::Value => match old_val {
                            AtValue::Some(_) => crate::util::set_value(old_el_ws, ""),
                            _ => Ok(()),
                        },
                        At::Checked => match old_val {
                            AtValue::Some(_) | AtValue::None => {
                                crate::util::set_checked(old_el_ws, false)
                            }
                            _ => Ok(()),
                        },
                        _ => Ok(()),
                    }
                    .unwrap_or_else(|err| {
                        crate::error(err);
                    })
                }
                None => {
                    crate::error("Minor error on html element (setting attrs)");
                }
            }
        }
    }
    // Patch event handlers and listeners.
    new.event_handler_manager.attach_listeners(
        old_el_ws.clone(),
        Some(&mut old.event_handler_manager),
        mailbox,
    );
    // Patch style.
    if old.style != new.style {
        // We can't patch each part of style; rewrite the whole attribute.
        set_style(old_el_ws, &new.style)
    }
}
#[allow(clippy::too_many_lines)]
impl<Ms> From<&web_sys::Element> for El<Ms> {
    /// Create a vdom node from a `web_sys::Element`. Used in creating elements from html
    /// and markdown strings. Includes children, recursively added.
    #[allow(clippy::too_many_lines)]
    fn from(ws_el: &web_sys::Element) -> Self {
        // Result of tag_name is all caps, but tag From<String> expects lower.
        // Probably is more pure to match by xlmns attribute instead.
        let mut el = match ws_el.tag_name().to_lowercase().as_ref() {
            "svg" => El::empty_svg(ws_el.tag_name().to_lowercase().into()),
            _ => El::empty(ws_el.tag_name().to_lowercase().into()),
        };
        // Populate attributes
        let mut attrs = Attrs::empty();
        ws_el
            .get_attribute_names()
            .for_each(&mut |attr_name, _, _| {
                let attr_name = attr_name
                    .as_string()
                    .expect("problem converting attr to string");
                if let Some(attr_val) = ws_el.get_attribute(&attr_name) {
                    attrs.add(attr_name.into(), &attr_val);
                }
            });
        el.attrs = attrs;
        // todo This is the same list in `shortcuts::element_svg!`.
        // todo: Fix this repetition: Use `/scripts/populate_tags.rs`
        // todo to consolodate these lists.
        // NOTE(review): this list is compared against `tag_name().to_lowercase()`
        // below, so the mixed-case entries ("altGlyph", "textPath", "feBlend",
        // "clipPath", …) can never match — confirm intent. Entries like
        // "hathpath", "animage" and "r#use" also look like typos.
        let svg_tags = [
            "line",
            "rect",
            "circle",
            "ellipse",
            "polygon",
            "polyline",
            "mesh",
            "path",
            "defs",
            "g",
            "marker",
            "mask",
            "pattern",
            "svg",
            "switch",
            "symbol",
            "unknown",
            "linear_gradient",
            "radial_gradient",
            "mesh_gradient",
            "stop",
            "image",
            "r#use",
            "altGlyph",
            "altGlyphDef",
            "altGlyphItem",
            "glyph",
            "glyphRef",
            "textPath",
            "text",
            "tref",
            "tspan",
            "clipPath",
            "cursor",
            "filter",
            "foreignObject",
            "hathpath",
            "meshPatch",
            "meshRow",
            "view",
            "colorProfile",
            "animage",
            "animateColor",
            "animateMotion",
            "animateTransform",
            "discard",
            "mpath",
            "set",
            "desc",
            "metadata",
            "title",
            "feBlend",
            "feColorMatrix",
            "feComponentTransfer",
            "feComposite",
            "feConvolveMatrix",
            "feDiffuseLighting",
            "feDisplacementMap",
            "feDropShadow",
            "feFlood",
            "feFuncA",
            "feFuncB",
            "feFuncG",
            "feFuncR",
            "feGaussianBlur",
            "feImage",
            "feMerge",
            "feMergeNode",
            "feMorphology",
            "feOffset",
            "feSpecularLighting",
            "feTile",
            "feTurbulence",
            "font",
            "hkern",
            "vkern",
            "hatch",
            "solidcolor",
        ];
        // Known SVG tags get the SVG namespace even without a namespace URI.
        if svg_tags.contains(&ws_el.tag_name().to_lowercase().as_str()) {
            el.namespace = Some(Namespace::Svg);
        }
        if let Some(ns) = ws_el.namespace_uri() {
            // Prevent attaching a `xlmns` attribute to normal HTML elements.
            if ns != "http://www.w3.org/1999/xhtml" {
                el.namespace = Some(ns.into());
            }
        }
        // Recursively convert children; nodes Seed can't represent
        // (e.g. comments) are skipped by `node_from_ws`.
        let children = ws_el.child_nodes();
        for i in 0..children.length() {
            let child = children
                .get(i)
                .expect("Can't find child in raw html element.");
            if let Some(child_vdom) = node_from_ws(&child) {
                el.children.push(child_vdom);
            }
        }
        el
    }
}
impl<Ms> From<&web_sys::Element> for Node<Ms> {
fn from(ws_el: &web_sys::Element) -> Node<Ms> {
Node::Element(ws_el.into())
}
}
/// Create a vdom node from a `web_sys::Node`. Used in creating elements from html
/// and markdown strings. Includes children, recursively added.
pub fn node_from_ws<Ms>(node: &web_sys::Node) -> Option<Node<Ms>> {
    let node_type = node.node_type();
    if node_type == web_sys::Node::ELEMENT_NODE {
        // Element node: downcast and convert recursively.
        let ws_el = node
            .dyn_ref::<web_sys::Element>()
            .expect("Problem casting Node as Element");
        return Some(ws_el.into());
    }
    if node_type == web_sys::Node::TEXT_NODE {
        return Some(Node::new_text(
            node.text_content().expect("Can't find text"),
        ));
    }
    // Comments are silently dropped; anything else is unsupported.
    if node_type == web_sys::Node::COMMENT_NODE {
        return None;
    }
    crate::error(format!(
        "HTML node type {} is not supported by Seed",
        node_type
    ));
    None
}
/// Insert a new node into the specified part of the DOM tree.
/// With `next` present the node goes before it; otherwise it is appended.
pub(crate) fn insert_node(
    node: &web_sys::Node,
    parent: &web_sys::Node,
    next: Option<web_sys::Node>,
) {
    if let Some(next_node) = next {
        parent
            .insert_before(node, Some(&next_node))
            .expect("Problem inserting node");
    } else {
        parent.append_child(node).expect("Problem inserting node");
    }
}
/// Detach `node` from `parent` in the live DOM.
pub(crate) fn remove_node(node: &web_sys::Node, parent: &web_sys::Node) {
    let result = parent.remove_child(node);
    result.expect("Problem removing old el_ws when updating to empty");
}
/// Swap `old` for `new` under `parent` in the live DOM.
pub(crate) fn replace_child(new: &web_sys::Node, old: &web_sys::Node, parent: &web_sys::Node) {
    let outcome = parent.replace_child(new, old);
    outcome.expect("Problem replacing element");
}
| 34.159136 | 109 | 0.534537 |
7a93082c7288f2f8d70636b82bdf88c9760bd94b | 12,976 | #![allow(clippy::new_ret_no_self)]
use pyo3::{exceptions, prelude::*, types::PyAny};
use crate::{
document::{extract_value, Document},
get_field,
query::Query,
schema::Schema,
searcher::Searcher,
to_pyerr,
};
use tantivy as tv;
use tantivy::{
directory::MmapDirectory,
schema::{NamedFieldDocument, Term, Value},
};
// Default reload policy name used by `Index.config_reader`.
const RELOAD_POLICY: &str = "commit";
/// IndexWriter is the user entry-point to add documents to the index.
///
/// To create an IndexWriter first create an Index and call the writer() method
/// on the index object.
#[pyclass]
pub(crate) struct IndexWriter {
    // Underlying tantivy writer that performs the actual indexing work.
    inner_index_writer: tv::IndexWriter,
    // Schema used to validate/convert incoming documents before insertion.
    schema: tv::schema::Schema,
}
#[pymethods]
impl IndexWriter {
    /// Add a document to the index.
    ///
    /// If the indexing pipeline is full, this call may block.
    ///
    /// Returns an `opstamp`, which is an increasing integer that can be used
    /// by the client to align commits with its own document queue.
    /// The `opstamp` represents the number of documents that have been added
    /// since the creation of the index.
    pub fn add_document(&mut self, doc: &Document) -> PyResult<u64> {
        // Convert the Python-side document through tantivy's named-field
        // representation so the schema can validate it.
        let named_doc = NamedFieldDocument(doc.field_values.clone());
        let doc = self.schema.convert_named_doc(named_doc).map_err(to_pyerr)?;
        Ok(self.inner_index_writer.add_document(doc))
    }
    /// Helper for the `add_document` method, but passing a json string.
    ///
    /// If the indexing pipeline is full, this call may block.
    ///
    /// Returns an `opstamp`, which is an increasing integer that can be used
    /// by the client to align commits with its own document queue.
    /// The `opstamp` represents the number of documents that have been added
    /// since the creation of the index.
    pub fn add_json(&mut self, json: &str) -> PyResult<u64> {
        let doc = self.schema.parse_document(json).map_err(to_pyerr)?;
        let opstamp = self.inner_index_writer.add_document(doc);
        Ok(opstamp)
    }
    /// Commits all of the pending changes
    ///
    /// A call to commit blocks. After it returns, all of the document that
    /// were added since the last commit are published and persisted.
    ///
    /// In case of a crash or an hardware failure (as long as the hard disk is
    /// spared), it will be possible to resume indexing from this point.
    ///
    /// Returns the `opstamp` of the last document that made it in the commit.
    fn commit(&mut self) -> PyResult<u64> {
        self.inner_index_writer.commit().map_err(to_pyerr)
    }
    /// Rollback to the last commit
    ///
    /// This cancels all of the update that happened before after the last
    /// commit. After calling rollback, the index is in the same state as it
    /// was after the last commit.
    fn rollback(&mut self) -> PyResult<u64> {
        self.inner_index_writer.rollback().map_err(to_pyerr)
    }
    /// Detect and removes the files that are not used by the index anymore.
    fn garbage_collect_files(&mut self) -> PyResult<()> {
        use futures::executor::block_on;
        // The tantivy API is async; block synchronously since this is
        // called from Python.
        block_on(self.inner_index_writer.garbage_collect_files())
            .map_err(to_pyerr)?;
        Ok(())
    }
    /// The opstamp of the last successful commit.
    ///
    /// This is the opstamp the index will rollback to if there is a failure
    /// like a power surge.
    ///
    /// This is also the opstamp of the commit that is currently available
    /// for searchers.
    #[getter]
    fn commit_opstamp(&self) -> u64 {
        self.inner_index_writer.commit_opstamp()
    }
    /// Delete all documents containing a given term.
    ///
    /// Args:
    ///     field_name (str): The field name for which we want to filter deleted docs.
    ///     field_value (PyAny): Python object with the value we want to filter.
    ///
    /// If the field_name is not on the schema raises ValueError exception.
    /// If the field_value is not supported raises Exception.
    fn delete_documents(
        &mut self,
        field_name: &str,
        field_value: &PyAny,
    ) -> PyResult<u64> {
        let field = get_field(&self.schema, field_name)?;
        let value = extract_value(field_value)?;
        // Build a tantivy `Term` matching the field's value type; bytes and
        // pre-tokenized fields cannot be used as delete terms.
        let term = match value {
            Value::Str(text) => Term::from_field_text(field, &text),
            Value::U64(num) => Term::from_field_u64(field, num),
            Value::I64(num) => Term::from_field_i64(field, num),
            Value::F64(num) => Term::from_field_f64(field, num),
            Value::Date(d) => Term::from_field_date(field, &d),
            Value::Facet(facet) => Term::from_facet(field, &facet),
            Value::Bytes(_) => {
                return Err(exceptions::PyValueError::new_err(format!(
                    "Field `{}` is bytes type not deletable.",
                    field_name
                )))
            }
            Value::PreTokStr(_pretok) => {
                return Err(exceptions::PyValueError::new_err(format!(
                    "Field `{}` is pretokenized. This is not authorized for delete.",
                    field_name
                )))
            }
        };
        Ok(self.inner_index_writer.delete_term(term))
    }
}
/// Create a new index object.
///
/// Args:
///     schema (Schema): The schema of the index.
///     path (str, optional): The path where the index should be stored. If
///         no path is provided, the index will be stored in memory.
///     reuse (bool, optional): Should we open an existing index if one exists
///         or always create a new one.
///
/// If an index already exists it will be opened and reused. Raises OSError
/// if there was a problem during the opening or creation of the index.
#[pyclass]
pub(crate) struct Index {
    // The wrapped tantivy index.
    pub(crate) index: tv::Index,
    // Pooled reader used to produce Searchers; reconfigured by config_reader.
    reader: tv::IndexReader,
}
#[pymethods]
impl Index {
#[staticmethod]
fn open(path: &str) -> PyResult<Index> {
let index = tv::Index::open_in_dir(path).map_err(to_pyerr)?;
let reader = index.reader().map_err(to_pyerr)?;
Ok(Index { index, reader })
}
    #[new]
    #[args(reuse = true)]
    fn new(schema: &Schema, path: Option<&str>, reuse: bool) -> PyResult<Self> {
        // With a path the index is persisted via mmap; without one it lives
        // entirely in RAM.
        let index = match path {
            Some(p) => {
                let directory = MmapDirectory::open(p).map_err(to_pyerr)?;
                // `reuse` chooses between opening an existing index in the
                // directory and always creating a fresh one.
                if reuse {
                    tv::Index::open_or_create(directory, schema.inner.clone())
                } else {
                    tv::Index::create(
                        directory,
                        schema.inner.clone(),
                        tv::IndexSettings::default(),
                    )
                }
                .map_err(to_pyerr)?
            }
            None => tv::Index::create_in_ram(schema.inner.clone()),
        };
        let reader = index.reader().map_err(to_pyerr)?;
        Ok(Index { index, reader })
    }
    /// Create a `IndexWriter` for the index.
    ///
    /// The writer will be multithreaded and the provided heap size will be
    /// split between the given number of threads.
    ///
    /// Args:
    ///     overall_heap_size (int, optional): The total target memory usage of
    ///         the writer, can't be less than 3000000.
    ///     num_threads (int, optional): The number of threads that the writer
    ///         should use. If this value is 0, tantivy will choose
    ///         automatically the number of threads.
    ///
    /// Raises ValueError if there was an error while creating the writer.
    #[args(heap_size = 3000000, num_threads = 0)]
    fn writer(
        &self,
        heap_size: usize,
        num_threads: usize,
    ) -> PyResult<IndexWriter> {
        // 0 threads means "let tantivy decide"; otherwise request exactly
        // the given count.
        let writer = match num_threads {
            0 => self.index.writer(heap_size),
            _ => self.index.writer_with_num_threads(num_threads, heap_size),
        }
        .map_err(to_pyerr)?;
        let schema = self.index.schema();
        Ok(IndexWriter {
            inner_index_writer: writer,
            schema,
        })
    }
/// Configure the index reader.
///
/// Args:
/// reload_policy (str, optional): The reload policy that the
/// IndexReader should use. Can be `Manual` or `OnCommit`.
/// num_searchers (int, optional): The number of searchers that the
/// reader should create.
#[args(reload_policy = "RELOAD_POLICY", num_searchers = 0)]
fn config_reader(
&mut self,
reload_policy: &str,
num_searchers: usize,
) -> Result<(), PyErr> {
let reload_policy = reload_policy.to_lowercase();
let reload_policy = match reload_policy.as_ref() {
"commit" => tv::ReloadPolicy::OnCommit,
"on-commit" => tv::ReloadPolicy::OnCommit,
"oncommit" => tv::ReloadPolicy::OnCommit,
"manual" => tv::ReloadPolicy::Manual,
_ => return Err(exceptions::PyValueError::new_err(
"Invalid reload policy, valid choices are: 'manual' and 'OnCommit'"
))
};
let builder = self.index.reader_builder();
let builder = builder.reload_policy(reload_policy);
let builder = if num_searchers > 0 {
builder.num_searchers(num_searchers)
} else {
builder
};
self.reader = builder.try_into().map_err(to_pyerr)?;
Ok(())
}
    /// Acquires a Searcher from the searcher pool.
    ///
    /// If no searcher is available during the call, note that
    /// this call will block until one is made available.
    ///
    /// Searchers are automatically released back into the pool when
    /// they are dropped. If you observe this function to block forever
    /// you probably should configure the Index to have a larger
    /// searcher pool, or you are holding references to previous searchers
    /// forever.
    fn searcher(&self, py: Python) -> Searcher {
        Searcher {
            // Release the GIL while (possibly) waiting on the pool.
            inner: py.allow_threads(|| self.reader.searcher()),
        }
    }
/// Check if the given path contains an existing index.
/// Args:
///     path: The path where tantivy will search for an index.
///
/// Returns True if an index exists at the given path, False otherwise.
///
/// Raises OSError if the directory cannot be opened.
#[staticmethod]
fn exists(path: &str) -> PyResult<bool> {
    let directory = MmapDirectory::open(path).map_err(to_pyerr)?;
    // Propagate the error as a Python exception instead of panicking the
    // interpreter (previously this was `.unwrap()`).
    tv::Index::exists(&directory).map_err(to_pyerr)
}
/// The schema of the current index.
#[getter]
fn schema(&self) -> Schema {
    Schema {
        inner: self.index.schema(),
    }
}
/// Update searchers so that they reflect the state of the last .commit().
///
/// If you set up the the reload policy to be on 'commit' (which is the
/// default) every commit should be rapidly reflected on your IndexReader
/// and you should not need to call reload() at all.
fn reload(&self) -> PyResult<()> {
self.reader.reload().map_err(to_pyerr)
}
/// Parse a query
///
/// Args:
///     query: the query, following the tantivy query language.
///     default_field_names (List[str], optional): A list of field names used
///         to search if no field is specified in the query. Defaults to all
///         indexed fields of the schema.
///
/// Raises ValueError if a named field is unknown or not indexed.
// NOTE: a stray `#[args(reload_policy = "RELOAD_POLICY")]` attribute was
// removed here; this function has no `reload_policy` parameter (it was a
// copy-paste from `config_reader`).
pub fn parse_query(
    &self,
    query: &str,
    default_field_names: Option<Vec<String>>,
) -> PyResult<Query> {
    let schema = self.index.schema();
    let mut default_fields = vec![];
    if let Some(default_field_names_vec) = default_field_names {
        // Explicit field list: every name must exist and be indexed.
        for default_field_name in &default_field_names_vec {
            let field = schema.get_field(default_field_name).ok_or_else(|| {
                exceptions::PyValueError::new_err(format!(
                    "Field `{}` is not defined in the schema.",
                    default_field_name
                ))
            })?;
            if !schema.get_field_entry(field).is_indexed() {
                return Err(exceptions::PyValueError::new_err(format!(
                    "Field `{}` is not set as indexed in the schema.",
                    default_field_name
                )));
            }
            default_fields.push(field);
        }
    } else {
        // No list given: default to every indexed field.
        for (field, field_entry) in schema.fields() {
            if field_entry.is_indexed() {
                default_fields.push(field);
            }
        }
    }
    let parser =
        tv::query::QueryParser::for_index(&self.index, default_fields);
    let query = parser.parse_query(query).map_err(to_pyerr)?;
    Ok(Query { inner: query })
}
}
| 36.863636 | 86 | 0.584309 |
e5fd8f89c9463b005d37cb3edaaebe5881f1c834 | 1,466 | use components::module::_common::edit::prelude::*;
use std::rc::Rc;
use shared::domain::jig::{
JigId,
Jig,
module::{
ModuleId,
body::{
ThemeChoice,
_groups::design::Trace as RawTrace,
Audio,
Instructions,
cover::{Step, Content as RawContent, ModuleData as RawData}
}
}
};
use super::{
state::*,
footer::state::Footer,
header::state::Header,
main::state::Main,
overlay::state::Overlay,
sidebar::state::Sidebar
};
use dominator::clone;
use futures_signals::signal::{ReadOnlyMutable, Mutable};
use utils::prelude::*;
use components::{
text_editor::state::State as TextEditorState,
audio::mixer::AudioMixer,
};
/// Build the cover module's `BaseInit` from raw module data.
///
/// When the data was force-loaded from raw (`InitSource::ForceRaw`), the
/// debug settings may pin the editor to a specific step; otherwise no step
/// is forced. No theme is ever forced.
pub async fn init_from_raw(init_args: BaseInitFromRawArgs<RawData, (), Step>) -> BaseInit<Step, Base, Main, Sidebar, Header, Footer, Overlay> {
    let force_step = if init_args.source == InitSource::ForceRaw {
        crate::debug::settings().step
    } else {
        None
    };
    let base = Base::new(init_args).await;
    // Every section shares the same base state.
    let main = Rc::new(Main::new(base.clone()));
    let sidebar = Rc::new(Sidebar::new(base.clone()));
    let header = Rc::new(Header::new(base.clone()));
    let footer = Rc::new(Footer::new(base.clone()));
    let overlay = Rc::new(Overlay::new(base.clone()));
    BaseInit {
        force_step,
        force_theme: None,
        base,
        main,
        sidebar,
        header,
        footer,
        overlay,
    }
}
| 25.719298 | 143 | 0.57708 |
9b36d573a1507ddae61d3da60ff033e42d913844 | 12,989 | use std::convert::TryFrom;
use std::fmt;
use http::{request::Parts, Method, Request as HttpRequest};
use serde::Serialize;
#[cfg(feature = "json")]
use serde_json;
use serde_urlencoded;
use url::Url;
use web_sys::RequestCredentials;
use super::{Body, Client, Response};
use crate::header::{HeaderMap, HeaderName, HeaderValue, CONTENT_TYPE};
/// A request which can be executed with `Client::execute()`.
pub struct Request {
    // HTTP method (GET, POST, ...).
    method: Method,
    // Target URL.
    url: Url,
    // Request headers.
    headers: HeaderMap,
    // Optional request body; `None` means an empty body.
    body: Option<Body>,
    // Whether the browser `fetch` runs in CORS mode. Defaults to `true` in
    // `Request::new`; cleared by `RequestBuilder::fetch_mode_no_cors`.
    pub(super) cors: bool,
    // Credentials mode for `fetch`, if explicitly configured via one of the
    // `RequestBuilder::fetch_credentials_*` methods.
    pub(super) credentials: Option<RequestCredentials>,
}
/// A builder to construct the properties of a `Request`.
pub struct RequestBuilder {
    // Client that will eventually execute the request.
    client: Client,
    // The request under construction. Builder methods never panic: the first
    // error is recorded here and surfaced by `build()`/`send()`.
    request: crate::Result<Request>,
}
impl Request {
/// Constructs a new request.
#[inline]
pub fn new(method: Method, url: Url) -> Self {
Request {
method,
url,
headers: HeaderMap::new(),
body: None,
cors: true,
credentials: None,
}
}
/// Get the method.
#[inline]
pub fn method(&self) -> &Method {
&self.method
}
/// Get a mutable reference to the method.
#[inline]
pub fn method_mut(&mut self) -> &mut Method {
&mut self.method
}
/// Get the url.
#[inline]
pub fn url(&self) -> &Url {
&self.url
}
/// Get a mutable reference to the url.
#[inline]
pub fn url_mut(&mut self) -> &mut Url {
&mut self.url
}
/// Get the headers.
#[inline]
pub fn headers(&self) -> &HeaderMap {
&self.headers
}
/// Get a mutable reference to the headers.
#[inline]
pub fn headers_mut(&mut self) -> &mut HeaderMap {
&mut self.headers
}
/// Get the body.
#[inline]
pub fn body(&self) -> Option<&Body> {
self.body.as_ref()
}
/// Get a mutable reference to the body.
#[inline]
pub fn body_mut(&mut self) -> &mut Option<Body> {
&mut self.body
}
/// Attempts to clone the `Request`.
///
/// None is returned if a body is which can not be cloned.
pub fn try_clone(&self) -> Option<Request> {
let body = match self.body.as_ref() {
Some(ref body) => Some(body.try_clone()?),
None => None,
};
Some(Self {
method: self.method.clone(),
url: self.url.clone(),
headers: self.headers.clone(),
body,
cors: self.cors,
credentials: self.credentials.clone(),
})
}
}
impl RequestBuilder {
    pub(super) fn new(client: Client, request: crate::Result<Request>) -> RequestBuilder {
        RequestBuilder { client, request }
    }

    /// Modify the query string of the URL.
    ///
    /// Modifies the URL of this request, adding the parameters provided.
    /// This method appends and does not overwrite. This means that it can
    /// be called multiple times and that existing query parameters are not
    /// overwritten if the same key is used. The key will simply show up
    /// twice in the query string.
    /// Calling `.query([("foo", "a"), ("foo", "b")])` gives `"foo=a&foo=b"`.
    ///
    /// # Note
    /// This method does not support serializing a single key-value
    /// pair. Instead of using `.query(("key", "val"))`, use a sequence, such
    /// as `.query(&[("key", "val")])`. It's also possible to serialize structs
    /// and maps into a key-value pair.
    ///
    /// # Errors
    /// This method will fail if the object you provide cannot be serialized
    /// into a query string.
    pub fn query<T: Serialize + ?Sized>(mut self, query: &T) -> RequestBuilder {
        // Builder methods never panic: the first error is stashed in `error`
        // and moved into `self.request` at the end.
        let mut error = None;
        if let Ok(ref mut req) = self.request {
            let url = req.url_mut();
            let mut pairs = url.query_pairs_mut();
            let serializer = serde_urlencoded::Serializer::new(&mut pairs);
            if let Err(err) = query.serialize(serializer) {
                error = Some(crate::error::builder(err));
            }
        }
        // Separate scope on purpose: `pairs` above mutably borrows the URL,
        // so the empty-query cleanup needs a fresh borrow here.
        if let Ok(ref mut req) = self.request {
            if let Some("") = req.url().query() {
                req.url_mut().set_query(None);
            }
        }
        if let Some(err) = error {
            self.request = Err(err);
        }
        self
    }

    /// Send a form body.
    ///
    /// Serializes `form` as `application/x-www-form-urlencoded` and sets the
    /// matching `Content-Type` header.
    pub fn form<T: Serialize + ?Sized>(mut self, form: &T) -> RequestBuilder {
        let mut error = None;
        if let Ok(ref mut req) = self.request {
            match serde_urlencoded::to_string(form) {
                Ok(body) => {
                    req.headers_mut().insert(
                        CONTENT_TYPE,
                        HeaderValue::from_static("application/x-www-form-urlencoded"),
                    );
                    *req.body_mut() = Some(body.into());
                }
                Err(err) => error = Some(crate::error::builder(err)),
            }
        }
        if let Some(err) = error {
            self.request = Err(err);
        }
        self
    }

    #[cfg(feature = "json")]
    #[cfg_attr(docsrs, doc(cfg(feature = "json")))]
    /// Set the request json
    ///
    /// Serializes `json` with `serde_json` and sets
    /// `Content-Type: application/json`.
    pub fn json<T: Serialize + ?Sized>(mut self, json: &T) -> RequestBuilder {
        let mut error = None;
        if let Ok(ref mut req) = self.request {
            match serde_json::to_vec(json) {
                Ok(body) => {
                    req.headers_mut()
                        .insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
                    *req.body_mut() = Some(body.into());
                }
                Err(err) => error = Some(crate::error::builder(err)),
            }
        }
        if let Some(err) = error {
            self.request = Err(err);
        }
        self
    }

    /// Enable HTTP bearer authentication.
    ///
    /// Sets the `Authorization` header to `Bearer <token>`.
    pub fn bearer_auth<T>(self, token: T) -> RequestBuilder
    where
        T: fmt::Display,
    {
        let header_value = format!("Bearer {}", token);
        self.header(crate::header::AUTHORIZATION, header_value)
    }

    /// Set the request body.
    pub fn body<T: Into<Body>>(mut self, body: T) -> RequestBuilder {
        if let Ok(ref mut req) = self.request {
            req.body = Some(body.into());
        }
        self
    }

    /// Set a multipart form body built from `multipart`.
    #[cfg(feature = "multipart")]
    #[cfg_attr(docsrs, doc(cfg(feature = "multipart")))]
    pub fn multipart(mut self, multipart: super::multipart::Form) -> RequestBuilder {
        if let Ok(ref mut req) = self.request {
            *req.body_mut() = Some(Body::from_form(multipart))
        }
        self
    }

    /// Add a `Header` to this Request.
    pub fn header<K, V>(mut self, key: K, value: V) -> RequestBuilder
    where
        HeaderName: TryFrom<K>,
        <HeaderName as TryFrom<K>>::Error: Into<http::Error>,
        HeaderValue: TryFrom<V>,
        <HeaderValue as TryFrom<V>>::Error: Into<http::Error>,
    {
        let mut error = None;
        if let Ok(ref mut req) = self.request {
            // Both the name and the value must convert; the first failure is
            // recorded and surfaces when the request is built/sent.
            match <HeaderName as TryFrom<K>>::try_from(key) {
                Ok(key) => match <HeaderValue as TryFrom<V>>::try_from(value) {
                    Ok(value) => {
                        // `append` keeps any previously-set values for this
                        // header name.
                        req.headers_mut().append(key, value);
                    }
                    Err(e) => error = Some(crate::error::builder(e.into())),
                },
                Err(e) => error = Some(crate::error::builder(e.into())),
            };
        }
        if let Some(err) = error {
            self.request = Err(err);
        }
        self
    }

    /// Add a set of Headers to the existing ones on this Request.
    ///
    /// The headers will be merged in to any already set.
    pub fn headers(mut self, headers: crate::header::HeaderMap) -> RequestBuilder {
        if let Ok(ref mut req) = self.request {
            crate::util::replace_headers(req.headers_mut(), headers);
        }
        self
    }

    /// Disable CORS on fetching the request.
    ///
    /// # WASM
    ///
    /// This option is only effective with WebAssembly target.
    ///
    /// The [request mode][mdn] will be set to 'no-cors'.
    ///
    /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/API/Request/mode
    pub fn fetch_mode_no_cors(mut self) -> RequestBuilder {
        if let Ok(ref mut req) = self.request {
            req.cors = false;
        }
        self
    }

    /// Set fetch credentials to 'same-origin'
    ///
    /// # WASM
    ///
    /// This option is only effective with WebAssembly target.
    ///
    /// The [request credentials][mdn] will be set to 'same-origin'.
    ///
    /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/API/Request/credentials
    pub fn fetch_credentials_same_origin(mut self) -> RequestBuilder {
        if let Ok(ref mut req) = self.request {
            req.credentials = Some(RequestCredentials::SameOrigin);
        }
        self
    }

    /// Set fetch credentials to 'include'
    ///
    /// # WASM
    ///
    /// This option is only effective with WebAssembly target.
    ///
    /// The [request credentials][mdn] will be set to 'include'.
    ///
    /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/API/Request/credentials
    pub fn fetch_credentials_include(mut self) -> RequestBuilder {
        if let Ok(ref mut req) = self.request {
            req.credentials = Some(RequestCredentials::Include);
        }
        self
    }

    /// Set fetch credentials to 'omit'
    ///
    /// # WASM
    ///
    /// This option is only effective with WebAssembly target.
    ///
    /// The [request credentials][mdn] will be set to 'omit'.
    ///
    /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/API/Request/credentials
    pub fn fetch_credentials_omit(mut self) -> RequestBuilder {
        if let Ok(ref mut req) = self.request {
            req.credentials = Some(RequestCredentials::Omit);
        }
        self
    }

    /// Build a `Request`, which can be inspected, modified and executed with
    /// `Client::execute()`.
    pub fn build(self) -> crate::Result<Request> {
        // Surfaces the first error recorded by earlier builder calls, if any.
        self.request
    }

    /// Constructs the Request and sends it to the target URL, returning a
    /// future Response.
    ///
    /// # Errors
    ///
    /// This method fails if there was an error while sending request.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use reqwest::Error;
    /// #
    /// # async fn run() -> Result<(), Error> {
    /// let response = reqwest::Client::new()
    ///     .get("https://hyper.rs")
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn send(self) -> crate::Result<Response> {
        let req = self.request?;
        self.client.execute_request(req).await
    }

    /// Attempt to clone the RequestBuilder.
    ///
    /// `None` is returned if the RequestBuilder can not be cloned.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use reqwest::Error;
    /// #
    /// # fn run() -> Result<(), Error> {
    /// let client = reqwest::Client::new();
    /// let builder = client.post("http://httpbin.org/post")
    ///     .body("from a &str!");
    /// let clone = builder.try_clone();
    /// assert!(clone.is_some());
    /// # Ok(())
    /// # }
    /// ```
    pub fn try_clone(&self) -> Option<RequestBuilder> {
        self.request
            .as_ref()
            .ok()
            .and_then(|req| req.try_clone())
            .map(|req| RequestBuilder {
                client: self.client.clone(),
                request: Ok(req),
            })
    }
}
impl fmt::Debug for Request {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Shared field formatting lives in `fmt_request_fields`.
        let mut builder = f.debug_struct("Request");
        fmt_request_fields(&mut builder, self).finish()
    }
}
impl fmt::Debug for RequestBuilder {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_struct("RequestBuilder");
        // A failed builder prints the stored error instead of request fields.
        match &self.request {
            Ok(req) => fmt_request_fields(&mut builder, req).finish(),
            Err(err) => builder.field("error", err).finish(),
        }
    }
}
/// Append the common `Request` fields to a debug-struct builder.
fn fmt_request_fields<'a, 'b>(
    f: &'a mut fmt::DebugStruct<'a, 'b>,
    req: &Request,
) -> &'a mut fmt::DebugStruct<'a, 'b> {
    let f = f.field("method", &req.method);
    let f = f.field("url", &req.url);
    f.field("headers", &req.headers)
}
impl<T> TryFrom<HttpRequest<T>> for Request
where
T: Into<Body>,
{
type Error = crate::Error;
fn try_from(req: HttpRequest<T>) -> crate::Result<Self> {
let (parts, body) = req.into_parts();
let Parts {
method,
uri,
headers,
..
} = parts;
let url = Url::parse(&uri.to_string()).map_err(crate::error::builder)?;
Ok(Request {
method,
url,
headers,
body: Some(body.into()),
cors: true,
credentials: None,
})
}
}
| 29.655251 | 92 | 0.534837 |
285616e7be7893c63fbbb6fa0761169a36ef39a6 | 1,689 | #[doc = "Reader of register _0_RIS"]
pub type R = crate::R<u32, super::_0_RIS>;
// Field reader aliases: each one exposes a single status bit of the `_0_RIS`
// (raw interrupt status) register as a `bool` (see `impl R` below).
#[doc = "Reader of field `INTCNTZERO`"]
pub type INTCNTZERO_R = crate::R<bool, bool>;
#[doc = "Reader of field `INTCNTLOAD`"]
pub type INTCNTLOAD_R = crate::R<bool, bool>;
#[doc = "Reader of field `INTCMPAU`"]
pub type INTCMPAU_R = crate::R<bool, bool>;
#[doc = "Reader of field `INTCMPAD`"]
pub type INTCMPAD_R = crate::R<bool, bool>;
#[doc = "Reader of field `INTCMPBU`"]
pub type INTCMPBU_R = crate::R<bool, bool>;
#[doc = "Reader of field `INTCMPBD`"]
pub type INTCMPBD_R = crate::R<bool, bool>;
impl R {
    // Each accessor extracts one bit of the raw register value; the bit is
    // shifted down, masked, and compared against 1 to form the bool reader.
    #[doc = "Bit 0 - Counter=0 Interrupt Status"]
    #[inline(always)]
    pub fn intcntzero(&self) -> INTCNTZERO_R {
        INTCNTZERO_R::new((self.bits & 0x01) == 0x01)
    }
    #[doc = "Bit 1 - Counter=Load Interrupt Status"]
    #[inline(always)]
    pub fn intcntload(&self) -> INTCNTLOAD_R {
        INTCNTLOAD_R::new(((self.bits >> 1) & 0x01) == 0x01)
    }
    #[doc = "Bit 2 - Comparator A Up Interrupt Status"]
    #[inline(always)]
    pub fn intcmpau(&self) -> INTCMPAU_R {
        INTCMPAU_R::new(((self.bits >> 2) & 0x01) == 0x01)
    }
    #[doc = "Bit 3 - Comparator A Down Interrupt Status"]
    #[inline(always)]
    pub fn intcmpad(&self) -> INTCMPAD_R {
        INTCMPAD_R::new(((self.bits >> 3) & 0x01) == 0x01)
    }
    #[doc = "Bit 4 - Comparator B Up Interrupt Status"]
    #[inline(always)]
    pub fn intcmpbu(&self) -> INTCMPBU_R {
        INTCMPBU_R::new(((self.bits >> 4) & 0x01) == 0x01)
    }
    #[doc = "Bit 5 - Comparator B Down Interrupt Status"]
    #[inline(always)]
    pub fn intcmpbd(&self) -> INTCMPBD_R {
        INTCMPBD_R::new(((self.bits >> 5) & 0x01) == 0x01)
    }
}
| 35.93617 | 57 | 0.597987 |
50988e77a83609001c749099cb3b6e445ea7dd52 | 1,603 | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use alloc::collections::VecDeque;
use alloc::vec::Vec;
use move_binary_format::errors::PartialVMResult;
use move_core_types::gas_schedule::ONE_GAS_UNIT;
#[allow(unused_imports)]
use move_vm_types::values::{values_impl::debug::print_reference, Reference};
use move_vm_types::{
loaded_data::runtime_types::Type,
natives::function::{NativeContext, NativeResult},
values::Value,
};
use smallvec::smallvec;
/// Native implementation of the Move `Debug::print` function.
///
/// Expects exactly one type argument and a single argument (a `Reference` to
/// the value to print). When built with the `debug_module` feature, the
/// referenced value is rendered and written to stdout with a `[debug]`
/// prefix; otherwise the body is a no-op. Either way the call charges
/// `ONE_GAS_UNIT` and returns no values.
#[allow(unused_mut)]
#[allow(unused_variables)]
pub fn native_print(
    context: &mut impl NativeContext,
    mut ty_args: Vec<Type>,
    mut args: VecDeque<Value>,
) -> PartialVMResult<NativeResult> {
    debug_assert!(ty_args.len() == 1);
    debug_assert!(args.len() == 1);
    // No-op if the feature flag is not present.
    #[cfg(feature = "debug_module")]
    {
        // The type argument is popped but not used for formatting here.
        let ty = ty_args.pop().unwrap();
        let r = pop_arg!(args, Reference);
        let mut buf = String::new();
        print_reference(&mut buf, &r)?;
        println!("[debug] {}", buf);
    }
    Ok(NativeResult::ok(ONE_GAS_UNIT, smallvec![]))
}
/// Native implementation of the Move `Debug::print_stack_trace` function.
///
/// Takes no type arguments and no value arguments. When built with the
/// `debug_module` feature, the context renders the current VM stack trace
/// and it is printed to stdout; otherwise the body is a no-op. Either way
/// the call charges `ONE_GAS_UNIT` and returns no values.
#[allow(unused_variables)]
pub fn native_print_stack_trace(
    context: &mut impl NativeContext,
    ty_args: Vec<Type>,
    args: VecDeque<Value>,
) -> PartialVMResult<NativeResult> {
    debug_assert!(ty_args.is_empty());
    debug_assert!(args.is_empty());
    #[cfg(feature = "debug_module")]
    {
        let mut s = String::new();
        context.print_stack_trace(&mut s)?;
        println!("{}", s);
    }
    Ok(NativeResult::ok(ONE_GAS_UNIT, smallvec![]))
}
| 27.169492 | 76 | 0.658141 |
011d8bce19a8554944b9d64b70c6bd0c53f6895d | 9,864 | use super::stat::WorkerLocalStat;
use super::work_bucket::*;
use super::*;
use crate::mmtk::MMTK;
use crate::util::copy::GCWorkerCopyContext;
use crate::util::opaque_pointer::*;
use crate::vm::{Collection, GCThreadContext, VMBinding};
use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut};
use crossbeam::deque::{self, Stealer};
use crossbeam::queue::ArrayQueue;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::Sender;
use std::sync::{Arc, Mutex};
/// The part shared between a GCWorker and the scheduler.
/// This structure is used for communication, e.g. adding new work packets.
pub struct GCWorkerShared<VM: VMBinding> {
    /// Worker-local statistics data.
    // Guarded by AtomicRefCell so borrows are checked at runtime; see
    /// `borrow_stat`/`borrow_stat_mut` below.
    stat: AtomicRefCell<WorkerLocalStat<VM>>,
    /// A queue of GCWork that can only be processed by the owned thread.
    pub designated_work: ArrayQueue<Box<dyn GCWork<VM>>>,
    /// Handle for stealing packets from the current worker
    pub stealer: Option<Stealer<Box<dyn GCWork<VM>>>>,
}
impl<VM: VMBinding> GCWorkerShared<VM> {
pub fn new(stealer: Option<Stealer<Box<dyn GCWork<VM>>>>) -> Self {
Self {
stat: Default::default(),
designated_work: ArrayQueue::new(16),
stealer,
}
}
}
/// A GC worker. This part is privately owned by a worker thread.
/// The GC controller also has an embedded `GCWorker` because it may also execute work packets.
pub struct GCWorker<VM: VMBinding> {
    /// The VM-specific thread-local state of the GC thread.
    pub tls: VMWorkerThread,
    /// The ordinal of the worker, numbered from 0 to the number of workers minus one.
    /// 0 if it is the embedded worker of the GC controller thread.
    pub ordinal: usize,
    /// The reference to the scheduler.
    scheduler: Arc<GCWorkScheduler<VM>>,
    /// The copy context, used to implement copying GC.
    // Starts as a non-copy placeholder; the real context is installed in
    // `GCWorker::run` once the worker thread has its TLS.
    copy: GCWorkerCopyContext<VM>,
    /// The sending end of the channel to send message to the controller thread.
    pub sender: Sender<CoordinatorMessage<VM>>,
    /// The reference to the MMTk instance.
    pub mmtk: &'static MMTK<VM>,
    /// True if this struct is the embedded GCWorker of the controller thread.
    /// False if this struct belongs to a standalone GCWorker thread.
    is_coordinator: bool,
    /// Reference to the shared part of the GC worker. It is used for synchronization.
    pub shared: Arc<GCWorkerShared<VM>>,
    /// Local work packet queue.
    pub local_work_buffer: deque::Worker<Box<dyn GCWork<VM>>>,
}
// SAFETY(review): GCWorkerShared is accessed from both the owning worker and
// other GC threads. `designated_work` (crossbeam ArrayQueue) and `stealer`
// are built for cross-thread use, and `stat` is guarded by AtomicRefCell.
// Presumably WorkerLocalStat holds no thread-affine state — TODO confirm.
unsafe impl<VM: VMBinding> Sync for GCWorkerShared<VM> {}
unsafe impl<VM: VMBinding> Send for GCWorkerShared<VM> {}
// Error message for borrowing `GCWorkerShared::stat`.
const STAT_BORROWED_MSG: &str = "GCWorkerShared.stat is already borrowed. This may happen if \
    the mutator calls harness_begin or harness_end while the GC is running.";

impl<VM: VMBinding> GCWorkerShared<VM> {
    /// Immutably borrow the worker-local statistics.
    /// Panics with `STAT_BORROWED_MSG` if a mutable borrow is live.
    pub fn borrow_stat(&self) -> AtomicRef<WorkerLocalStat<VM>> {
        self.stat.try_borrow().expect(STAT_BORROWED_MSG)
    }

    /// Mutably borrow the worker-local statistics.
    /// Panics with `STAT_BORROWED_MSG` if any other borrow is live.
    pub fn borrow_stat_mut(&self) -> AtomicRefMut<WorkerLocalStat<VM>> {
        self.stat.try_borrow_mut().expect(STAT_BORROWED_MSG)
    }
}
impl<VM: VMBinding> GCWorker<VM> {
    /// Create a GC worker.
    ///
    /// The copy context starts as a non-copy placeholder; the real context is
    /// installed in [`GCWorker::run`] once the worker thread has its TLS.
    pub fn new(
        mmtk: &'static MMTK<VM>,
        ordinal: usize,
        scheduler: Arc<GCWorkScheduler<VM>>,
        is_coordinator: bool,
        sender: Sender<CoordinatorMessage<VM>>,
        shared: Arc<GCWorkerShared<VM>>,
        local_work_buffer: deque::Worker<Box<dyn GCWork<VM>>>,
    ) -> Self {
        Self {
            tls: VMWorkerThread(VMThread::UNINITIALIZED),
            ordinal,
            // Placeholder; replaced in `run()`.
            copy: GCWorkerCopyContext::new_non_copy(),
            sender,
            scheduler,
            mmtk,
            is_coordinator,
            shared,
            local_work_buffer,
        }
    }

    /// Maximum number of work packets cached in the local queue before new
    /// packets overflow to the global bucket.
    const LOCALLY_CACHED_WORK_PACKETS: usize = 16;

    /// Whether a packet destined for `bucket` must be published to the global
    /// bucket instead of this worker's local queue: either the bucket is not
    /// activated yet, or the local queue already holds
    /// `LOCALLY_CACHED_WORK_PACKETS` packets.
    ///
    /// Shared by `add_work` and `add_work_prioritized` so the two conditions
    /// cannot drift apart.
    #[inline]
    fn must_add_globally(&self, bucket: WorkBucketStage) -> bool {
        !self.scheduler().work_buckets[bucket].is_activated()
            || self.local_work_buffer.len() >= Self::LOCALLY_CACHED_WORK_PACKETS
    }

    /// Add a work packet to the work queue and mark it with a higher priority.
    /// If the bucket is activated and the local queue has room, the packet is
    /// pushed to the local queue; otherwise it goes to the global bucket with
    /// a higher priority.
    #[inline]
    pub fn add_work_prioritized(&mut self, bucket: WorkBucketStage, work: impl GCWork<VM>) {
        if self.must_add_globally(bucket) {
            self.scheduler.work_buckets[bucket].add_prioritized(Box::new(work));
        } else {
            self.local_work_buffer.push(Box::new(work));
        }
    }

    /// Add a work packet to the work queue.
    /// If the bucket is activated and the local queue has room, the packet is
    /// pushed to the local queue; otherwise it goes to the global bucket.
    #[inline]
    pub fn add_work(&mut self, bucket: WorkBucketStage, work: impl GCWork<VM>) {
        if self.must_add_globally(bucket) {
            self.scheduler.work_buckets[bucket].add(work);
        } else {
            self.local_work_buffer.push(Box::new(work));
        }
    }

    /// True if this is the embedded worker of the GC controller thread.
    pub fn is_coordinator(&self) -> bool {
        self.is_coordinator
    }

    /// The scheduler this worker polls packets from.
    pub fn scheduler(&self) -> &GCWorkScheduler<VM> {
        &self.scheduler
    }

    /// Mutable access to the copying-GC context.
    pub fn get_copy_context_mut(&mut self) -> &mut GCWorkerCopyContext<VM> {
        &mut self.copy
    }

    /// Execute a single work packet (no per-packet statistics, unlike the
    /// `do_work_with_stat` call in `run`).
    pub fn do_work(&'static mut self, mut work: impl GCWork<VM>) {
        work.do_work(self, self.mmtk);
    }

    /// Poll a ready-to-execute work packet in the following order:
    ///
    /// 1. Any packet that should be processed only by this worker.
    /// 2. Poll from the local work queue.
    /// 3. Poll from activated global work-buckets.
    /// 4. Steal from other workers.
    fn poll(&self) -> Box<dyn GCWork<VM>> {
        self.shared
            .designated_work
            .pop()
            .or_else(|| self.local_work_buffer.pop())
            .unwrap_or_else(|| self.scheduler().poll(self))
    }

    /// Execute a boxed work packet (no per-packet statistics).
    pub fn do_boxed_work(&'static mut self, mut work: Box<dyn GCWork<VM>>) {
        work.do_work(self, self.mmtk);
    }

    /// Entry of the worker thread.
    /// Installs the worker's TLS and copy context, then keeps polling and
    /// executing work packets in an endless loop.
    pub fn run(&mut self, tls: VMWorkerThread, mmtk: &'static MMTK<VM>) {
        self.tls = tls;
        self.copy = crate::plan::create_gc_worker_context(tls, mmtk);
        loop {
            let mut work = self.poll();
            work.do_work_with_stat(self, mmtk);
        }
    }
}
/// A worker group to manage all the GC workers (except the coordinator worker).
pub struct WorkerGroup<VM: VMBinding> {
    /// Shared worker data
    pub workers_shared: Vec<Arc<GCWorkerShared<VM>>>,
    // Number of workers currently parked (see inc/dec_parked_workers).
    parked_workers: AtomicUsize,
    // Local deques created in `new()` and handed out to worker threads in
    // `spawn()`; the vector is empty afterwards.
    unspawned_local_work_queues: Mutex<Vec<deque::Worker<Box<dyn GCWork<VM>>>>>,
}
impl<VM: VMBinding> WorkerGroup<VM> {
    /// Create a WorkerGroup
    ///
    /// Builds one local work deque per worker up front and registers each
    /// deque's stealer with the corresponding `GCWorkerShared`.
    pub fn new(num_workers: usize) -> Arc<Self> {
        let unspawned_local_work_queues = (0..num_workers)
            .map(|_| deque::Worker::new_fifo())
            .collect::<Vec<_>>();
        let workers_shared = (0..num_workers)
            .map(|i| {
                Arc::new(GCWorkerShared::<VM>::new(Some(
                    unspawned_local_work_queues[i].stealer(),
                )))
            })
            .collect::<Vec<_>>();
        Arc::new(Self {
            workers_shared,
            parked_workers: Default::default(),
            unspawned_local_work_queues: Mutex::new(unspawned_local_work_queues),
        })
    }

    /// Spawn all the worker threads
    pub fn spawn(
        &self,
        mmtk: &'static MMTK<VM>,
        sender: Sender<CoordinatorMessage<VM>>,
        tls: VMThread,
    ) {
        let mut unspawned_local_work_queues = self.unspawned_local_work_queues.lock().unwrap();
        // Spawn each worker thread.
        // NOTE(review): `pop()` takes queues from the END of the vec, so the
        // worker with ordinal `i` owns queue `num_workers - 1 - i`, while the
        // stealer registered in `workers_shared[i]` came from queue `i`.
        // Every queue still has exactly one owner and one registered stealer,
        // but the pairing is permuted — confirm this is intentional.
        for (ordinal, shared) in self.workers_shared.iter().enumerate() {
            let worker = Box::new(GCWorker::new(
                mmtk,
                ordinal,
                mmtk.scheduler.clone(),
                false,
                sender.clone(),
                shared.clone(),
                unspawned_local_work_queues.pop().unwrap(),
            ));
            VM::VMCollection::spawn_gc_thread(tls, GCThreadContext::<VM>::Worker(worker));
        }
        debug_assert!(unspawned_local_work_queues.is_empty());
    }

    /// Get the number of workers in the group
    #[inline(always)]
    pub fn worker_count(&self) -> usize {
        self.workers_shared.len()
    }

    /// Increase the parked-workers counter.
    /// Called before a worker is parked.
    ///
    /// Return true if all the workers are parked.
    #[inline(always)]
    pub fn inc_parked_workers(&self) -> bool {
        let old = self.parked_workers.fetch_add(1, Ordering::SeqCst);
        debug_assert!(old < self.worker_count());
        old + 1 == self.worker_count()
    }

    /// Decrease the parked-workers counter.
    /// Called after a worker is resumed from the parked state.
    #[inline(always)]
    pub fn dec_parked_workers(&self) {
        let old = self.parked_workers.fetch_sub(1, Ordering::SeqCst);
        debug_assert!(old <= self.worker_count());
    }

    /// Get the number of parked workers in the group
    #[inline(always)]
    pub fn parked_workers(&self) -> usize {
        self.parked_workers.load(Ordering::SeqCst)
    }

    /// Check if all the workers are parked
    #[inline(always)]
    pub fn all_parked(&self) -> bool {
        self.parked_workers() == self.worker_count()
    }

    /// Return true if there're any pending designated work
    pub fn has_designated_work(&self) -> bool {
        self.workers_shared
            .iter()
            .any(|w| !w.designated_work.is_empty())
    }
}
| 35.73913 | 102 | 0.628548 |
bb91eb49b53b593a06cc5527eb8fb155cf03b0c8 | 2,275 | use std::os::raw::c_int;
use crate::fonts::atlas::{FontAtlas, FontId};
use crate::fonts::glyph::FontGlyph;
use crate::internal::{ImVector, RawCast};
use crate::sys;
/// Runtime data for a single font within a font atlas
///
/// Layout-compatible with `sys::ImFont`: `test_font_memory_layout` asserts
/// the size, alignment and every field offset below.
#[repr(C)]
pub struct Font {
    // Mirrors ImFont::IndexAdvanceX.
    index_advance_x: ImVector<f32>,
    // Mirrors ImFont::FallbackAdvanceX.
    pub fallback_advance_x: f32,
    // Mirrors ImFont::FontSize.
    pub font_size: f32,
    // Mirrors ImFont::IndexLookup.
    index_lookup: ImVector<sys::ImWchar>,
    // Mirrors ImFont::Glyphs.
    glyphs: ImVector<FontGlyph>,
    // Mirrors ImFont::FallbackGlyph.
    fallback_glyph: *const FontGlyph,
    // Mirrors ImFont::DisplayOffset.
    pub display_offset: [f32; 2],
    // Mirrors ImFont::ContainerAtlas.
    container_atlas: *mut FontAtlas,
    // Mirrors ImFont::ConfigData.
    config_data: *const sys::ImFontConfig,
    // Mirrors ImFont::ConfigDataCount.
    pub config_data_count: i16,
    // Mirrors ImFont::FallbackChar.
    pub fallback_char: sys::ImWchar,
    // NOTE(review): no offset assertion exists for ellipsis_char through
    // dirty_lookup_tables below — presumably they follow ImFont's trailing
    // fields in order; confirm against the bound cimgui version.
    pub ellipsis_char: sys::ImWchar,
    pub scale: f32,
    pub ascent: f32,
    pub descent: f32,
    pub metrics_total_surface: c_int,
    pub dirty_lookup_tables: bool,
}
// SAFETY(review): Font is #[repr(C)] and `test_font_memory_layout` asserts
// that its size, alignment and field offsets match sys::ImFont, so casting
// between the two representations is sound.
unsafe impl RawCast<sys::ImFont> for Font {}

impl Font {
    /// Returns the identifier of this font
    ///
    /// The identifier is simply the font's address.
    pub fn id(&self) -> FontId {
        FontId(self as *const _)
    }
}
/// Verify that `Font` stays layout-compatible with `sys::ImFont`:
/// same size, same alignment, and matching offsets for every asserted field.
#[test]
fn test_font_memory_layout() {
    use std::mem;
    assert_eq!(mem::size_of::<Font>(), mem::size_of::<sys::ImFont>());
    assert_eq!(mem::align_of::<Font>(), mem::align_of::<sys::ImFont>());
    use memoffset::offset_of;
    use sys::ImFont;
    macro_rules! assert_field_offset {
        ($l:ident, $r:ident) => {
            assert_eq!(offset_of!(Font, $l), offset_of!(ImFont, $r));
        };
    } // no trailing `;`: a semicolon after `macro_rules!` is redundant
    assert_field_offset!(index_advance_x, IndexAdvanceX);
    assert_field_offset!(fallback_advance_x, FallbackAdvanceX);
    assert_field_offset!(font_size, FontSize);
    assert_field_offset!(index_lookup, IndexLookup);
    assert_field_offset!(glyphs, Glyphs);
    assert_field_offset!(fallback_glyph, FallbackGlyph);
    assert_field_offset!(display_offset, DisplayOffset);
    assert_field_offset!(container_atlas, ContainerAtlas);
    assert_field_offset!(config_data, ConfigData);
    assert_field_offset!(config_data_count, ConfigDataCount);
    assert_field_offset!(fallback_char, FallbackChar);
    assert_field_offset!(scale, Scale);
    assert_field_offset!(ascent, Ascent);
    assert_field_offset!(descent, Descent);
    assert_field_offset!(metrics_total_surface, MetricsTotalSurface);
    assert_field_offset!(dirty_lookup_tables, DirtyLookupTables);
}
| 33.455882 | 72 | 0.704615 |
23787a5a363e82bf7d62f82fdd8bf90b30043731 | 996 | use std::ops::Add;
/// Whitespace-delimited token scanner over any `Read` source.
pub struct Scanner<R: std::io::Read> {
    reader: R,
}

impl<R: std::io::Read> Scanner<R> {
    /// Wrap a reader for token scanning, e.g.:
    /// `let stdin = std::io::stdin();`
    /// `let mut sc = Scanner::new(stdin.lock());`
    pub fn new(reader: R) -> Self {
        Self { reader }
    }

    /// Read the next whitespace-delimited token and parse it as `T`.
    /// Panics on I/O errors or if the token fails to parse.
    pub fn scan<T: std::str::FromStr>(&mut self) -> T {
        use std::io::Read;
        let token: String = self
            .reader
            .by_ref()
            .bytes()
            .map(|b| b.unwrap() as char)
            .skip_while(|c| c.is_whitespace())
            .take_while(|c| !c.is_whitespace())
            .collect();
        token.parse::<T>().ok().unwrap()
    }
}
// #[allow(warnings)]
/// Read a number of seconds from stdin and print it as HH:MM:SS.
fn main() {
    use std::io::Write;
    let stdin = std::io::stdin();
    let mut sc = Scanner::new(std::io::BufReader::new(stdin.lock()));
    let stdout = std::io::stdout();
    let out = &mut std::io::BufWriter::new(stdout.lock());
    let total: usize = sc.scan();
    // Split the second count into hours / minutes / seconds.
    let (h, rest) = (total / 3600, total % 3600);
    let (m, s) = (rest / 60, rest % 60);
    writeln!(out, "{:02}:{:02}:{:02}", h, m, s).unwrap();
}
| 24.9 | 69 | 0.528112 |
f4028742c1580a191e6a512b133a4b1890a047e9 | 24,450 | //! Rust Excel/OpenDocument reader
//!
//! # Status
//!
//! **calamine** is a pure Rust library to read Excel and OpenDocument Spreadsheet files.
//!
//! Read both cell values and vba project.
//!
//! # Examples
//! ```
//! use calamine::{Reader, open_workbook, Xlsx, DataType};
//!
//! // opens a new workbook
//! # let path = format!("{}/tests/issue3.xlsm", env!("CARGO_MANIFEST_DIR"));
//! let mut workbook: Xlsx<_> = open_workbook(path).expect("Cannot open file");
//!
//! // Read whole worksheet data and provide some statistics
//! if let Some(Ok(range)) = workbook.worksheet_range("Sheet1") {
//! let total_cells = range.get_size().0 * range.get_size().1;
//! let non_empty_cells: usize = range.used_cells().count();
//! println!("Found {} cells in 'Sheet1', including {} non empty cells",
//! total_cells, non_empty_cells);
//! // alternatively, we can manually filter rows
//! assert_eq!(non_empty_cells, range.rows()
//! .flat_map(|r| r.iter().filter(|&c| c != &DataType::Empty)).count());
//! }
//!
//! // Check if the workbook has a vba project
//! if let Some(Ok(mut vba)) = workbook.vba_project() {
//! let vba = vba.to_mut();
//! let module1 = vba.get_module("Module 1").unwrap();
//! println!("Module 1 code:");
//! println!("{}", module1);
//! for r in vba.get_references() {
//! if r.is_missing() {
//! println!("Reference {} is broken or not accessible", r.name);
//! }
//! }
//! }
//!
//! // You can also get defined names definition (string representation only)
//! for name in workbook.defined_names() {
//! println!("name: {}, formula: {}", name.0, name.1);
//! }
//!
//! // Now get all formula!
//! let sheets = workbook.sheet_names().to_owned();
//! for s in sheets {
//! println!("found {} formula in '{}'",
//! workbook
//! .worksheet_formula(&s)
//! .expect("sheet not found")
//! .expect("error while getting formula")
//! .rows().flat_map(|r| r.iter().filter(|f| !f.is_empty()))
//! .count(),
//! s);
//! }
//! ```
#![deny(missing_docs)]
extern crate byteorder;
extern crate codepage;
extern crate encoding_rs;
extern crate quick_xml;
#[macro_use]
extern crate serde;
extern crate zip;
#[macro_use]
extern crate log;
#[macro_use]
mod utils;
mod auto;
mod cfb;
mod datatype;
mod ods;
mod xls;
mod xlsb;
mod xlsx;
mod de;
mod errors;
pub mod vba;
use serde::de::DeserializeOwned;
use std::borrow::Cow;
use std::cmp::{max, min};
use std::fmt;
use std::fs::File;
use std::io::{BufReader, Read, Seek};
use std::ops::{Index, IndexMut};
use std::path::Path;
pub use auto::{open_workbook_auto, Sheets};
pub use datatype::DataType;
pub use de::{DeError, RangeDeserializer, RangeDeserializerBuilder, ToCellDeserializer};
pub use errors::Error;
pub use ods::{Ods, OdsError};
pub use xls::{Xls, XlsError};
pub use xlsb::{Xlsb, XlsbError};
pub use xlsx::{Xlsx, XlsxError};
use vba::VbaProject;
// https://msdn.microsoft.com/en-us/library/office/ff839168.aspx
/// An enum to represent all different errors that can appear as
/// a value in a worksheet cell
#[derive(Debug, Clone, PartialEq)]
pub enum CellErrorType {
    /// Division by 0 error
    Div0,
    /// Unavailable value error
    NA,
    /// Invalid name error
    Name,
    /// Null value error
    Null,
    /// Number error
    Num,
    /// Invalid cell reference error
    Ref,
    /// Value error
    Value,
    /// Getting data
    GettingData,
}

impl fmt::Display for CellErrorType {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Render the canonical spreadsheet error literal for each variant.
        let code = match *self {
            CellErrorType::Div0 => "#DIV/0!",
            CellErrorType::NA => "#N/A",
            CellErrorType::Name => "#NAME?",
            CellErrorType::Null => "#NULL!",
            CellErrorType::Num => "#NUM!",
            CellErrorType::Ref => "#REF!",
            CellErrorType::Value => "#VALUE!",
            CellErrorType::GettingData => "#DATA!",
        };
        f.write_str(code)
    }
}
/// Common file metadata
///
/// Depending on file type, some extra information may be stored
/// in the Reader implementations
#[derive(Debug, Default)]
pub struct Metadata {
    // Sheet names, in workbook order (exposed via `Reader::sheet_names`).
    sheets: Vec<String>,
    /// Defined names: (name, definition) pairs, exposed via
    /// `Reader::defined_names`.
    names: Vec<(String, String)>,
}
// FIXME `Reader` must only be seek `Seek` for `Xls::xls`. Because of the present API this limits
// the kinds of readers (other) data in formats can be read from.
/// A trait to share spreadsheets reader functions across different `FileType`s
pub trait Reader: Sized {
    /// Inner reader type
    type RS: Read + Seek;
    /// Error specific to file type
    type Error: ::std::fmt::Debug + From<::std::io::Error>;
    /// Creates a new instance.
    fn new(reader: Self::RS) -> Result<Self, Self::Error>;
    /// Gets `VbaProject`
    fn vba_project(&mut self) -> Option<Result<Cow<VbaProject>, Self::Error>>;
    /// Returns the parsed workbook metadata (sheet and defined names).
    fn metadata(&self) -> &Metadata;
    /// Read worksheet data in corresponding worksheet path
    fn worksheet_range(&mut self, name: &str) -> Option<Result<Range<DataType>, Self::Error>>;
    /// Read worksheet formula in corresponding worksheet path
    fn worksheet_formula(&mut self, _: &str) -> Option<Result<Range<String>, Self::Error>>;
    /// Get all sheet names of this workbook, in workbook order
    ///
    /// # Examples
    /// ```
    /// use calamine::{Xlsx, open_workbook, Reader};
    ///
    /// # let path = format!("{}/tests/issue3.xlsm", env!("CARGO_MANIFEST_DIR"));
    /// let mut workbook: Xlsx<_> = open_workbook(path).unwrap();
    /// println!("Sheets: {:#?}", workbook.sheet_names());
    /// ```
    fn sheet_names(&self) -> &[String] {
        &self.metadata().sheets
    }
    /// Get all defined names (Ranges names etc)
    fn defined_names(&self) -> &[(String, String)] {
        &self.metadata().names
    }
    /// Get the nth worksheet. Shortcut for getting the nth
    /// sheet_name, then the corresponding worksheet.
    fn worksheet_range_at(&mut self, n: usize) -> Option<Result<Range<DataType>, Self::Error>> {
        // `?` on the Option replaces the previous `if let … else { return None }`
        // dance. The owned copy is still required: it ends the `&self` borrow
        // from `sheet_names` before `worksheet_range(&mut self)` is called.
        let name = self.sheet_names().get(n)?.to_string();
        self.worksheet_range(&name)
    }
}
/// Convenient function to open a file with a BufReader<File>
pub fn open_workbook<R, P>(path: P) -> Result<R, R::Error>
where
    R: Reader<RS = BufReader<File>>,
    P: AsRef<Path>,
{
    // Open the file, wrap it in a buffered reader, and hand it to the
    // format-specific parser in one expression.
    R::new(BufReader::new(File::open(path)?))
}
/// A trait to constrain cells
pub trait CellType: Default + Clone + PartialEq {}
impl<T: Default + Clone + PartialEq> CellType for T {}
/// A struct to hold cell position and value
#[derive(Debug, Clone)]
pub struct Cell<T: CellType> {
    /// Position for the cell (row, column)
    pos: (u32, u32),
    /// Value for the cell
    val: T,
}
impl<T: CellType> Cell<T> {
    /// Builds a `Cell` located at `position` (row, column) holding `value`.
    pub fn new(position: (u32, u32), value: T) -> Cell<T> {
        Cell { pos: position, val: value }
    }
    /// Returns the (row, column) coordinates of this cell.
    pub fn get_position(&self) -> (u32, u32) {
        self.pos
    }
    /// Returns a reference to the value stored in this cell.
    pub fn get_value(&self) -> &T {
        &self.val
    }
}
/// A struct which represents a squared selection of cells
#[derive(Debug, Default, Clone)]
pub struct Range<T: CellType> {
    // Top-left cell position (row, column), in absolute sheet coordinates.
    start: (u32, u32),
    // Bottom-right cell position (row, column), inclusive.
    end: (u32, u32),
    // Row-major cell storage; an empty vector marks an empty range.
    inner: Vec<T>,
}
impl<T: CellType> Range<T> {
    /// Creates a new non-empty `Range`
    ///
    /// When possible, prefer the more efficient `Range::from_sparse`
    ///
    /// # Panics
    ///
    /// Panics if start.0 > end.0 or start.1 > end.1
    #[inline]
    pub fn new(start: (u32, u32), end: (u32, u32)) -> Range<T> {
        // Validate rows and columns independently. The previous
        // `assert!(start <= end)` used the tuples' derived lexicographic
        // ordering, which accepts bounds such as start=(0, 5), end=(1, 2)
        // and lets the width computation below underflow.
        assert!(
            start.0 <= end.0 && start.1 <= end.1,
            "invalid range bounds"
        );
        Range {
            start: start,
            end: end,
            inner: vec![T::default(); ((end.0 - start.0 + 1) * (end.1 - start.1 + 1)) as usize],
        }
    }
    /// Creates a new empty range
    #[inline]
    pub fn empty() -> Range<T> {
        Range {
            start: (0, 0),
            end: (0, 0),
            inner: Vec::new(),
        }
    }
    /// Get top left cell position (row, column)
    #[inline]
    pub fn start(&self) -> Option<(u32, u32)> {
        if self.is_empty() {
            None
        } else {
            Some(self.start)
        }
    }
    /// Get bottom right cell position (row, column)
    #[inline]
    pub fn end(&self) -> Option<(u32, u32)> {
        if self.is_empty() {
            None
        } else {
            Some(self.end)
        }
    }
    /// Get column width
    #[inline]
    pub fn width(&self) -> usize {
        if self.is_empty() {
            0
        } else {
            (self.end.1 - self.start.1 + 1) as usize
        }
    }
    /// Get row height
    #[inline]
    pub fn height(&self) -> usize {
        if self.is_empty() {
            0
        } else {
            (self.end.0 - self.start.0 + 1) as usize
        }
    }
    /// Get size in (height, width) format
    #[inline]
    pub fn get_size(&self) -> (usize, usize) {
        (self.height(), self.width())
    }
    /// Is range empty
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }
    /// Creates a `Range` from a coo sparse vector of `Cell`s.
    ///
    /// Coordinate list (COO) is the natural way cells are stored
    /// Inner size is defined only by non empty.
    ///
    /// cells: `Vec` of non empty `Cell`s, sorted by row
    ///
    /// # Panics
    ///
    /// panics when a `Cell` row is lower than the first `Cell` row or
    /// bigger than the last `Cell` row.
    pub fn from_sparse(cells: Vec<Cell<T>>) -> Range<T> {
        if cells.is_empty() {
            Range::empty()
        } else {
            // Rows are sorted, so the first/last cells give the row bounds;
            // columns are not sorted and require a full scan.
            let row_start = cells.first().unwrap().pos.0;
            let row_end = cells.last().unwrap().pos.0;
            let mut col_start = ::std::u32::MAX;
            let mut col_end = 0;
            for c in cells.iter().map(|c| c.pos.1) {
                if c < col_start {
                    col_start = c;
                }
                if c > col_end {
                    col_end = c
                }
            }
            let width = col_end - col_start + 1;
            let len = ((row_end - row_start + 1) * width) as usize;
            let mut v = vec![T::default(); len];
            v.shrink_to_fit();
            // Scatter the sparse cells into the dense row-major buffer.
            for c in cells {
                let idx = ((c.pos.0 - row_start) * width + (c.pos.1 - col_start)) as usize;
                v[idx] = c.val;
            }
            Range {
                start: (row_start, col_start),
                end: (row_end, col_end),
                inner: v,
            }
        }
    }
    /// Set inner value from absolute position
    ///
    /// # Remarks
    ///
    /// Will try to resize inner structure if the value is out of bounds.
    /// For relative positions, use Index trait
    ///
    /// Try to avoid this method as much as possible and prefer initializing
    /// the `Range` with `from_sparse` constructor.
    ///
    /// # Panics
    ///
    /// If `absolute_position` is lower (in row or column) than the range start
    ///
    /// # Examples
    /// ```
    /// use calamine::{Range, DataType};
    ///
    /// let mut range = Range::new((0, 0), (5, 2));
    /// assert_eq!(range.get_value((2, 1)), Some(&DataType::Empty));
    /// range.set_value((2, 1), DataType::Float(1.0));
    /// assert_eq!(range.get_value((2, 1)), Some(&DataType::Float(1.0)));
    /// ```
    pub fn set_value(&mut self, absolute_position: (u32, u32), value: T) {
        assert!(
            self.start.0 <= absolute_position.0 && self.start.1 <= absolute_position.1,
            "absolute_position out of bounds"
        );
        // check if we need to change range dimension (strangely happens sometimes ...)
        match (
            self.end.0 < absolute_position.0,
            self.end.1 < absolute_position.1,
        ) {
            (false, false) => (), // regular case, position within bounds
            // missing some rows: append whole default rows at the end
            (true, false) => {
                // Exactly (new_row - old_end_row) rows are missing; the former
                // `+ 1` here appended one spurious extra row, leaving `inner`
                // larger than `height() * width()` and making `rows()` yield
                // a trailing row of defaults.
                let len = (absolute_position.0 - self.end.0) as usize * self.width();
                self.inner.extend_from_slice(&vec![T::default(); len]);
                self.end.0 = absolute_position.0;
            }
            // missing some columns (and possibly rows too): rebuild row by row
            (e, true) => {
                let height = if e {
                    (absolute_position.0 - self.start.0 + 1) as usize
                } else {
                    self.height()
                };
                let width = (absolute_position.1 - self.start.1 + 1) as usize;
                let old_width = self.width();
                let mut data = Vec::with_capacity(width * height);
                let empty = vec![T::default(); width - old_width];
                // Re-pad every existing row to the new width ...
                for sce in self.inner.chunks(old_width) {
                    data.extend_from_slice(sce);
                    data.extend_from_slice(&empty);
                }
                // ... then append the missing rows at the new width.
                data.extend_from_slice(&vec![T::default(); width * (height - self.height())]);
                if e {
                    self.end = absolute_position
                } else {
                    self.end.1 = absolute_position.1
                }
                self.inner = data;
            }
        }
        let pos = (
            absolute_position.0 - self.start.0,
            absolute_position.1 - self.start.1,
        );
        let idx = pos.0 as usize * self.width() + pos.1 as usize;
        self.inner[idx] = value;
    }
    /// Get cell value from **absolute position**.
    ///
    /// If the `absolute_position` is out of range, returns `None`, else returns the cell value.
    /// The coordinate format is (row, column).
    ///
    /// # Warnings
    ///
    /// For relative positions, use Index trait
    ///
    /// # Remarks
    ///
    /// Absolute position is in *sheet* referential while relative position is in *range* referential.
    ///
    /// For instance if we consider range *C2:H38*:
    /// - `(0, 0)` absolute is "A1" and thus this function returns `None`
    /// - `(0, 0)` relative is "C2" and is returned by the `Index` trait (i.e `my_range[(0, 0)]`)
    ///
    /// # Examples
    /// ```
    /// use calamine::{Range, DataType};
    ///
    /// let range: Range<usize> = Range::new((1, 0), (5, 2));
    /// assert_eq!(range.get_value((0, 0)), None);
    /// assert_eq!(range[(0, 0)], 0);
    /// ```
    pub fn get_value(&self, absolute_position: (u32, u32)) -> Option<&T> {
        let p = absolute_position;
        if p.0 >= self.start.0 && p.0 <= self.end.0 && p.1 >= self.start.1 && p.1 <= self.end.1 {
            return self.get((
                (absolute_position.0 - self.start.0) as usize,
                (absolute_position.1 - self.start.1) as usize,
            ));
        }
        None
    }
    /// Get cell value from **relative position**.
    pub fn get(&self, relative_position: (usize, usize)) -> Option<&T> {
        // NOTE(review): a column index >= width() wraps into the following row
        // instead of returning `None` (flat index arithmetic below); internal
        // callers (`get_value`) bounds-check first — confirm external callers
        // before tightening this.
        let (row, col) = relative_position;
        self.inner.get(row * self.width() + col)
    }
    /// Get an iterator over inner rows
    ///
    /// # Examples
    /// ```
    /// use calamine::{Range, DataType};
    ///
    /// let range: Range<DataType> = Range::new((0, 0), (5, 2));
    /// // with rows item row: &[DataType]
    /// assert_eq!(range.rows().map(|r| r.len()).sum::<usize>(), 18);
    /// ```
    pub fn rows(&self) -> Rows<T> {
        if self.inner.is_empty() {
            Rows { inner: None }
        } else {
            let width = self.width();
            Rows {
                inner: Some(self.inner.chunks(width)),
            }
        }
    }
    /// Get an iterator over used cells only
    pub fn used_cells(&self) -> UsedCells<T> {
        UsedCells {
            width: self.width(),
            inner: self.inner.iter().enumerate(),
        }
    }
    /// Get an iterator over all cells in this range
    pub fn cells(&self) -> Cells<T> {
        Cells {
            width: self.width(),
            inner: self.inner.iter().enumerate(),
        }
    }
    /// Build a `RangeDeserializer` from this configuration.
    ///
    /// # Example
    ///
    /// ```
    /// # use calamine::{Reader, Error, open_workbook, Xlsx, RangeDeserializerBuilder};
    /// # fn main() { example().unwrap(); }
    /// fn example() -> Result<(), Error> {
    ///     let path = format!("{}/tests/temperature.xlsx", env!("CARGO_MANIFEST_DIR"));
    ///     let mut workbook: Xlsx<_> = open_workbook(path)?;
    ///     let mut sheet = workbook.worksheet_range("Sheet1")
    ///         .ok_or(Error::Msg("Cannot find 'Sheet1'"))??;
    ///     let mut iter = sheet.deserialize()?;
    ///
    ///     if let Some(result) = iter.next() {
    ///         let (label, value): (String, f64) = result?;
    ///         assert_eq!(label, "celsius");
    ///         assert_eq!(value, 22.2222);
    ///
    ///         Ok(())
    ///     } else {
    ///         return Err(From::from("expected at least one record but got none"));
    ///     }
    /// }
    /// ```
    pub fn deserialize<'a, D>(&'a self) -> Result<RangeDeserializer<'a, T, D>, DeError>
    where
        T: ToCellDeserializer<'a>,
        D: DeserializeOwned,
    {
        RangeDeserializerBuilder::new().from_range(self)
    }
    /// Build a new `Range` out of this range
    ///
    /// # Remarks
    ///
    /// Cells within this range will be cloned, cells out of it will be set to Empty
    ///
    /// # Example
    ///
    /// ```
    /// # use calamine::{Range, DataType};
    ///
    /// fn example() {
    ///     let mut a = Range::new((1, 1), (3, 3));
    ///     a.set_value((1, 1), DataType::Bool(true));
    ///     a.set_value((2, 2), DataType::Bool(true));
    ///
    ///     let b = a.range((2, 2), (5, 5));
    ///     assert_eq!(b.get_value((2, 2)), Some(&DataType::Bool(true)));
    ///     assert_eq!(b.get_value((3, 3)), Some(&DataType::Empty));
    ///
    ///     let c = a.range((0, 0), (2, 2));
    ///     assert_eq!(c.get_value((0, 0)), Some(&DataType::Empty));
    ///     assert_eq!(c.get_value((1, 1)), Some(&DataType::Bool(true)));
    ///     assert_eq!(c.get_value((2, 2)), Some(&DataType::Bool(true)));
    /// }
    /// ```
    pub fn range(&self, start: (u32, u32), end: (u32, u32)) -> Range<T> {
        let mut other = Range::new(start, end);
        let (self_start_row, self_start_col) = self.start;
        let (self_end_row, self_end_col) = self.end;
        let (other_start_row, other_start_col) = other.start;
        let (other_end_row, other_end_col) = other.end;
        // copy data from self to other: the shared area is the intersection
        // of the two ranges
        let start_row = max(self_start_row, other_start_row);
        let end_row = min(self_end_row, other_end_row);
        let start_col = max(self_start_col, other_start_col);
        let end_col = min(self_end_col, other_end_col);
        if start_row > end_row || start_col > end_col {
            // no overlap: `other` stays all-default
            return other;
        }
        let self_width = self.width();
        let other_width = other.width();
        // change referential
        //
        // we want to copy range: start_row..(end_row + 1)
        // In self referencial it is (start_row - self_start_row)..(end_row + 1 - self_start_row)
        let self_row_start = (start_row - self_start_row) as usize;
        let self_row_end = (end_row + 1 - self_start_row) as usize;
        let self_col_start = (start_col - self_start_col) as usize;
        let self_col_end = (end_col + 1 - self_start_col) as usize;
        let other_row_start = (start_row - other_start_row) as usize;
        let other_row_end = (end_row + 1 - other_start_row) as usize;
        let other_col_start = (start_col - other_start_col) as usize;
        let other_col_end = (end_col + 1 - other_start_col) as usize;
        {
            let self_rows = self
                .inner
                .chunks(self_width)
                .take(self_row_end)
                .skip(self_row_start);
            let other_rows = other
                .inner
                .chunks_mut(other_width)
                .take(other_row_end)
                .skip(other_row_start);
            // Clone the overlapping columns of each overlapping row.
            for (self_row, other_row) in self_rows.zip(other_rows) {
                let self_cols = &self_row[self_col_start..self_col_end];
                let other_cols = &mut other_row[other_col_start..other_col_end];
                other_cols.clone_from_slice(self_cols);
            }
        }
        other
    }
}
impl<T: CellType> Index<usize> for Range<T> {
    type Output = [T];
    /// Returns the `index`-th row of the range as a slice.
    fn index(&self, index: usize) -> &[T] {
        let w = self.width();
        let offset = index * w;
        &self.inner[offset..offset + w]
    }
}
impl<T: CellType> Index<(usize, usize)> for Range<T> {
    type Output = T;
    /// Returns the cell at the relative `(row, column)` position.
    fn index(&self, index: (usize, usize)) -> &T {
        let (row, col) = index;
        let (height, width) = self.get_size();
        assert!(col < width && row < height, "index out of bounds");
        &self.inner[row * width + col]
    }
}
impl<T: CellType> IndexMut<usize> for Range<T> {
    /// Returns the `index`-th row of the range as a mutable slice.
    fn index_mut(&mut self, index: usize) -> &mut [T] {
        let w = self.width();
        let offset = index * w;
        &mut self.inner[offset..offset + w]
    }
}
impl<T: CellType> IndexMut<(usize, usize)> for Range<T> {
    /// Returns the cell at the relative `(row, column)` position, mutably.
    fn index_mut(&mut self, index: (usize, usize)) -> &mut T {
        let (row, col) = index;
        let (height, width) = self.get_size();
        assert!(col < width && row < height, "index out of bounds");
        &mut self.inner[row * width + col]
    }
}
/// A struct to iterate over all cells
#[derive(Debug)]
pub struct Cells<'a, T: 'a + CellType> {
    // Row width of the underlying range, used to turn flat indices into (row, col).
    width: usize,
    // Enumerated flat iterator over the row-major cell storage.
    inner: ::std::iter::Enumerate<::std::slice::Iter<'a, T>>,
}
impl<'a, T: 'a + CellType> Iterator for Cells<'a, T> {
    type Item = (usize, usize, &'a T);
    fn next(&mut self) -> Option<Self::Item> {
        let (flat, value) = self.inner.next()?;
        Some((flat / self.width, flat % self.width, value))
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
impl<'a, T: 'a + CellType> DoubleEndedIterator for Cells<'a, T> {
    fn next_back(&mut self) -> Option<Self::Item> {
        let (flat, value) = self.inner.next_back()?;
        Some((flat / self.width, flat % self.width, value))
    }
}
impl<'a, T: 'a + CellType> ExactSizeIterator for Cells<'a, T> {}
/// A struct to iterate over used cells
#[derive(Debug)]
pub struct UsedCells<'a, T: 'a + CellType> {
    // Row width of the underlying range, used to turn flat indices into (row, col).
    width: usize,
    // Enumerated flat iterator over the row-major cell storage.
    inner: ::std::iter::Enumerate<::std::slice::Iter<'a, T>>,
}
impl<'a, T: 'a + CellType> Iterator for UsedCells<'a, T> {
    type Item = (usize, usize, &'a T);
    /// Advances to the next non-default ("used") cell.
    fn next(&mut self) -> Option<Self::Item> {
        let (flat, cell) = self
            .inner
            .by_ref()
            .find(|&(_, cell)| cell != &T::default())?;
        Some((flat / self.width, flat % self.width, cell))
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // All remaining cells may be empty, hence a lower bound of zero.
        let (_, upper) = self.inner.size_hint();
        (0, upper)
    }
}
impl<'a, T: 'a + CellType> DoubleEndedIterator for UsedCells<'a, T> {
    /// Finds the last remaining non-default cell.
    fn next_back(&mut self) -> Option<Self::Item> {
        let (flat, cell) = self
            .inner
            .by_ref()
            .rfind(|&(_, cell)| cell != &T::default())?;
        Some((flat / self.width, flat % self.width, cell))
    }
}
/// An iterator to read `Range` struct row by row
#[derive(Debug)]
pub struct Rows<'a, T: 'a + CellType> {
    // `None` for an empty range (chunk size 0 is not representable).
    inner: Option<::std::slice::Chunks<'a, T>>,
}
impl<'a, T: 'a + CellType> Iterator for Rows<'a, T> {
    type Item = &'a [T];
    fn next(&mut self) -> Option<Self::Item> {
        match self.inner {
            Some(ref mut chunks) => chunks.next(),
            None => None,
        }
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        match self.inner {
            Some(ref chunks) => chunks.size_hint(),
            None => (0, Some(0)),
        }
    }
}
impl<'a, T: 'a + CellType> DoubleEndedIterator for Rows<'a, T> {
    fn next_back(&mut self) -> Option<Self::Item> {
        match self.inner {
            Some(ref mut chunks) => chunks.next_back(),
            None => None,
        }
    }
}
impl<'a, T: 'a + CellType> ExactSizeIterator for Rows<'a, T> {}
| 31.630013 | 108 | 0.537096 |
29053c08c7b84cdd0fd595bd3986f3b42b056f7c | 1,863 | use std::ffi::OsString;
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::ptr;
use winapi::shared::windef::*;
use winapi::um::winuser::*;
/// Thin wrapper around a Win32 top-level window handle (`HWND`).
pub struct Window {
    // Raw handle; validity is not tracked — check with `is_valid`.
    handle: HWND,
}
#[allow(dead_code)]
impl Window {
    /// Returns every top-level window whose title matches `name` exactly.
    pub fn find(name: &str) -> Vec<Window> {
        // todo: how optimal is that string conversion?
        // Build a Win32 wide string: UTF-16 code units plus a NUL terminator.
        let title = OsString::from(name)
            .as_os_str()
            .encode_wide()
            .chain(Some(0).into_iter())
            .collect::<Vec<_>>();
        let mut handles = Vec::new();
        let mut handle = ptr::null_mut();
        loop {
            // Passing the previous handle as the "child after" argument makes
            // FindWindowExW resume the search, so the loop enumerates all matches.
            handle =
                unsafe { FindWindowExW(ptr::null_mut(), handle, ptr::null_mut(), title.as_ptr()) };
            if handle == ptr::null_mut() {
                break;
            } else {
                handles.push(Window { handle });
            }
        }
        handles
    }
    /// Returns the current foreground window, or `None` if there is none.
    pub fn foreground() -> Option<Window> {
        let handle = unsafe { GetForegroundWindow() };
        if handle == ptr::null_mut() {
            None
        } else {
            Some(Window { handle: handle })
        }
    }
    /// Returns true if the handle still identifies an existing window.
    pub fn is_valid(&self) -> bool {
        unsafe { IsWindow(self.handle) > 0 }
    }
    /// Returns true if this window currently has the foreground.
    pub fn is_foreground(&self) -> bool {
        unsafe { GetForegroundWindow() == self.handle }
    }
    /// Heuristic full-screen check: the window must have the topmost extended
    /// style and its rectangle must exactly cover the primary screen.
    pub fn is_full_screen(&self) -> bool {
        unsafe {
            // Primary monitor dimensions, in pixels.
            let w = GetSystemMetrics(SM_CXSCREEN);
            let h = GetSystemMetrics(SM_CYSCREEN);
            if GetWindowLongW(self.handle, GWL_EXSTYLE) as u32 & WS_EX_TOPMOST > 0 {
                let mut rect: RECT = mem::zeroed();
                GetWindowRect(self.handle, &mut rect);
                if (w == (rect.right - rect.left)) && (h == (rect.bottom - rect.top)) {
                    return true;
                }
            }
        }
        false
    }
}
| 25.520548 | 99 | 0.497048 |
905a8e4071df077036cab24182ab4a14473f1f1d | 820 | extern crate reqwest;
extern crate serde;
extern crate hyper;
use std::result::Result;
use http::hyper::header::{Authorization, Basic};
use serde::de::DeserializeOwned;
/// Minimal HTTP client that adds Basic authentication to every request.
pub struct Client {
    // Underlying reqwest client.
    client: reqwest::Client,
    // Credentials attached as an `Authorization` header on each request.
    auth: Basic,
}
impl Client {
    /// Builds a client that will authenticate every request with the given
    /// HTTP Basic credentials.
    pub fn new(username: &str, password: &str) -> Client {
        let auth = Basic {
            username: username.to_string(),
            password: Some(password.to_string()),
        };
        Client {
            client: reqwest::Client::new(),
            auth,
        }
    }
    /// Sends an authenticated GET to `uri` and deserializes the JSON
    /// response body into `T`.
    pub fn get<T: DeserializeOwned>(&self, uri: &str) -> Result<T, reqwest::Error> {
        let mut response = self
            .client
            .get(uri)
            .header(Authorization(self.auth.clone()))
            .send()?;
        response.json()
    }
}
| 23.428571 | 84 | 0.568293 |
260b189825e92f12855c9e3c6d65d03e88de4b4d | 10,035 | //! Utilities to write RDF graphs and datasets.
use crate::io::{DatasetFormat, GraphFormat};
use crate::model::*;
use rio_api::formatter::TriplesFormatter;
use rio_api::model as rio;
use rio_xml::RdfXmlFormatter;
use std::io::{self, Write};
/// A serializer for RDF graph serialization formats.
///
/// It currently supports the following formats:
/// * [N-Triples](https://www.w3.org/TR/n-triples/) ([`GraphFormat::NTriples`](super::GraphFormat::NTriples))
/// * [Turtle](https://www.w3.org/TR/turtle/) ([`GraphFormat::Turtle`](super::GraphFormat::Turtle))
/// * [RDF/XML](https://www.w3.org/TR/rdf-syntax-grammar/) ([`GraphFormat::RdfXml`](super::GraphFormat::RdfXml))
///
/// ```
/// use oxigraph::io::{GraphFormat, GraphSerializer};
/// use oxigraph::model::*;
///
/// let mut buffer = Vec::new();
/// let mut writer = GraphSerializer::from_format(GraphFormat::NTriples).triple_writer(&mut buffer)?;
/// writer.write(&Triple {
/// subject: NamedNode::new("http://example.com/s")?.into(),
/// predicate: NamedNode::new("http://example.com/p")?,
/// object: NamedNode::new("http://example.com/o")?.into()
/// })?;
/// writer.finish()?;
///
///assert_eq!(buffer.as_slice(), "<http://example.com/s> <http://example.com/p> <http://example.com/o> .\n".as_bytes());
/// # Result::<_,Box<dyn std::error::Error>>::Ok(())
/// ```
#[allow(missing_copy_implementations)]
pub struct GraphSerializer {
    // Target graph syntax, fixed at construction time.
    format: GraphFormat,
}
impl GraphSerializer {
    /// Builds a serializer for the given format
    #[inline]
    pub fn from_format(format: GraphFormat) -> Self {
        Self { format }
    }
    /// Returns a [`TripleWriter`] allowing writing triples into the given [`Write`](std::io::Write) implementation
    pub fn triple_writer<W: Write>(&self, writer: W) -> io::Result<TripleWriter<W>> {
        // N-Triples and Turtle share the same line-oriented writer here;
        // RDF/XML needs a stateful formatter, whose creation can fail.
        let formatter = match self.format {
            GraphFormat::NTriples | GraphFormat::Turtle => TripleWriterKind::NTriples(writer),
            GraphFormat::RdfXml => TripleWriterKind::RdfXml(RdfXmlFormatter::new(writer)?),
        };
        Ok(TripleWriter { formatter })
    }
}
/// Allows writing triples.
/// Could be built using a [`GraphSerializer`].
///
/// Warning: Do not forget to run the [`finish`](TripleWriter::finish()) method to properly write the last bytes of the file.
///
/// ```
/// use oxigraph::io::{GraphFormat, GraphSerializer};
/// use oxigraph::model::*;
///
/// let mut buffer = Vec::new();
/// let mut writer = GraphSerializer::from_format(GraphFormat::NTriples).triple_writer(&mut buffer)?;
/// writer.write(&Triple {
/// subject: NamedNode::new("http://example.com/s")?.into(),
/// predicate: NamedNode::new("http://example.com/p")?,
/// object: NamedNode::new("http://example.com/o")?.into()
/// })?;
/// writer.finish()?;
///
///assert_eq!(buffer.as_slice(), "<http://example.com/s> <http://example.com/p> <http://example.com/o> .\n".as_bytes());
/// # Result::<_,Box<dyn std::error::Error>>::Ok(())
/// ```
#[must_use]
pub struct TripleWriter<W: Write> {
    // Format-specific writer state, see `TripleWriterKind`.
    formatter: TripleWriterKind<W>,
}
// Dispatch between the supported graph serializations: N-Triples/Turtle use
// the raw writer line by line, RDF/XML goes through a stateful formatter.
enum TripleWriterKind<W: Write> {
    NTriples(W),
    RdfXml(RdfXmlFormatter<W>),
}
impl<W: Write> TripleWriter<W> {
    /// Writes a triple
    pub fn write<'a>(&mut self, triple: impl Into<TripleRef<'a>>) -> io::Result<()> {
        let triple = triple.into();
        match &mut self.formatter {
            TripleWriterKind::NTriples(writer) => {
                // One statement per line, terminated by " .".
                writeln!(writer, "{} .", triple)?;
            }
            // RDF/XML: convert our model to the Rio model and delegate.
            TripleWriterKind::RdfXml(formatter) => formatter.format(&rio::Triple {
                subject: match triple.subject {
                    SubjectRef::NamedNode(node) => rio::NamedNode { iri: node.as_str() }.into(),
                    SubjectRef::BlankNode(node) => rio::BlankNode { id: node.as_str() }.into(),
                    // Quoted triples (RDF-star) cannot be expressed in RDF/XML.
                    SubjectRef::Triple(_) => {
                        return Err(io::Error::new(
                            io::ErrorKind::InvalidInput,
                            "RDF/XML does not support RDF-star yet",
                        ))
                    }
                },
                predicate: rio::NamedNode {
                    iri: triple.predicate.as_str(),
                },
                object: match triple.object {
                    TermRef::NamedNode(node) => rio::NamedNode { iri: node.as_str() }.into(),
                    TermRef::BlankNode(node) => rio::BlankNode { id: node.as_str() }.into(),
                    // Plain literals are either language-tagged or simple;
                    // everything else carries an explicit datatype IRI.
                    TermRef::Literal(literal) => if literal.is_plain() {
                            if let Some(language) = literal.language() {
                                rio::Literal::LanguageTaggedString {
                                    value: literal.value(),
                                    language,
                                }
                            } else {
                                rio::Literal::Simple {
                                    value: literal.value(),
                                }
                            }
                    } else {
                        rio::Literal::Typed {
                            value: literal.value(),
                            datatype: rio::NamedNode {
                                iri: literal.datatype().as_str(),
                            },
                        }
                    }
                    .into(),
                    TermRef::Triple(_) => {
                        return Err(io::Error::new(
                            io::ErrorKind::InvalidInput,
                            "RDF/XML does not support RDF-star yet",
                        ))
                    }
                },
            })?,
        }
        Ok(())
    }
    /// Writes the last bytes of the file
    pub fn finish(self) -> io::Result<()> {
        match self.formatter {
            TripleWriterKind::NTriples(mut writer) => writer.flush(),
            TripleWriterKind::RdfXml(formatter) => formatter.finish()?.flush(), //TODO: remove flush when the next version of Rio is going to be released
        }
    }
}
/// A serializer for RDF graph serialization formats.
///
/// It currently supports the following formats:
/// * [N-Quads](https://www.w3.org/TR/n-quads/) ([`DatasetFormat::NQuads`](super::DatasetFormat::NQuads))
/// * [TriG](https://www.w3.org/TR/trig/) ([`DatasetFormat::TriG`](super::DatasetFormat::TriG))
///
/// ```
/// use oxigraph::io::{DatasetFormat, DatasetSerializer};
/// use oxigraph::model::*;
///
/// let mut buffer = Vec::new();
/// let mut writer = DatasetSerializer::from_format(DatasetFormat::NQuads).quad_writer(&mut buffer)?;
/// writer.write(&Quad {
/// subject: NamedNode::new("http://example.com/s")?.into(),
/// predicate: NamedNode::new("http://example.com/p")?,
/// object: NamedNode::new("http://example.com/o")?.into(),
/// graph_name: NamedNode::new("http://example.com/g")?.into(),
/// })?;
/// writer.finish()?;
///
///assert_eq!(buffer.as_slice(), "<http://example.com/s> <http://example.com/p> <http://example.com/o> <http://example.com/g> .\n".as_bytes());
/// # Result::<_,Box<dyn std::error::Error>>::Ok(())
/// ```
#[allow(missing_copy_implementations)]
pub struct DatasetSerializer {
    // Target dataset syntax, fixed at construction time.
    format: DatasetFormat,
}
impl DatasetSerializer {
    /// Builds a serializer for the given format
    #[inline]
    pub fn from_format(format: DatasetFormat) -> Self {
        Self { format }
    }
    /// Returns a [`QuadWriter`] allowing writing triples into the given [`Write`](std::io::Write) implementation
    #[allow(clippy::unnecessary_wraps)]
    pub fn quad_writer<W: Write>(&self, writer: W) -> io::Result<QuadWriter<W>> {
        // Both supported formats are line-oriented and wrap the raw writer;
        // the Result return type is kept for API symmetry with triple_writer.
        let formatter = match self.format {
            DatasetFormat::NQuads => QuadWriterKind::NQuads(writer),
            DatasetFormat::TriG => QuadWriterKind::TriG(writer),
        };
        Ok(QuadWriter { formatter })
    }
}
/// Allows writing triples.
/// Could be built using a [`DatasetSerializer`].
///
/// Warning: Do not forget to run the [`finish`](QuadWriter::finish()) method to properly write the last bytes of the file.
///
/// ```
/// use oxigraph::io::{DatasetFormat, DatasetSerializer};
/// use oxigraph::model::*;
///
/// let mut buffer = Vec::new();
/// let mut writer = DatasetSerializer::from_format(DatasetFormat::NQuads).quad_writer(&mut buffer)?;
/// writer.write(&Quad {
/// subject: NamedNode::new("http://example.com/s")?.into(),
/// predicate: NamedNode::new("http://example.com/p")?,
/// object: NamedNode::new("http://example.com/o")?.into(),
/// graph_name: NamedNode::new("http://example.com/g")?.into(),
/// })?;
/// writer.finish()?;
///
///assert_eq!(buffer.as_slice(), "<http://example.com/s> <http://example.com/p> <http://example.com/o> <http://example.com/g> .\n".as_bytes());
/// # Result::<_,Box<dyn std::error::Error>>::Ok(())
/// ```
#[must_use]
pub struct QuadWriter<W: Write> {
    // Format-specific writer state, see `QuadWriterKind`.
    formatter: QuadWriterKind<W>,
}
// Dispatch between the supported dataset serializations; both wrap the
// raw writer directly.
enum QuadWriterKind<W: Write> {
    NQuads(W),
    TriG(W),
}
impl<W: Write> QuadWriter<W> {
    /// Writes a quad
    pub fn write<'a>(&mut self, quad: impl Into<QuadRef<'a>>) -> io::Result<()> {
        let quad = quad.into();
        match &mut self.formatter {
            QuadWriterKind::NQuads(writer) => {
                writeln!(writer, "{} .", quad)?;
            }
            QuadWriterKind::TriG(writer) => {
                // The previous condition was inverted: default-graph quads
                // were wrapped in a GRAPH block and named-graph quads were
                // written as bare statements. In TriG it is the opposite:
                // default-graph statements are plain triples, named-graph
                // statements go inside `GRAPH <name> { ... }`.
                if quad.graph_name == GraphNameRef::DefaultGraph {
                    writeln!(writer, "{} .", TripleRef::from(quad))?;
                } else {
                    writeln!(
                        writer,
                        "GRAPH {} {{ {} }}",
                        quad.graph_name,
                        TripleRef::from(quad)
                    )?;
                }
            }
        }
        Ok(())
    }
    /// Writes the last bytes of the file
    #[allow(clippy::unused_self, clippy::unnecessary_wraps)]
    pub fn finish(self) -> io::Result<()> {
        match self.formatter {
            QuadWriterKind::NQuads(mut writer) | QuadWriterKind::TriG(mut writer) => writer.flush(),
        }
    }
}
| 38.011364 | 153 | 0.546986 |
232f5dc6104b7f1b29fc291525a682ae4ab37bb1 | 2,203 | #![allow(clippy::module_inception)]
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::large_enum_variant)]
#![allow(clippy::wrong_self_convention)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::blacklisted_name)]
//! <p>The transactional data APIs for Amazon QLDB</p>
//! <note>
//! <p>Instead of interacting directly with this API, we recommend using the QLDB driver
//! or the QLDB shell to execute data transactions on a ledger.</p>
//! <ul>
//! <li>
//! <p>If you are working with an AWS SDK, use the QLDB driver. The driver provides
//! a high-level abstraction layer above this <i>QLDB Session</i> data
//! plane and manages <code>SendCommand</code> API calls for you. For information and
//! a list of supported programming languages, see <a href="https://docs.aws.amazon.com/qldb/latest/developerguide/getting-started-driver.html">Getting started
//! with the driver</a> in the <i>Amazon QLDB Developer
//! Guide</i>.</p>
//! </li>
//! <li>
//! <p>If you are working with the AWS Command Line Interface (AWS CLI), use the
//! QLDB shell. The shell is a command line interface that uses the QLDB driver to
//! interact with a ledger. For information, see <a href="https://docs.aws.amazon.com/qldb/latest/developerguide/data-shell.html">Accessing Amazon QLDB using the
//! QLDB shell</a>.</p>
//! </li>
//! </ul>
//! </note>
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use error_meta::Error;
pub use config::Config;
mod aws_endpoint;
#[cfg(feature = "client")]
pub mod client;
pub mod config;
pub mod error;
mod error_meta;
pub mod input;
mod json_deser;
mod json_errors;
mod json_ser;
pub mod model;
pub mod operation;
mod operation_deser;
mod operation_ser;
pub mod output;
pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
pub use smithy_http::byte_stream::ByteStream;
pub use smithy_http::result::SdkError;
pub use smithy_types::Blob;
static API_METADATA: aws_http::user_agent::ApiMetadata =
aws_http::user_agent::ApiMetadata::new("qldbsession", PKG_VERSION);
pub use aws_auth::Credentials;
pub use aws_types::region::Region;
#[cfg(feature = "client")]
pub use client::Client;
pub use smithy_http::endpoint::Endpoint;
| 36.716667 | 161 | 0.735361 |
903b590fb1ee15d3e499f5ef1ae4ca2229e44483 | 8,228 | #![allow(clippy::integer_arithmetic)]
use {
rayon::{iter::ParallelIterator, prelude::*},
serial_test::serial,
safecoin_gossip::{
cluster_info::{compute_retransmit_peers, ClusterInfo},
contact_info::ContactInfo,
deprecated::{shuffle_peers_and_index, sorted_retransmit_peers_and_stakes},
},
safecoin_sdk::{pubkey::Pubkey, signer::keypair::Keypair},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::{HashMap, HashSet},
sync::{
mpsc::{channel, Receiver, Sender, TryRecvError},
Arc, Mutex,
},
time::Instant,
},
};
type Nodes = HashMap<Pubkey, (bool, HashSet<i32>, Receiver<(i32, bool)>)>;
// Number of worker threads for the simulation: one per logical CPU.
fn num_threads() -> usize {
    num_cpus::get()
}
/// Inserts `shred` into the received-shred set of the node identified by
/// `id`, scanning all batches in parallel (a pubkey lives in exactly one batch).
fn find_insert_shred(id: &Pubkey, shred: i32, batches: &mut [Nodes]) {
    batches.par_iter_mut().for_each(|batch| {
        // Single hash lookup instead of `contains_key` followed by
        // `get_mut(...).unwrap()`.
        if let Some((_, shreds, _)) = batch.get_mut(id) {
            shreds.insert(shred);
        }
    });
}
/// Simulates one retransmit step: removes this cluster's own node from the
/// shuffled list, computes its (neighbors, children) retransmit peers, and
/// sends `shred` to them. Children receive the caller's `retransmit` flag;
/// neighbors are only notified (flag `false`) when `retransmit` is set.
/// Returns the shred id for convenience.
fn retransmit(
    mut shuffled_nodes: Vec<ContactInfo>,
    senders: &HashMap<Pubkey, Sender<(i32, bool)>>,
    cluster: &ClusterInfo,
    fanout: usize,
    shred: i32,
    retransmit: bool,
) -> i32 {
    // Drop our own entry while remembering its position in the shuffled order.
    // (The former `seed` buffer filled from `shred` was never read — removed.)
    let mut my_index = 0;
    let mut index = 0;
    shuffled_nodes.retain(|c| {
        if c.id == cluster.id() {
            my_index = index;
            false
        } else {
            index += 1;
            true
        }
    });
    let shuffled_indices: Vec<_> = (0..shuffled_nodes.len()).collect();
    let (neighbors, children) = compute_retransmit_peers(fanout, my_index, &shuffled_indices);
    children.into_iter().for_each(|i| {
        let s = senders.get(&shuffled_nodes[i].id).unwrap();
        let _ = s.send((shred, retransmit));
    });
    if retransmit {
        neighbors.into_iter().for_each(|i| {
            let s = senders.get(&shuffled_nodes[i].id).unwrap();
            let _ = s.send((shred, false));
        });
    }
    shred
}
/// Drive a full turbine-retransmit simulation over `stakes.len()` staked nodes
/// plus one unstaked leader, using the given `fanout`.
///
/// For each of 100 "shreds" the stake-weighted shuffled peer list is precomputed,
/// the shred is seeded onto the first peer of its shuffle, and then every node
/// (partitioned into one batch per CPU, run under rayon) retransmits what it
/// receives until all nodes have seen all shreds. Panics if any batch still has
/// incomplete nodes after the 5-minute timeout.
#[allow(clippy::type_complexity)]
fn run_simulation(stakes: &[u64], fanout: usize) {
    let num_threads = num_threads();
    // set timeout to 5 minutes
    let timeout = 60 * 5;
    // describe the leader
    let leader_info = ContactInfo::new_localhost(&safecoin_sdk::pubkey::new_rand(), 0);
    let cluster_info = ClusterInfo::new(
        leader_info.clone(),
        Arc::new(Keypair::new()),
        SocketAddrSpace::Unspecified,
    );
    // setup staked nodes
    let mut staked_nodes = HashMap::new();
    // setup accounts for all nodes (leader has 0 bal)
    let (s, r) = channel();
    let senders: Arc<Mutex<HashMap<Pubkey, Sender<(i32, bool)>>>> =
        Arc::new(Mutex::new(HashMap::new()));
    senders.lock().unwrap().insert(leader_info.id, s);
    // One batch of nodes per worker thread; the leader goes into batch 0.
    let mut batches: Vec<Nodes> = Vec::with_capacity(num_threads);
    (0..num_threads).for_each(|_| batches.push(HashMap::new()));
    batches
        .get_mut(0)
        .unwrap()
        .insert(leader_info.id, (false, HashSet::new(), r));
    let range: Vec<_> = (1..=stakes.len()).collect();
    let chunk_size = (stakes.len() + num_threads - 1) / num_threads;
    range.chunks(chunk_size).for_each(|chunk| {
        chunk.iter().for_each(|i| {
            //distribute neighbors across threads to maximize parallel compute
            let batch_ix = *i as usize % batches.len();
            let node = ContactInfo::new_localhost(&safecoin_sdk::pubkey::new_rand(), 0);
            staked_nodes.insert(node.id, stakes[*i - 1]);
            cluster_info.insert_info(node.clone());
            let (s, r) = channel();
            batches
                .get_mut(batch_ix)
                .unwrap()
                .insert(node.id, (false, HashSet::new(), r));
            senders.lock().unwrap().insert(node.id, s);
        })
    });
    let c_info = cluster_info.clone_with_id(&cluster_info.id());
    let shreds_len = 100;
    // Precompute, per shred index, the stake-weighted peer shuffle seeded by
    // that index (the same shuffle every node would derive for that shred).
    let shuffled_peers: Vec<Vec<ContactInfo>> = (0..shreds_len as i32)
        .map(|i| {
            let mut seed = [0; 32];
            seed[0..4].copy_from_slice(&i.to_le_bytes());
            // TODO: Ideally these should use the new methods in
            // solana_core::cluster_nodes, however that would add build
            // dependency on solana_core which is not desired.
            let (peers, stakes_and_index) =
                sorted_retransmit_peers_and_stakes(&cluster_info, Some(&staked_nodes));
            let (_, shuffled_stakes_and_indexes) =
                shuffle_peers_and_index(&cluster_info.id(), &peers, &stakes_and_index, seed);
            shuffled_stakes_and_indexes
                .into_iter()
                .map(|(_, i)| peers[i].clone())
                .collect()
        })
        .collect();
    // create some "shreds".
    (0..shreds_len).for_each(|i| {
        let broadcast_table = &shuffled_peers[i];
        find_insert_shred(&broadcast_table[0].id, i as i32, &mut batches);
    });
    assert!(!batches.is_empty());
    // start turbine simulation
    let now = Instant::now();
    batches.par_iter_mut().for_each(|batch| {
        let mut remaining = batch.len();
        let senders: HashMap<_, _> = senders.lock().unwrap().clone();
        // Poll every node in this batch until each has received all shreds.
        while remaining > 0 {
            for (id, (layer1_done, recv, r)) in batch.iter_mut() {
                assert!(
                    now.elapsed().as_secs() < timeout,
                    "Timed out with {:?} remaining nodes",
                    remaining
                );
                let cluster = c_info.clone_with_id(id);
                // First pass: retransmit any shreds this node was seeded with.
                if !*layer1_done {
                    recv.iter().for_each(|i| {
                        retransmit(
                            shuffled_peers[*i as usize].clone(),
                            &senders,
                            &cluster,
                            fanout,
                            *i,
                            true,
                        );
                    });
                    *layer1_done = true;
                }
                //send and recv
                if recv.len() < shreds_len {
                    // Drain the node's channel; forward each newly-seen shred.
                    loop {
                        match r.try_recv() {
                            Ok((data, retx)) => {
                                if recv.insert(data) {
                                    let _ = retransmit(
                                        shuffled_peers[data as usize].clone(),
                                        &senders,
                                        &cluster,
                                        fanout,
                                        data,
                                        retx,
                                    );
                                }
                                if recv.len() == shreds_len {
                                    remaining -= 1;
                                    break;
                                }
                            }
                            Err(TryRecvError::Disconnected) => break,
                            Err(TryRecvError::Empty) => break,
                        };
                    }
                }
            }
        }
    });
}
// Recommended to not run these tests in parallel (they are resource heavy and want all the compute)
//todo add tests with network failures
// Run with a single layer: fanout (200) covers the whole 200-node cluster.
#[test]
#[serial]
fn test_retransmit_small() {
    let stakes: Vec<_> = (0..200).collect();
    run_simulation(&stakes, 200);
}
// Make sure at least 2 layers are used (2000 nodes > fanout of 200).
#[test]
#[serial]
fn test_retransmit_medium() {
    let num_nodes = 2000;
    let stakes: Vec<_> = (0..num_nodes).collect();
    run_simulation(&stakes, 200);
}
// Make sure at least 2 layers are used but with equal stakes
#[test]
#[serial]
fn test_retransmit_medium_equal_stakes() {
    let num_nodes = 2000;
    let stakes: Vec<_> = (0..num_nodes).map(|_| 10).collect();
    run_simulation(&stakes, 200);
}
// Shrink the fanout (not the network: 4000 nodes, fanout 2) so many layers are used.
#[test]
#[serial]
fn test_retransmit_large() {
    let num_nodes = 4000;
    let stakes: Vec<_> = (0..num_nodes).collect();
    run_simulation(&stakes, 2);
}
| 33.721311 | 100 | 0.51823 |
01798fc9f594e48855aec49f1861c4137a2f3def | 692 | use colored::Colorize;
use utils::cache::{clear_cache_for_package, clear_cache};
pub fn clean(args: Vec<String>) {
let mut packages = vec![];
let mut flags = vec![];
for arg in args.clone() {
if arg.starts_with("-") {
flags.push(arg);
}
else {
if arg != "clean" || arg != "novus" {
packages.push(arg);
}
}
}
println!("{}", "Clearing Cache".bright_green());
if args.len() == 2 {
clear_cache();
}
else {
for package in packages {
clear_cache_for_package(&package);
}
}
println!("{}", "Successfully Cleared Cache".bright_purple());
} | 24.714286 | 65 | 0.508671 |
fcdc0659a85d3259b37f9364ecaeb5e70b18a79a | 23,489 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Collection of IoT Defender settings; `value` is read from responses only.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotDefenderSettingsList {
    #[serde(skip_serializing)]
    pub value: Vec<IotDefenderSettingsModel>,
}
/// ARM resource wrapper around [`IotDefenderSettingsProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotDefenderSettingsModel {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IotDefenderSettingsProperties>,
}
/// Defender settings payload: device quota, Sentinel workspaces, onboarding kind.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotDefenderSettingsProperties {
    #[serde(rename = "deviceQuota")]
    pub device_quota: i32,
    #[serde(rename = "sentinelWorkspaceResourceIds")]
    pub sentinel_workspace_resource_ids: Vec<String>,
    #[serde(rename = "onboardingKind")]
    pub onboarding_kind: iot_defender_settings_properties::OnboardingKind,
}
pub mod iot_defender_settings_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OnboardingKind {
        Default,
        MigratedToAzure,
    }
}
/// Download link object; no fields are modeled in this API version.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DownloadLink {}
/// Version and link metadata for one downloadable package (response-only fields).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PackageDownloadInfo {
    #[serde(skip_serializing)]
    pub version: Option<String>,
    #[serde(skip_serializing)]
    pub link: Option<DownloadLink>,
    #[serde(rename = "versionKind", skip_serializing)]
    pub version_kind: Option<package_download_info::VersionKind>,
}
pub mod package_download_info {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum VersionKind {
        Latest,
        Previous,
        Preview,
    }
}
/// Download info for an upgrade package, plus the version it upgrades from.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpgradePackageDownloadInfo {
    #[serde(flatten)]
    pub package_download_info: PackageDownloadInfo,
    #[serde(rename = "fromVersion", skip_serializing)]
    pub from_version: Option<String>,
}
/// All downloadable artifacts, grouped by component (sensor, central manager, tools).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PackageDownloads {
    #[serde(skip_serializing)]
    pub sensor: Option<package_downloads::Sensor>,
    #[serde(rename = "centralManager", skip_serializing)]
    pub central_manager: Option<package_downloads::CentralManager>,
    #[serde(rename = "threatIntelligence", skip_serializing)]
    pub threat_intelligence: Vec<PackageDownloadInfo>,
    #[serde(skip_serializing)]
    pub snmp: Vec<PackageDownloadInfo>,
    #[serde(rename = "wmiTool", skip_serializing)]
    pub wmi_tool: Vec<PackageDownloadInfo>,
    #[serde(rename = "authorizedDevicesImportTemplate", skip_serializing)]
    pub authorized_devices_import_template: Vec<PackageDownloadInfo>,
    #[serde(rename = "deviceInformationUpdateImportTemplate", skip_serializing)]
    pub device_information_update_import_template: Vec<PackageDownloadInfo>,
}
pub mod package_downloads {
    use super::*;
    /// Sensor packages: full installers plus upgrade bundles.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Sensor {
        #[serde(skip_serializing)]
        pub full: Option<sensor::Full>,
        #[serde(default, skip_serializing_if = "Vec::is_empty")]
        pub upgrade: Vec<UpgradePackageDownloadInfo>,
    }
    pub mod sensor {
        use super::*;
        #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
        pub struct Full {
            #[serde(skip_serializing)]
            pub iso: Vec<PackageDownloadInfo>,
            #[serde(default, skip_serializing_if = "Option::is_none")]
            pub ovf: Option<full::Ovf>,
        }
        pub mod full {
            use super::*;
            /// Sensor OVF images by appliance size.
            #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
            pub struct Ovf {
                #[serde(skip_serializing)]
                pub enterprise: Vec<PackageDownloadInfo>,
                #[serde(skip_serializing)]
                pub medium: Vec<PackageDownloadInfo>,
                #[serde(skip_serializing)]
                pub line: Vec<PackageDownloadInfo>,
            }
        }
    }
    /// Central-manager packages: full installers plus upgrade bundles.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct CentralManager {
        #[serde(skip_serializing)]
        pub full: Option<central_manager::Full>,
        #[serde(skip_serializing)]
        pub upgrade: Vec<UpgradePackageDownloadInfo>,
    }
    pub mod central_manager {
        use super::*;
        #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
        pub struct Full {
            #[serde(skip_serializing)]
            pub iso: Vec<PackageDownloadInfo>,
            #[serde(skip_serializing)]
            pub ovf: Option<full::Ovf>,
        }
        pub mod full {
            use super::*;
            /// Central-manager OVF images by size, including high-availability variants.
            #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
            pub struct Ovf {
                #[serde(skip_serializing)]
                pub enterprise: Vec<PackageDownloadInfo>,
                #[serde(rename = "enterpriseHighAvailability", skip_serializing)]
                pub enterprise_high_availability: Vec<PackageDownloadInfo>,
                #[serde(skip_serializing)]
                pub medium: Vec<PackageDownloadInfo>,
                #[serde(rename = "mediumHighAvailability", skip_serializing)]
                pub medium_high_availability: Vec<PackageDownloadInfo>,
            }
        }
    }
}
/// Collection of IoT sensors; `value` is read from responses only.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotSensorsList {
    #[serde(skip_serializing)]
    pub value: Vec<IotSensorsModel>,
}
/// ARM resource wrapper around [`IotSensorProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotSensorsModel {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IotSensorProperties>,
}
/// Sensor state: connectivity, learning flags, version and threat-intelligence (TI) status.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotSensorProperties {
    #[serde(rename = "connectivityTime", skip_serializing)]
    pub connectivity_time: Option<String>,
    #[serde(rename = "creationTime", skip_serializing)]
    pub creation_time: Option<String>,
    #[serde(rename = "dynamicLearning", skip_serializing)]
    pub dynamic_learning: Option<bool>,
    #[serde(rename = "learningMode", skip_serializing)]
    pub learning_mode: Option<bool>,
    #[serde(rename = "sensorStatus", skip_serializing)]
    pub sensor_status: Option<iot_sensor_properties::SensorStatus>,
    #[serde(rename = "sensorVersion", skip_serializing)]
    pub sensor_version: Option<String>,
    #[serde(rename = "tiAutomaticUpdates", default, skip_serializing_if = "Option::is_none")]
    pub ti_automatic_updates: Option<bool>,
    #[serde(rename = "tiStatus", skip_serializing)]
    pub ti_status: Option<iot_sensor_properties::TiStatus>,
    #[serde(rename = "tiVersion", skip_serializing)]
    pub ti_version: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub zone: Option<String>,
    #[serde(rename = "sensorType", default, skip_serializing_if = "Option::is_none")]
    pub sensor_type: Option<iot_sensor_properties::SensorType>,
}
pub mod iot_sensor_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SensorStatus {
        Ok,
        Disconnected,
        Unavailable,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum TiStatus {
        Ok,
        Failed,
        InProgress,
        UpdateAvailable,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SensorType {
        Ot,
        Enterprise,
    }
}
/// Request body for a sensor password reset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResetPasswordInput {
    #[serde(rename = "applianceId", default, skip_serializing_if = "Option::is_none")]
    pub appliance_id: Option<String>,
}
/// Paged collection of devices; `next_link` points at the next page if any.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeviceList {
    pub value: Vec<Device>,
    #[serde(rename = "nextLink", skip_serializing)]
    pub next_link: Option<String>,
}
/// ARM resource wrapper around [`DeviceProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Device {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DeviceProperties>,
}
/// Full device inventory record: identity, network, firmware, risk and status.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeviceProperties {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "deviceType", default, skip_serializing_if = "Option::is_none")]
    pub device_type: Option<String>,
    #[serde(rename = "sourceName", skip_serializing)]
    pub source_name: Option<String>,
    #[serde(rename = "networkInterfaces", skip_serializing)]
    pub network_interfaces: Vec<NetworkInterface>,
    #[serde(skip_serializing)]
    pub vendor: Option<String>,
    #[serde(rename = "osName", default, skip_serializing_if = "Option::is_none")]
    pub os_name: Option<String>,
    #[serde(skip_serializing)]
    pub protocols: Vec<Protocol>,
    #[serde(rename = "lastActiveTime", skip_serializing)]
    pub last_active_time: Option<String>,
    #[serde(rename = "lastUpdateTime", skip_serializing)]
    pub last_update_time: Option<String>,
    #[serde(rename = "managementState", skip_serializing)]
    pub management_state: Option<device_properties::ManagementState>,
    #[serde(rename = "authorizationState", default, skip_serializing_if = "Option::is_none")]
    pub authorization_state: Option<device_properties::AuthorizationState>,
    #[serde(rename = "deviceCriticality", default, skip_serializing_if = "Option::is_none")]
    pub device_criticality: Option<device_properties::DeviceCriticality>,
    #[serde(rename = "purdueLevel", default, skip_serializing_if = "Option::is_none")]
    pub purdue_level: Option<device_properties::PurdueLevel>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub notes: Option<String>,
    #[serde(skip_serializing)]
    pub firmwares: Vec<Firmware>,
    #[serde(rename = "discoveryTime", skip_serializing)]
    pub discovery_time: Option<String>,
    #[serde(rename = "programmingState", skip_serializing)]
    pub programming_state: Option<device_properties::ProgrammingState>,
    #[serde(rename = "lastProgrammingTime", skip_serializing)]
    pub last_programming_time: Option<String>,
    #[serde(rename = "scanningFunctionality", skip_serializing)]
    pub scanning_functionality: Option<device_properties::ScanningFunctionality>,
    #[serde(rename = "lastScanTime", skip_serializing)]
    pub last_scan_time: Option<String>,
    #[serde(rename = "riskScore", skip_serializing)]
    pub risk_score: Option<i32>,
    #[serde(skip_serializing)]
    pub sensors: Vec<Sensor>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub site: Option<Site>,
    #[serde(rename = "deviceStatus", skip_serializing)]
    pub device_status: Option<device_properties::DeviceStatus>,
}
pub mod device_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ManagementState {
        Managed,
        Unmanaged,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthorizationState {
        Authorized,
        Unauthorized,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DeviceCriticality {
        Important,
        Standard,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PurdueLevel {
        ProcessControl,
        Supervisory,
        Enterprise,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProgrammingState {
        ProgrammingDevice,
        NotProgrammingDevice,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ScanningFunctionality {
        ScannerDevice,
        NotScannerDevice,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DeviceStatus {
        Active,
        Removed,
    }
}
/// One network interface of a device: IP, MAC and VLAN membership.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkInterface {
    #[serde(rename = "ipAddress", default, skip_serializing_if = "Option::is_none")]
    pub ip_address: Option<IpAddress>,
    #[serde(rename = "macAddress", default, skip_serializing_if = "Option::is_none")]
    pub mac_address: Option<MacAddress>,
    #[serde(skip_serializing)]
    pub vlans: Vec<String>,
}
/// Detected IPv4 address with subnet and FQDN lookup metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IpAddress {
    #[serde(rename = "v4Address", skip_serializing)]
    pub v4_address: Option<String>,
    #[serde(rename = "detectionTime", skip_serializing)]
    pub detection_time: Option<String>,
    #[serde(rename = "subnetCidr", skip_serializing)]
    pub subnet_cidr: Option<String>,
    #[serde(skip_serializing)]
    pub fqdn: Option<String>,
    #[serde(rename = "fqdnLastLookupTime", skip_serializing)]
    pub fqdn_last_lookup_time: Option<String>,
}
/// Detected MAC address with significance and IP-relation metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MacAddress {
    #[serde(skip_serializing)]
    pub address: Option<String>,
    #[serde(rename = "detectionTime", skip_serializing)]
    pub detection_time: Option<String>,
    #[serde(skip_serializing)]
    pub significance: Option<mac_address::Significance>,
    #[serde(rename = "relationToIpStatus", skip_serializing)]
    pub relation_to_ip_status: Option<mac_address::RelationToIpStatus>,
}
pub mod mac_address {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Significance {
        Primary,
        Secondary,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RelationToIpStatus {
        Guess,
        Certain,
    }
}
/// Firmware module info (address, rack/slot, serial, model, version).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Firmware {
    #[serde(rename = "moduleAddress", skip_serializing)]
    pub module_address: Option<String>,
    #[serde(skip_serializing)]
    pub rack: Option<String>,
    #[serde(skip_serializing)]
    pub slot: Option<String>,
    #[serde(skip_serializing)]
    pub serial: Option<String>,
    #[serde(skip_serializing)]
    pub model: Option<String>,
    #[serde(skip_serializing)]
    pub version: Option<String>,
    #[serde(rename = "additionalData", skip_serializing)]
    pub additional_data: Option<serde_json::Value>,
}
/// Network protocol observed on a device.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Protocol {
    #[serde(skip_serializing)]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identifiers: Option<String>,
}
/// Sensor reference attached to a device (name and zone only).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sensor {
    #[serde(skip_serializing)]
    pub name: Option<String>,
    #[serde(skip_serializing)]
    pub zone: Option<String>,
}
/// Site reference attached to a device.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Site {
    #[serde(rename = "displayName", skip_serializing)]
    pub display_name: Option<String>,
}
/// Collection of on-premise IoT sensors; `value` is read from responses only.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OnPremiseIotSensorsList {
    #[serde(skip_serializing)]
    pub value: Vec<OnPremiseIotSensor>,
}
/// ARM resource wrapper for an on-premise IoT sensor.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OnPremiseIotSensor {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OnPremiseIotSensorProperties>,
}
/// On-premise sensor properties; no fields are modeled in this API version.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OnPremiseIotSensorProperties {}
/// Collection of IoT sites; `value` is read from responses only.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotSitesList {
    #[serde(skip_serializing)]
    pub value: Vec<IotSitesModel>,
}
/// ARM resource wrapper around [`IotSiteProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotSitesModel {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IotSiteProperties>,
}
/// Site payload: display name plus free-form tags.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotSiteProperties {
    #[serde(rename = "displayName")]
    pub display_name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
/// Paged collection of IoT alerts.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotAlertListModel {
    #[serde(skip_serializing)]
    pub value: Vec<IotAlertModel>,
    #[serde(rename = "nextLink", skip_serializing)]
    pub next_link: Option<String>,
}
/// ARM resource wrapper around [`IotAlertPropertiesModel`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotAlertModel {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IotAlertPropertiesModel>,
}
/// Alert instance details: type, time window, compromised entity, extra data.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotAlertPropertiesModel {
    #[serde(rename = "systemAlertId", skip_serializing)]
    pub system_alert_id: Option<String>,
    #[serde(rename = "compromisedEntity", skip_serializing)]
    pub compromised_entity: Option<String>,
    #[serde(rename = "alertType", skip_serializing)]
    pub alert_type: Option<String>,
    #[serde(rename = "startTimeUtc", skip_serializing)]
    pub start_time_utc: Option<String>,
    #[serde(rename = "endTimeUtc", skip_serializing)]
    pub end_time_utc: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub entities: Vec<serde_json::Value>,
    #[serde(rename = "extendedProperties", default, skip_serializing_if = "Option::is_none")]
    pub extended_properties: Option<serde_json::Value>,
}
/// Collection of alert-type definitions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotAlertTypeList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<IotAlertType>,
}
/// ARM resource wrapper around [`IotAlertTypeProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotAlertType {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IotAlertTypeProperties>,
}
/// Alert-type definition: display name, severity, provenance, intent, remediation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotAlertTypeProperties {
    #[serde(rename = "alertDisplayName", skip_serializing)]
    pub alert_display_name: Option<String>,
    #[serde(skip_serializing)]
    pub severity: Option<iot_alert_type_properties::Severity>,
    #[serde(skip_serializing)]
    pub description: Option<String>,
    #[serde(rename = "providerName", skip_serializing)]
    pub provider_name: Option<String>,
    #[serde(rename = "productName", skip_serializing)]
    pub product_name: Option<String>,
    #[serde(rename = "productComponentName", skip_serializing)]
    pub product_component_name: Option<String>,
    #[serde(rename = "vendorName", skip_serializing)]
    pub vendor_name: Option<String>,
    #[serde(skip_serializing)]
    pub intent: Option<iot_alert_type_properties::Intent>,
    #[serde(rename = "remediationSteps", skip_serializing)]
    pub remediation_steps: Vec<String>,
}
pub mod iot_alert_type_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Severity {
        Informational,
        Low,
        Medium,
        High,
    }
    /// Kill-chain intent classification of an alert type.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Intent {
        Unknown,
        PreAttack,
        InitialAccess,
        Persistence,
        PrivilegeEscalation,
        DefenseEvasion,
        CredentialAccess,
        Discovery,
        LateralMovement,
        Execution,
        Collection,
        Exfiltration,
        CommandAndControl,
        Impact,
        Probing,
        Exploitation,
    }
}
/// Paged collection of IoT recommendations.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotRecommendationListModel {
    #[serde(skip_serializing)]
    pub value: Vec<IotRecommendationModel>,
    #[serde(rename = "nextLink", skip_serializing)]
    pub next_link: Option<String>,
}
/// ARM resource wrapper around [`IotRecommendationPropertiesModel`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotRecommendationModel {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IotRecommendationPropertiesModel>,
}
/// Recommendation instance details for a specific device.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotRecommendationPropertiesModel {
    #[serde(rename = "deviceId", skip_serializing)]
    pub device_id: Option<String>,
    #[serde(rename = "recommendationType", skip_serializing)]
    pub recommendation_type: Option<String>,
    #[serde(rename = "discoveredTimeUtc", skip_serializing)]
    pub discovered_time_utc: Option<String>,
    #[serde(rename = "recommendationAdditionalData", default, skip_serializing_if = "Option::is_none")]
    pub recommendation_additional_data: Option<serde_json::Value>,
}
/// Collection of recommendation-type definitions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotRecommendationTypeList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<IotRecommendationType>,
}
/// ARM resource wrapper around [`IotRecommendationTypeProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotRecommendationType {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IotRecommendationTypeProperties>,
}
/// Recommendation-type definition: display name, severity, provenance, remediation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IotRecommendationTypeProperties {
    #[serde(rename = "recommendationDisplayName", skip_serializing)]
    pub recommendation_display_name: Option<String>,
    #[serde(skip_serializing)]
    pub severity: Option<iot_recommendation_type_properties::Severity>,
    #[serde(skip_serializing)]
    pub description: Option<String>,
    #[serde(rename = "productName", skip_serializing)]
    pub product_name: Option<String>,
    #[serde(rename = "productComponentName", skip_serializing)]
    pub product_component_name: Option<String>,
    #[serde(rename = "vendorName", skip_serializing)]
    pub vendor_name: Option<String>,
    #[serde(skip_serializing)]
    pub control: Option<String>,
    #[serde(rename = "remediationSteps", skip_serializing)]
    pub remediation_steps: Vec<String>,
    #[serde(rename = "dataSource", skip_serializing)]
    pub data_source: Option<String>,
}
pub mod iot_recommendation_type_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Severity {
        Unknown,
        NotApplicable,
        Healthy,
        OffByPolicy,
        Low,
        Medium,
        High,
    }
}
/// Standard Azure error envelope (`{"error": {...}}`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudError {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<CloudErrorBody>,
}
/// Error payload: code, message, target, recursive details and typed extras.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudErrorBody {
    #[serde(skip_serializing)]
    pub code: Option<String>,
    #[serde(skip_serializing)]
    pub message: Option<String>,
    #[serde(skip_serializing)]
    pub target: Option<String>,
    #[serde(skip_serializing)]
    pub details: Vec<CloudErrorBody>,
    #[serde(rename = "additionalInfo", skip_serializing)]
    pub additional_info: Vec<ErrorAdditionalInfo>,
}
/// Typed additional-info item attached to an error body.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorAdditionalInfo {
    #[serde(rename = "type", skip_serializing)]
    pub type_: Option<String>,
    #[serde(skip_serializing)]
    pub info: Option<serde_json::Value>,
}
/// Base ARM resource identity (id / name / type), flattened into the models above.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(skip_serializing)]
    pub id: Option<String>,
    #[serde(skip_serializing)]
    pub name: Option<String>,
    #[serde(rename = "type", skip_serializing)]
    pub type_: Option<String>,
}
| 37.885484 | 103 | 0.693899 |
edf3cbea503a05bd1fa9a99d89fcb9191b71570b | 713 | use std::io::Write;
use anyhow::{Result};
pub(crate) struct Writer<'a, T : Write>{
write : &'a mut T,
len : usize,
}
impl<'a, T : Write> Writer<'a, T>{
pub fn new(write : &'a mut T) -> Self{ Self{ write, len : 0 } }
pub fn write_byte(&mut self, byte : u8) -> Result<usize>{
self.write.write_all(&[byte])?;
self.len += 1;
Ok(1)
}
pub fn write(&mut self, bytes : &[u8]) -> Result<usize>{
self.write.write_all(bytes)?;
self.len += bytes.len();
Ok(bytes.len())
}
pub fn bytes_written(&self) -> usize{ self.len }
//やる必要ある???
// pub fn flush(&mut self) -> Result<()>{
// self.write.flush()?;
// Ok(())
// }
} | 25.464286 | 67 | 0.506311 |
08569057d33d94e1cf0cf4457819d75faacb5dae | 1,079 | impl Solution {
#[inline]
fn is_nozero(mut i: i32) -> bool {
let mut d = 1000;
while i < d {
d /= 10;
}
while d > 0 {
let q = i / d;
if q == 0 {
return false;
}
i %= d;
d /= 10;
}
true
}
pub fn get_no_zero_integers(n: i32) -> Vec<i32> {
let is_nozero = Solution::is_nozero;
let (mut upper, mut lower) = (n, 1);
// break becomes return value.
loop {
let candidate = upper + lower;
if candidate == n {
break vec![lower, upper];
} else if candidate < n {
loop {
lower += 1;
if is_nozero(lower) {
break;
}
}
} else {
loop {
upper -= 1;
if is_nozero(upper) {
break;
}
}
}
}
}
}
| 23.456522 | 53 | 0.318814 |
8f729bf688440ab58dcca68d2eec71f4df65d32a | 11,540 | use clap::{crate_version, App, Arg, ArgMatches};
use log::*;
use solana::client::mk_client;
use solana::cluster_info::{Node, NodeInfo, FULLNODE_PORT_RANGE};
use solana::fullnode::{Fullnode, FullnodeConfig};
use solana::leader_scheduler::LeaderScheduler;
use solana::local_vote_signer_service::LocalVoteSignerService;
use solana::socketaddr;
use solana::thin_client::{poll_gossip_for_leader, ThinClient};
use solana::vote_signer_proxy::{RemoteVoteSigner, VoteSignerProxy};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::vote_program::VoteProgram;
use solana_sdk::vote_transaction::VoteTransaction;
use std::fs::File;
use std::io::{Error, ErrorKind, Result};
use std::net::{Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::Arc;
use std::sync::RwLock;
use std::thread::sleep;
use std::time::Duration;
/// Resolve the node identity from CLI arguments.
///
/// With `--identity PATH`, loads the keypair and gossip address from the JSON
/// config file, exiting the process with status 1 if the file cannot be read
/// or parsed. Without it, returns a fresh keypair and the default gossip
/// address 0.0.0.0:8000.
fn parse_identity(matches: &ArgMatches<'_>) -> (Keypair, SocketAddr) {
    let path = match matches.value_of("identity") {
        Some(i) => i.to_string(),
        // No identity file requested: ephemeral keypair, default gossip port.
        None => return (Keypair::new(), socketaddr!(0, 8000)),
    };
    let file = match File::open(path.clone()) {
        Ok(file) => file,
        Err(_) => {
            eprintln!("failed to read {}", path);
            exit(1);
        }
    };
    let parse: serde_json::Result<solana_fullnode_config::Config> = serde_json::from_reader(file);
    match parse {
        Ok(config_data) => {
            let keypair = config_data.keypair();
            let node_info = NodeInfo::new_with_pubkey_socketaddr(
                keypair.pubkey(),
                &config_data.bind_addr(FULLNODE_PORT_RANGE.0),
            );
            (keypair, node_info.gossip)
        }
        Err(_) => {
            eprintln!("failed to parse {}", path);
            exit(1);
        }
    }
}
/// Ensure `vote_account` exists, is funded, and is registered to this node.
///
/// Requires the node to hold at least one token (two if the vote account must
/// still be created, since one token is spent on the creation transaction).
/// Retries the `vote_account_new` transaction every 2 seconds until the vote
/// account shows a balance, then verifies the on-chain vote state points back
/// at this node's pubkey. Returns an `Other`-kind I/O error on any failure.
fn create_and_fund_vote_account(
    client: &mut ThinClient,
    vote_account: Pubkey,
    node_keypair: &Arc<Keypair>,
) -> Result<()> {
    let pubkey = node_keypair.pubkey();
    let node_balance = client.poll_get_balance(&pubkey)?;
    info!("node balance is {}", node_balance);
    if node_balance < 1 {
        return Err(Error::new(
            ErrorKind::Other,
            "insufficient tokens, one token required",
        ));
    }
    // Create the vote account if necessary
    if client.poll_get_balance(&vote_account).unwrap_or(0) == 0 {
        // Need at least two tokens as one token will be spent on a vote_account_new() transaction
        if node_balance < 2 {
            error!("insufficient tokens, two tokens required");
            return Err(Error::new(
                ErrorKind::Other,
                "insufficient tokens, two tokens required",
            ));
        }
        // Retry until the vote account is visible with a balance; the `break`
        // below (on success) exits before the 2-second backoff sleep.
        loop {
            let last_id = client.get_last_id();
            info!("create_and_fund_vote_account last_id={:?}", last_id);
            let transaction =
                VoteTransaction::vote_account_new(node_keypair, vote_account, last_id, 1, 1);
            match client.transfer_signed(&transaction) {
                Ok(signature) => {
                    match client.poll_for_signature(&signature) {
                        Ok(_) => match client.poll_get_balance(&vote_account) {
                            Ok(balance) => {
                                info!("vote account balance: {}", balance);
                                break;
                            }
                            Err(e) => {
                                info!("Failed to get vote account balance: {:?}", e);
                            }
                        },
                        Err(e) => {
                            info!(
                                "vote_account_new signature not found: {:?}: {:?}",
                                signature, e
                            );
                        }
                    };
                }
                Err(e) => {
                    info!("Failed to send vote_account_new transaction: {:?}", e);
                }
            };
            sleep(Duration::from_secs(2));
        }
    }
    info!("Checking for vote account registration");
    // Confirm the deserialized vote state is registered to this node's pubkey.
    let vote_account_user_data = client.get_account_userdata(&vote_account);
    if let Ok(Some(vote_account_user_data)) = vote_account_user_data {
        if let Ok(vote_state) = VoteProgram::deserialize(&vote_account_user_data) {
            if vote_state.node_id == pubkey {
                return Ok(());
            }
        }
    }
    Err(Error::new(
        ErrorKind::Other,
        "expected successful vote account registration",
    ))
}
/// Fullnode entry point: parses CLI flags, optionally starts a local vote
/// signer, boots the fullnode, registers a vote account with the cluster
/// leader (unless --no-signer), then loops on role transitions forever.
fn main() {
    solana_logger::setup();
    solana_metrics::set_panic_hook("fullnode");
    // ---- CLI definition ----------------------------------------------------
    let matches = App::new("fullnode")
        .version(crate_version!())
        .arg(
            Arg::with_name("entry_stream")
                .long("entry-stream")
                .takes_value(true)
                .value_name("UNIX DOMAIN SOCKET")
                .help("Open entry stream at this unix domain socket location")
        )
        .arg(
            Arg::with_name("identity")
                .short("i")
                .long("identity")
                .value_name("PATH")
                .takes_value(true)
                .help("Run with the identity found in FILE"),
        )
        .arg(
            Arg::with_name("init_complete_file")
                .long("init-complete-file")
                .value_name("FILE")
                .takes_value(true)
                .help("Create this file, if it doesn't already exist, once node initialization is complete"),
        )
        .arg(
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .required(true)
                .help("Use DIR as persistent ledger location"),
        )
        .arg(
            Arg::with_name("network")
                .short("n")
                .long("network")
                .value_name("HOST:PORT")
                .takes_value(true)
                .help("Rendezvous with the cluster at this gossip entry point"),
        )
        .arg(
            Arg::with_name("no_leader_rotation")
                .long("no-leader-rotation")
                .help("Disable leader rotation"),
        )
        .arg(
            Arg::with_name("no_signer")
                .long("no-signer")
                .takes_value(false)
                .conflicts_with("signer")
                .help("Launch node without vote signer"),
        )
        .arg(
            Arg::with_name("no_sigverify")
                .short("v")
                .long("no-sigverify")
                .help("Run without signature verification"),
        )
        .arg(
            Arg::with_name("rpc_port")
                .long("rpc-port")
                .value_name("PORT")
                .takes_value(true)
                .help("RPC port to use for this node"),
        )
        .arg(
            Arg::with_name("signer")
                .short("s")
                .long("signer")
                .value_name("HOST:PORT")
                .takes_value(true)
                .help("Rendezvous with the vote signer at this RPC end point"),
        )
        .get_matches();
    // ---- Flag extraction ---------------------------------------------------
    let mut fullnode_config = FullnodeConfig::default();
    fullnode_config.sigverify_disabled = matches.is_present("no_sigverify");
    let no_signer = matches.is_present("no_signer");
    let use_only_bootstrap_leader = matches.is_present("no_leader_rotation");
    let (keypair, gossip) = parse_identity(&matches);
    // `ledger` is .required(true), so unwrap cannot fail here.
    let ledger_path = matches.value_of("ledger").unwrap();
    let cluster_entrypoint = matches
        .value_of("network")
        .map(|network| network.parse().expect("failed to parse network address"));
    // Use the supplied signer address, or spin up a local signer service.
    // `_signer_service` is kept alive for the lifetime of main.
    let (_signer_service, signer_addr) = if let Some(signer_addr) = matches.value_of("signer") {
        (
            None,
            signer_addr.to_string().parse().expect("Signer IP Address"),
        )
    } else {
        // Run a local vote signer if a vote signer service address was not provided
        let (signer_service, signer_addr) = LocalVoteSignerService::new();
        (Some(signer_service), signer_addr)
    };
    // RPC pubsub always binds to rpc_port + 1 when a port is given explicitly;
    // otherwise both ports are allocated independently from the fullnode range.
    let (rpc_port, rpc_pubsub_port) = if let Some(port) = matches.value_of("rpc_port") {
        let port_number = port.to_string().parse().expect("integer");
        if port_number == 0 {
            eprintln!("Invalid RPC port requested: {:?}", port);
            exit(1);
        }
        (port_number, port_number + 1)
    } else {
        (
            solana_netutil::find_available_port_in_range(FULLNODE_PORT_RANGE)
                .expect("unable to allocate rpc_port"),
            solana_netutil::find_available_port_in_range(FULLNODE_PORT_RANGE)
                .expect("unable to allocate rpc_pubsub_port"),
        )
    };
    let init_complete_file = matches.value_of("init_complete_file");
    fullnode_config.entry_stream = matches.value_of("entry_stream").map(|s| s.to_string());
    // ---- Node construction -------------------------------------------------
    let keypair = Arc::new(keypair);
    let mut node = Node::new_with_external_ip(keypair.pubkey(), &gossip);
    node.info.rpc.set_port(rpc_port);
    node.info.rpc_pubsub.set_port(rpc_pubsub_port);
    let mut leader_scheduler = LeaderScheduler::default();
    leader_scheduler.use_only_bootstrap_leader = use_only_bootstrap_leader;
    // Unless --no-signer was passed, build a proxy that signs votes via the
    // (remote or local) signer service.
    let vote_signer_option = if !no_signer {
        let vote_signer = VoteSignerProxy::new_with_signer(
            &keypair,
            Box::new(RemoteVoteSigner::new(signer_addr)),
        );
        info!("Signer service address: {:?}", signer_addr);
        info!("New vote account ID is {:?}", vote_signer.pubkey());
        Some(Arc::new(vote_signer))
    } else {
        None
    };
    let vote_account_option = vote_signer_option.as_ref().map(|x| x.pubkey());
    let gossip_addr = node.info.gossip;
    let mut fullnode = Fullnode::new(
        node,
        keypair.clone(),
        ledger_path,
        Arc::new(RwLock::new(leader_scheduler)),
        vote_signer_option,
        cluster_entrypoint
            .map(|i| NodeInfo::new_entry_point(&i))
            .as_ref(),
        fullnode_config,
    );
    // ---- Vote account registration -----------------------------------------
    // Blocks until a leader is found via gossip, then funds/creates the vote
    // account through that leader.
    if let Some(vote_account) = vote_account_option {
        let leader_node_info = loop {
            info!("Looking for leader...");
            match poll_gossip_for_leader(gossip_addr, Some(10)) {
                Ok(leader_node_info) => {
                    info!("Found leader: {:?}", leader_node_info);
                    break leader_node_info;
                }
                Err(err) => {
                    info!("Unable to find leader: {:?}", err);
                }
            };
        };
        let mut client = mk_client(&leader_node_info);
        if let Err(err) = create_and_fund_vote_account(&mut client, vote_account, &keypair) {
            panic!("Failed to create_and_fund_vote_account: {:?}", err);
        }
    }
    // Touch the init-complete marker file so external tooling can detect
    // that startup finished.
    if let Some(filename) = init_complete_file {
        File::create(filename).unwrap_or_else(|_| panic!("Unable to create: {}", filename));
    }
    info!("Node initialized");
    // ---- Main loop: run forever, panic on unexpected TPU/TVU exit ----------
    loop {
        let status = fullnode.handle_role_transition();
        match status {
            Ok(Some(transition)) => {
                info!("role_transition complete: {:?}", transition);
            }
            _ => {
                panic!(
                    "Fullnode TPU/TVU exited for some unexpected reason: {:?}",
                    status
                );
            }
        };
    }
}
| 36.403785 | 109 | 0.540035 |
f5c72ec33f2ea7250770a7ef5e06b8fbeb39e695 | 715 | fn main() {
let n = 10;
fib_loop(n);
fib_while(n);
fib_for(n);
}
/// Prints successive Fibonacci values using an unconditional `loop` with a
/// trailing break check, so the body always runs at least once (do-while
/// style): even for n <= 2 one value is printed.
fn fib_loop(n: u8) {
    let mut prev = 1;
    let mut curr = 1;
    let mut count = 2u8;
    loop {
        // Advance the pair (prev, curr) one step.
        let next = prev + curr;
        prev = curr;
        curr = next;
        count += 1;
        println!("next val is {}", curr);
        if count >= n {
            break;
        }
    }
}
/// Prints successive Fibonacci values, guarded up-front by a `while`
/// condition: nothing is printed when n <= 2.
fn fib_while(n: u8) {
    let mut prev = 1;
    let mut curr = 1;
    let mut idx: u8 = 2;
    while idx < n {
        // Advance the pair (prev, curr) one step.
        let next = prev + curr;
        prev = curr;
        curr = next;
        idx += 1;
        println!("next val is {}", curr);
    }
}
// Prints successive Fibonacci values by iterating the half-open range 2..n.
// Note: unlike fib_loop, this prints nothing when n <= 2.
fn fib_for(n: u8){
    // Seed the first two Fibonacci numbers.
    let (mut a, mut b) = (1, 1);
    for _i in 2..n {
        let c = a + b;
        a = b;
        b = c;
        println!("next val is {}", b);
    }
} | 15.212766 | 42 | 0.348252 |
fe1201ea06f187fe51856642e55bd0f62d2a6d5b | 1,143 | // Test that coherence detects overlap when some of the types in the
// impls are projections of associated type. Issue #20624.
use std::marker::PhantomData;
use std::ops::Deref;
pub struct Cow<'a, B: ?Sized>(PhantomData<(&'a (),B)>);
/// Trait for moving into a `Cow`
pub trait IntoCow<'a, B: ?Sized> {
/// Moves `self` into `Cow`
fn into_cow(self) -> Cow<'a, B>;
}
impl<'a, B: ?Sized> IntoCow<'a, B> for <B as ToOwned>::Owned where B: ToOwned {
fn into_cow(self) -> Cow<'a, B> {
Cow(PhantomData)
}
}
impl<'a, B: ?Sized> IntoCow<'a, B> for Cow<'a, B> where B: ToOwned {
//~^ ERROR E0119
fn into_cow(self) -> Cow<'a, B> {
self
}
}
impl<'a, B: ?Sized> IntoCow<'a, B> for &'a B where B: ToOwned {
//~^ ERROR E0119
fn into_cow(self) -> Cow<'a, B> {
Cow(PhantomData)
}
}
impl ToOwned for u8 {
type Owned = &'static u8;
fn to_owned(&self) -> &'static u8 { panic!() }
}
/// A generalization of Clone to borrowed data.
pub trait ToOwned {
type Owned;
/// Create owned data from borrowed data, usually by copying.
fn to_owned(&self) -> Self::Owned;
}
fn main() {}
| 22.86 | 79 | 0.596675 |
4abc7decd7df5c1a6a2be1eaad943ead518bab22 | 2,126 | fn main() {
// This binding lives in the main function
let long_lived_binding = 1;
let mut mutable_binding = "Hello";
println!("Scope: Main");
println!("long_lived_binding: {}", long_lived_binding);
// This is a block, and has a smaller scope than the main function
{
println!("Scope: Block");
// This binding only exists in this block
let short_lived_binding = 2;
println!("short_lived_binding: {}", short_lived_binding);
println!("long_lived_binding: {}", long_lived_binding);
println!("mutable_binding: {}", mutable_binding);
// long_lived_binding can be changed witin this scope. "Shadowing"
let long_lived_binding: f32 = 5.5;
println!("shadowed long_lived_binding: {}", long_lived_binding);
// Changes to the mutable variable in this scope will persist in the outer scope
println!("mutable_binding: {}", mutable_binding);
mutable_binding = "World";
println!("mutable_binding: {}", mutable_binding);
// When shadowing a mutable variable, omitting mut from the declration will "freeze" the variable within the scope
let mutable_binding = mutable_binding;
// mutable_binding = "Hello World";
// println!("changed frozen mutable_binding: {}", mutable_binding);
}
println!("Scope: Main");
// End of the block
// `short_lived_binding` is no longer usable
println!("long_lived_binding: {}", long_lived_binding);
// This binding also *shadows* the previous binding
let long_lived_binding = 'a';
println!("shadowed long_lived_binding: {}", long_lived_binding);
println!("mutable_binding: {}", mutable_binding);
// Declare a variable binding
let a_binding;
{
let x = 2;
// Initialize the binding
a_binding = x * x;
}
println!("a binding: {}", a_binding);
let another_binding;
// Error! Can't use until initialized
// println!("another binding: {}", another_binding);
another_binding = 1;
println!("another binding: {}", another_binding);
}
| 31.264706 | 122 | 0.642992 |
90fd3b8b601ec184f53d42a34e022a2176ac2ee5 | 8,131 | // Copyright (c) The Diem Core Contributors
// Copyright (c) The Move Contributors
// SPDX-License-Identifier: Apache-2.0
pub mod cargo_runner;
mod extensions;
pub mod test_reporter;
pub mod test_runner;
use crate::test_runner::TestRunner;
use clap::*;
use move_command_line_common::files::verify_and_create_named_address_mapping;
use move_compiler::{
self,
diagnostics::{self, codes::Severity},
shared::{self, NumericalAddress},
unit_test::{self, TestPlan},
Compiler, Flags, PASS_CFGIR,
};
use move_core_types::language_storage::ModuleId;
use move_vm_runtime::native_functions::NativeFunctionTable;
use std::{
collections::BTreeMap,
io::{Result, Write},
marker::Send,
sync::Mutex,
};
#[derive(Debug, Parser, Clone)]
#[clap(author, version, about)]
pub struct UnitTestingConfig {
/// Bound the number of instructions that can be executed by any one test.
#[clap(
name = "instructions",
default_value = "5000",
short = 'i',
long = "instructions"
)]
pub instruction_execution_bound: u64,
/// A filter string to determine which unit tests to run
#[clap(name = "filter", short = 'f', long = "filter")]
pub filter: Option<String>,
/// List all tests
#[clap(name = "list", short = 'l', long = "list")]
pub list: bool,
/// Number of threads to use for running tests.
#[clap(
name = "num_threads",
default_value = "8",
short = 't',
long = "threads"
)]
pub num_threads: usize,
/// Dependency files
#[clap(
name = "dependencies",
long = "dependencies",
short = 'd',
takes_value(true),
multiple_values(true),
multiple_occurrences(true)
)]
pub dep_files: Vec<String>,
/// Report test statistics at the end of testing
#[clap(name = "report_statistics", short = 's', long = "statistics")]
pub report_statistics: bool,
/// Show the storage state at the end of execution of a failing test
#[clap(name = "global_state_on_error", short = 'g', long = "state_on_error")]
pub report_storage_on_error: bool,
#[clap(
name = "report_stacktrace_on_abort",
short = 'r',
long = "stacktrace_on_abort"
)]
pub report_stacktrace_on_abort: bool,
/// Named address mapping
#[clap(
name = "NAMED_ADDRESSES",
short = 'a',
long = "addresses",
parse(try_from_str = shared::parse_named_address)
)]
pub named_address_values: Vec<(String, NumericalAddress)>,
/// Source files
#[clap(
name = "sources",
takes_value(true),
multiple_values(true),
multiple_occurrences(true)
)]
pub source_files: Vec<String>,
/// Use the stackless bytecode interpreter to run the tests and cross check its results with
/// the execution result from Move VM.
#[clap(long = "stackless")]
pub check_stackless_vm: bool,
/// Verbose mode
#[clap(short = 'v', long = "verbose")]
pub verbose: bool,
/// Use the EVM-based execution backend.
/// Does not work with --stackless.
#[cfg(feature = "evm-backend")]
#[clap(long = "evm")]
pub evm: bool,
}
/// Renders a `ModuleId` as `0x<short_address>::<module_name>`.
fn format_module_id(module_id: &ModuleId) -> String {
    let address = module_id.address().short_str_lossless();
    let name = module_id.name();
    format!("0x{}::{}", address, name)
}
impl UnitTestingConfig {
/// Create a unit testing config for use with `register_move_unit_tests`
pub fn default_with_bound(bound: Option<u64>) -> Self {
Self {
instruction_execution_bound: bound.unwrap_or(5000),
filter: None,
num_threads: 8,
report_statistics: false,
report_storage_on_error: false,
report_stacktrace_on_abort: false,
source_files: vec![],
dep_files: vec![],
check_stackless_vm: false,
verbose: false,
list: false,
named_address_values: vec![],
#[cfg(feature = "evm-backend")]
evm: false,
}
}
pub fn with_named_addresses(
mut self,
named_address_values: BTreeMap<String, NumericalAddress>,
) -> Self {
assert!(self.named_address_values.is_empty());
self.named_address_values = named_address_values.into_iter().collect();
self
}
fn compile_to_test_plan(
&self,
source_files: Vec<String>,
deps: Vec<String>,
) -> Option<TestPlan> {
let addresses =
verify_and_create_named_address_mapping(self.named_address_values.clone()).ok()?;
let (files, comments_and_compiler_res) =
Compiler::from_files(source_files, deps, addresses)
.set_flags(Flags::testing())
.run::<PASS_CFGIR>()
.unwrap();
let (_, compiler) =
diagnostics::unwrap_or_report_diagnostics(&files, comments_and_compiler_res);
let (mut compiler, cfgir) = compiler.into_ast();
let compilation_env = compiler.compilation_env();
let test_plan = unit_test::plan_builder::construct_test_plan(compilation_env, None, &cfgir);
if let Err(diags) = compilation_env.check_diags_at_or_above_severity(Severity::Warning) {
diagnostics::report_diagnostics(&files, diags);
}
let compilation_result = compiler.at_cfgir(cfgir).build();
let (units, warnings) =
diagnostics::unwrap_or_report_diagnostics(&files, compilation_result);
diagnostics::report_warnings(&files, warnings);
test_plan.map(|tests| TestPlan::new(tests, files, units))
}
/// Build a test plan from a unit test config
pub fn build_test_plan(&self) -> Option<TestPlan> {
let deps = self.dep_files.clone();
let TestPlan {
files, module_info, ..
} = self.compile_to_test_plan(deps.clone(), vec![])?;
let mut test_plan = self.compile_to_test_plan(self.source_files.clone(), deps)?;
test_plan.module_info.extend(module_info.into_iter());
test_plan.files.extend(files.into_iter());
Some(test_plan)
}
/// Public entry point to Move unit testing as a library
/// Returns `true` if all unit tests passed. Otherwise, returns `false`.
pub fn run_and_report_unit_tests<W: Write + Send>(
&self,
test_plan: TestPlan,
native_function_table: Option<NativeFunctionTable>,
writer: W,
) -> Result<(W, bool)> {
let shared_writer = Mutex::new(writer);
if self.list {
for (module_id, test_plan) in &test_plan.module_tests {
for test_name in test_plan.tests.keys() {
writeln!(
shared_writer.lock().unwrap(),
"{}::{}: test",
format_module_id(module_id),
test_name
)?;
}
}
return Ok((shared_writer.into_inner().unwrap(), true));
}
writeln!(shared_writer.lock().unwrap(), "Running Move unit tests")?;
let mut test_runner = TestRunner::new(
self.instruction_execution_bound,
self.num_threads,
self.check_stackless_vm,
self.verbose,
self.report_storage_on_error,
self.report_stacktrace_on_abort,
test_plan,
native_function_table,
verify_and_create_named_address_mapping(self.named_address_values.clone()).unwrap(),
#[cfg(feature = "evm-backend")]
self.evm,
)
.unwrap();
if let Some(filter_str) = &self.filter {
test_runner.filter(filter_str)
}
let test_results = test_runner.run(&shared_writer).unwrap();
if self.report_statistics {
test_results.report_statistics(&shared_writer)?;
}
let all_tests_passed = test_results.summarize(&shared_writer)?;
let writer = shared_writer.into_inner().unwrap();
Ok((writer, all_tests_passed))
}
}
| 31.638132 | 100 | 0.605092 |
21e1ff45c4ca39460cd5b26a0272050c9d42515e | 1,724 | // errors2.rs
// Say we're writing a game where you can buy items with tokens. All items cost
// 5 tokens, and whenever you purchase items there is a processing fee of 1
// token. A player of the game will type in how many items they want to buy,
// and the `total_cost` function will calculate the total number of tokens.
// Since the player typed in the quantity, though, we get it as a string-- and
// they might have typed anything, not just numbers!
// Right now, this function isn't handling the error case at all (and isn't
// handling the success case properly either). What we want to do is:
// if we call the `parse` function on a string that is not a number, that
// function will return a `ParseIntError`, and in that case, we want to
// immediately return that error from our function and not try to multiply
// and add.
// There are at least two ways to implement this that are both correct-- but
// one is a lot shorter! Execute `rustlings hint errors2` for hints to both ways.
use std::num::ParseIntError;
/// Computes the total token cost for `item_quantity` items: each item costs
/// 5 tokens, plus a flat processing fee of 1 token per purchase.
///
/// # Errors
///
/// Returns the `ParseIntError` produced when `item_quantity` is not a valid
/// `i32` (propagated with `?` instead of an explicit `match`).
pub fn total_cost(item_quantity: &str) -> Result<i32, ParseIntError> {
    // Named constants instead of bare locals make the pricing rule explicit.
    const PROCESSING_FEE: i32 = 1;
    const COST_PER_ITEM: i32 = 5;
    let qty = item_quantity.parse::<i32>()?;
    Ok(qty * COST_PER_ITEM + PROCESSING_FEE)
}
#[cfg(test)]
mod tests {
    use super::*;
    // Happy path: "34" parses, so cost = 34 * 5 + 1 = 171.
    #[test]
    fn item_quantity_is_a_valid_number() {
        assert_eq!(total_cost("34"), Ok(171));
    }
    // Failure path: non-numeric input surfaces std's ParseIntError message.
    #[test]
    fn item_quantity_is_an_invalid_number() {
        assert_eq!(
            total_cost("beep boop").unwrap_err().to_string(),
            "invalid digit found in string"
        );
    }
}
4a4f0fc91d4e63c77fd3d91209df1711169acf72 | 152 | use day_1::*;
// Entry point: loads the puzzle input from ./data and prints both answers.
// read_data / part_1 / part_2 come from the day_1 library crate.
fn main() {
    let data = read_data("./data");
    println!("Part 1: {}", part_1(&data));
    println!("Part 2: {}", part_2(&data));
}
| 16.888889 | 42 | 0.513158 |
3309f723616c58da6d1ef6d0845be38032c6bd64 | 12,086 | use std::sync::Arc;
use actix_session::Session as ApiSession;
use crate::{models::http::api_session_cookie, error::{ApiResult, ApiError}, util::respond};
use uuid::Uuid;
use time::{Duration, OffsetDateTime};
use ap_com::{Id, Db, Model};
use actix_web::{
HttpRequest, HttpResponse, HttpResponseBuilder, Responder,
http::header::{self, ContentType},
cookie::Cookie,
web::{self, ServiceConfig, Json, Data, Form, Path}
};
use ap_com::
models::{
user::{UserIn, User, session::Session, account::Account, profile::Profile},
user::credentials::{CredentialsSignup, CredentialsIn, Credentials},
};
/// Mounts the credentials auth endpoints onto the service config:
/// POST on /signup (full flow) plus /signup/{user,creds,account} (staged),
/// POST /login, /logout, /check, and a bare POST on the scope root.
pub fn routes(cfg: &mut ServiceConfig) {
    cfg
        .service(web::scope("/signup")
            .route("", web::post().to(signup))
            .route("/user", web::post().to(signup_user))
            .route("/creds", web::post().to(signup_creds))
            .route("/account", web::post().to(signup_account))
        )
        .route("", web::post().to(index))
        .route("/login", web::post().to(login_creds))
        .route("/logout", web::post().to(logout_creds))
        .route("/check", web::post().to(check_creds));
}
/// NOTE: Credentials signup -- four phases
/// 1. User provides e-mail and real name (creates User row)
/// 2. User provides username and password (creates Credentials row)
/// 3. Credentials account (Devisa as the provider) created due to implication
/// 4. User provides optional profile info (create Profile row -- can be empty)
/// The signup handler will handle steps 1 and 2, then pass on 3 to another handler.
///
/// (I know all the cloning is stupid and nooby af im still learning)
/// Handles POST /signup: inserts User, Credentials, Account and Profile rows
/// in sequence (the four phases described above) and returns the created
/// credentials as JSON.
///
/// NOTE(review): every DB step uses `.expect(...)`, so any failure panics the
/// worker instead of returning an HTTP error — consider propagating with `?`.
/// The `println!` calls look like leftover debug output; prefer `tracing`.
pub async fn signup(
    req: HttpRequest,
    db: Data<Db>,
    data: Form<CredentialsSignup>
) -> actix_web::Result<HttpResponse>
{
    // NOTE(review): `uid`/`cid` are generated but never used below — the
    // inserted rows appear to supply their own ids. TODO confirm and remove.
    let (uid, cid) = (Id::new(Uuid::new_v4()), Id::new(Uuid::new_v4()));
    // Phase 1: create the User row from e-mail + real name.
    let user = User::new(&data.email, Some(&data.name), None)
        .insert(&db.pool)
        .await
        .map_err(|e| {
            tracing::error!("Error inserting user: {:?}", e);
            sentry::capture_error(&e);
            e
        }).expect("Could not insert user");
    println!("Created user. {:?}", &user.clone());
    // Phase 2: create the Credentials row (password hashed before insert).
    let creds = Credentials::create(user.clone().id, data.clone().username, data.clone().password)
        .hash()
        .insert(&db.pool)
        .await
        .map_err(|e| {
            tracing::error!("Error inserting creds: {:?}", e);
            sentry::capture_error(&e);
            e
        }).expect("Could not insert creds");
    println!("Created creds. {:?}", &creds.clone());
    // Phase 3: link user + credentials via a Devisa-provider Account row.
    let acc = Account::new_devisa_credentials(user.id.clone(),creds.id.clone());
    println!("ACCOUNT BEFORE INSERTION: {:?}", &acc.clone());
    acc.clone().insert(&db.pool)
        .await
        .map_err(|e| {
            tracing::error!("Error inserting account: {:?}", e);
            sentry::capture_error(&e);
            e
        }).expect("Could not insert account");
    println!("Created account {:?}", &acc.clone());
    // Phase 4: create an (initially empty) Profile row for the user.
    let profile = Profile { user_id: user.clone().id,..Default::default() }
        .insert(&db.pool)
        .await
        .map_err(|e| {
            tracing::error!("Error inserting profile: {:?}", e);
            sentry::capture_error(&e);
            e
        }).expect("Could not insert profile");
    println!("Created profile. {:?}", &profile.clone());
    // Respond with the created credentials as JSON.
    return Ok(HttpResponse::Ok()
        .json(creds)
        .with_header(("Content-Type", "application/json"))
        .respond_to(&req));
}
/// First 1/3 of signup
/// NOTE(review): unimplemented stub — ignores its inputs and returns an
/// empty 200 OK.
pub async fn signup_user(req: HttpRequest, db: Data<Db>, data: Form<CredentialsSignup>) -> actix_web::Result<HttpResponse> {
    Ok(HttpResponse::Ok().body(""))
}
/// Second 1/3 of signup
/// NOTE(review): unimplemented stub — ignores its inputs and returns an
/// empty 200 OK.
pub async fn signup_creds(req: HttpRequest, db: Data<Db>, data: Form<CredentialsSignup>) -> actix_web::Result<HttpResponse> {
    Ok(HttpResponse::Ok().body(""))
}
/// Final third of signup (creates Acct + Profile)
/// NOTE(review): unimplemented stub — ignores its inputs and returns an
/// empty 200 OK.
pub async fn signup_account(req: HttpRequest, db: Data<Db>, data: Form<CredentialsSignup>) -> actix_web::Result<HttpResponse> {
    Ok(HttpResponse::Ok().body(""))
}
/// NOTE: Credentials login -- three phases
/// 1. User provides e-mail and password (checks against Db user to authorize)
/// 2. Create a new JWT for user, pass in Cookies and/or header
/// 3. DB creates a new session row in the session table, use JWT as access key
/// The signup handler will handle steps 1 and 2, then pass on 3 to another handler.
/// Handles POST /login: verifies username/password, stores the user in the
/// in-memory API session, creates a two-day DB session with a JWT access
/// token, and returns the token via headers and a cookie.
pub async fn login_creds(
    req: HttpRequest,
    db: Data<Db>,
    data: Form<CredentialsIn>,
    session: ApiSession,
) -> ApiResult<impl Responder>
{
    let ver = Credentials::verify(&db.pool, data.username.as_str(), data.password.as_str());
    match ver.await {
        Ok(creds) => {
            tracing::info!("New user logged in! Username {:?}", &data.username);
            // NOTE(review): the User lookup below is keyed by the *credentials*
            // row id, not creds.user_id — confirm User::get expects this id.
            let creds_id = creds.clone().id;
            let user = User::get(&db.pool, creds_id).await?
                .expect("No user with user_id from creds");
            let user_id = user.clone().id;
            // Cache the user in the server-side API session, keyed by user id.
            session.insert(user_id.as_str(), &user).expect("Could not insert user API session");
            // Shadowing: `session` now refers to the DB session, not ApiSession.
            let session = Session::create_two_day_session(&db.pool, user_id)
                .await
                .map_err(|e| {
                    tracing::info!("ERR: new session {}", e);
                    respond::err(e)
                })
                .unwrap_or_default();
            // NOTE(review): set_access_token() is called on a *clone*, so the
            // token presumably never lands on `session` itself — verify the
            // inserted row actually carries the token.
            session.clone().set_access_token()
                .map_err(|e| {tracing::info!("ERR: creating JWT: {:?}", e);
                    sentry::capture_error(&e.root_cause());
                    respond::err(e)
                })
                .expect("Could not generate access token / Could not set session access token");
            let _access_token = session.clone().access_token;
            match session.insert(&db.pool).await {
                Ok(sess) => {
                    // Surface the JWT in a header, a session cookie, and a
                    // manually-built set-cookie header.
                    let j = sess.access_token.clone();
                    let mut jwt_cookie = "dvsa-auth=".to_string();
                    jwt_cookie.extend(j.chars());
                    return Ok(HttpResponse::Accepted()
                        .content_type(header::ContentType::json())
                        .insert_header(("dvsa-token-auth",j.as_str()))
                        .cookie(api_session_cookie("/", "dvsa-token-auth", j.as_str()))
                        .insert_header(("x-session-token", j.as_str()))
                        .insert_header(("set-cookie", jwt_cookie.as_str()))
                        .json(creds))
                },
                Err(e) => {
                    sentry::capture_error(&e);
                    tracing::info!("Could not insert session into db! {}", e);
                    return Ok(respond::err(e));
                }
            }
        },
        Err(e) => {
            // Verification failed: report 404 with the error text.
            // NOTE(review): typo "tht" in the response body string.
            sentry::capture_error(&e.root_cause());
            Ok(HttpResponse::NotFound()
                .content_type(ContentType::json())
                .body(format!("No user with tht username: {}", e)))
        }
    }
}
// TODO handle logout in in-memory session object
/// Logs currently logged in user out of their session.
/// Removes the user from the API session, deletes the DB session row, and
/// clears the auth cookie; 404 when no logged-in user can be found.
/// NOTE(review): the `.expect(...)` calls panic the worker on DB errors —
/// consider returning proper HTTP errors instead.
pub async fn logout_creds(
    sess: ApiSession,
    req: HttpRequest,
    db: Data<Db>,
    data: Json<User>,
) -> ApiResult<impl Responder>
{
    let user_id = data.into_inner().id;
    sess.remove(&user_id.as_str());
    // NOTE(review): typo in the expect message ("Couild ... cookeis").
    let _cookies = req.cookies().expect("Couild not load cookeis");
    if let Some(_c) = req.cookie("dvsa-auth"){
        // Look up and delete the server-side session for this user.
        let sess = Session::get_by_user_id(&db.pool, user_id.clone()).await
            .expect("DB ERROR: Could not get session")
            .expect("no session with that user id");
        Session::delete(&db.pool, sess.id).await
            .expect("DB ERROR: Could not delete session")
            .expect("no session with that id");
        if let Some(mut sess_cookie) = req.cookie("dvsa-token-auth") {
            // Expire the token cookie on the client.
            sess_cookie.make_removal();
            tracing::info!("Logged out user successfully -- removed dvsa-auth and dvsa-cred-auth cookies for {}", &user_id);
            return Ok(HttpResponse::Ok()
                .del_cookie(&sess_cookie)
                .body("Successfully logged out"))
        }
        return Ok(HttpResponse::Ok()
            .body("User has dvsa-auth, but not dvsa-cred-auth cookies. No user to log out"))
    }
    Ok(HttpResponse::NotFound().body("No logged in user to log out"))
}
/// NOTE(review): unimplemented stub — always responds with an empty string;
/// no credential check is performed yet.
pub async fn check_creds(sess: Data<ApiSession>, req: HttpRequest, db: Data<Db>) -> impl Responder {
    "".to_string()
}
/// Scope-root handler: simple liveness response ("hello").
pub async fn index(db: Data<Db>) -> impl Responder {
    HttpResponse::Ok().body("hello")
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::*;
use actix_http::StatusCode;
use actix_web::{test::{TestRequest, self}, dev, web::{self, Form}};
use ap_com::models::{Account, Profile};
use ap_com::types::auth::{Provider, ProviderType};
fn new_creds_signup(username: &str, password: &str, email: &str, name: &str) -> CredentialsSignup {
CredentialsSignup {
username: username.to_string(),
password: password.to_string(),
email: email.to_string(),
name: name.to_string(),
}
}
fn new_creds_in(username: &str, password: &str) -> CredentialsIn {
CredentialsIn {
username: username.to_string(),
password: password.to_string(),
}
}
#[actix_rt::test]
async fn test_creds_login_ok() -> anyhow::Result<()> {
Ok(())
}
#[actix_rt::test]
async fn test_creds_logout_ok() -> anyhow::Result<()> {
Ok(())
}
#[actix_rt::test]
async fn test_creds_login_gives_jwt() -> anyhow::Result<()> {
Ok(())
}
#[actix_rt::test]
async fn test_creds_logout_removes_jwt() -> anyhow::Result<()> {
Ok(())
}
#[actix_rt::test]
async fn test_creds_signup_ok() -> anyhow::Result<()> {
let db = db().await?;
let creds_in = new_creds_signup("jerr_name", "jerr_name_pass", "[email protected]", "jerr");
creds_in.clone().signup_credentials(&db.pool).await?;
let req = TestRequest::get().uri("/auth/signup/creds")
.set_json(&creds_in)
.to_http_request();
let resp = signup(req, Data::new(db.clone()), Form(creds_in)).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let user_out = User::get_by_username(&db.clone().pool, "username1").await?.unwrap();
println!("Created user. {:?}", &user_out.clone());
assert_eq!(user_out.clone().name.unwrap(), "user1".to_string());
assert_eq!(user_out.clone().email.unwrap(), "[email protected]".to_string());
let creds_out = Credentials::get_by_user_id(&db.pool, user_out.clone().id).await?.unwrap();
println!("Created creds. {:?}", &creds_out.clone());
assert_eq!(creds_out.username, "username1".to_string());
assert_eq!(creds_out.password, "pass1".to_string());
assert_eq!(creds_out.user_id, user_out.clone().id);
let acct_out = Account::get_by_provider_account_id(&db.clone().pool, creds_out.clone().id)
.await?.unwrap();
println!("Created account {:?}", &acct_out.clone());
assert_eq!(acct_out.provider_type, ProviderType::Credentials);
assert_eq!(acct_out.provider_id, Provider::Devisa);
assert_eq!(acct_out.provider_account_id, creds_out.clone().id);
assert_eq!(acct_out.user_id, user_out.clone().id);
let prof_out = Profile::get_by_user_id(&db.pool, user_out.clone().id).await?.unwrap();
println!("Created profile. {:?}", &prof_out.clone());
assert_eq!(prof_out.user_id, user_out.clone().id);
Profile::delete(&db.clone().pool, prof_out.clone().id).await?;
Account::delete(&db.clone().pool, acct_out.clone().id).await?;
Credentials::delete(&db.clone().pool, creds_out.clone().id).await?;
User::delete(&db.clone().pool, user_out.clone().id).await?;
Ok(())
}
}
| 40.286667 | 127 | 0.583485 |
69f42aca8b6900a092fc09e73aff9b23eea1e817 | 18,465 | #![feature(once_cell)]
#![cfg_attr(feature = "deny-warnings", deny(warnings))]
// warn on lints, that are included in `rust-lang/rust`s bootstrap
#![warn(rust_2018_idioms, unused_lifetimes)]
use itertools::Itertools;
use regex::Regex;
use std::collections::HashMap;
use std::ffi::OsStr;
use std::fs;
use std::lazy::SyncLazy;
use std::path::{Path, PathBuf};
use walkdir::WalkDir;
pub mod bless;
pub mod fmt;
pub mod ide_setup;
pub mod new_lint;
pub mod serve;
pub mod stderr_length_check;
pub mod update_lints;
static DEC_CLIPPY_LINT_RE: SyncLazy<Regex> = SyncLazy::new(|| {
Regex::new(
r#"(?x)
declare_clippy_lint!\s*[\{(]
(?:\s+///.*)*
\s+pub\s+(?P<name>[A-Z_][A-Z_0-9]*)\s*,\s*
(?P<cat>[a-z_]+)\s*,\s*
"(?P<desc>(?:[^"\\]+|\\(?s).(?-s))*)"\s*[})]
"#,
)
.unwrap()
});
static DEC_DEPRECATED_LINT_RE: SyncLazy<Regex> = SyncLazy::new(|| {
Regex::new(
r#"(?x)
declare_deprecated_lint!\s*[{(]\s*
(?:\s+///.*)*
\s+pub\s+(?P<name>[A-Z_][A-Z_0-9]*)\s*,\s*
"(?P<desc>(?:[^"\\]+|\\(?s).(?-s))*)"\s*[})]
"#,
)
.unwrap()
});
static NL_ESCAPE_RE: SyncLazy<Regex> = SyncLazy::new(|| Regex::new(r#"\\\n\s*"#).unwrap());
pub static DOCS_LINK: &str = "https://rust-lang.github.io/rust-clippy/master/index.html";
/// Lint data parsed from the Clippy source code.
#[derive(Clone, PartialEq, Debug)]
pub struct Lint {
    // Lowercased lint name, e.g. "needless_collect".
    pub name: String,
    // Lint group, e.g. "style", "internal", or "Deprecated".
    pub group: String,
    // One-line description with escaped quotes/newlines normalized.
    pub desc: String,
    // Deprecation reason; `Some` only for deprecated lints.
    pub deprecation: Option<String>,
    // Module path relative to clippy_lints/src, joined with "::".
    pub module: String,
}
impl Lint {
    /// Builds a `Lint` record, lowercasing the name and normalizing the
    /// description (unescaping `\"` and collapsing escaped newlines).
    #[must_use]
    pub fn new(name: &str, group: &str, desc: &str, deprecation: Option<&str>, module: &str) -> Self {
        let unescaped = desc.replace("\\\"", "\"");
        Self {
            name: name.to_lowercase(),
            group: group.to_string(),
            desc: NL_ESCAPE_RE.replace(&unescaped, "").to_string(),
            deprecation: deprecation.map(ToString::to_string),
            module: module.to_string(),
        }
    }
    /// Returns all non-deprecated lints and non-internal lints
    #[must_use]
    pub fn usable_lints(lints: &[Self]) -> Vec<Self> {
        lints
            .iter()
            .filter(|lint| lint.deprecation.is_none() && !lint.group.starts_with("internal"))
            .cloned()
            .collect()
    }
    /// Returns all internal lints (not `internal_warn` lints)
    #[must_use]
    pub fn internal_lints(lints: &[Self]) -> Vec<Self> {
        lints.iter().filter(|lint| lint.group == "internal").cloned().collect()
    }
    /// Returns all deprecated lints
    #[must_use]
    pub fn deprecated_lints(lints: &[Self]) -> Vec<Self> {
        lints.iter().filter(|lint| lint.deprecation.is_some()).cloned().collect()
    }
    /// Returns the lints in a `HashMap`, grouped by the different lint groups
    #[must_use]
    pub fn by_lint_group(lints: impl Iterator<Item = Self>) -> HashMap<String, Vec<Self>> {
        lints.map(|lint| (lint.group.clone(), lint)).into_group_map()
    }
}
/// Generates the Vec items for `register_lint_group` calls in `clippy_lints/src/lib.rs`.
#[must_use]
pub fn gen_lint_group_list<'a>(lints: impl Iterator<Item = &'a Lint>) -> Vec<String> {
    lints
        .map(|lint| {
            format!("        LintId::of({}::{}),", lint.module, lint.name.to_uppercase())
        })
        .sorted()
        .collect()
}
/// Generates the `pub mod module_name` list in `clippy_lints/src/lib.rs`.
#[must_use]
pub fn gen_modules_list<'a>(lints: impl Iterator<Item = &'a Lint>) -> Vec<String> {
    // Each module appears once, as a `mod foo;` line, sorted alphabetically.
    lints
        .map(|lint| &lint.module)
        .unique()
        .map(|module_name| format!("mod {};", module_name))
        .sorted()
        .collect()
}
/// Generates the list of lint links at the bottom of the README
#[must_use]
pub fn gen_changelog_lint_list<'a>(lints: impl Iterator<Item = &'a Lint>) -> Vec<String> {
lints
.sorted_by_key(|l| &l.name)
.map(|l| format!("[`{}`]: {}#{}", l.name, DOCS_LINK, l.name))
.collect()
}
/// Generates the `register_removed` code in `./clippy_lints/src/lib.rs`.
///
/// Panics if any passed lint is not actually deprecated.
#[must_use]
pub fn gen_deprecated<'a>(lints: impl Iterator<Item = &'a Lint>) -> Vec<String> {
    let mut output = Vec::new();
    for lint in lints {
        let reason = lint
            .deprecation
            .clone()
            .expect("only deprecated lints should be passed");
        // Four-line register_removed(...) call per deprecated lint.
        output.push("    store.register_removed(".to_string());
        output.push(format!("        \"clippy::{}\",", lint.name));
        output.push(format!("        \"{}\",", reason));
        output.push("    );".to_string());
    }
    output
}
/// Generates the `store.register_lints(&[...])` block for
/// `clippy_lints/src/lib.rs`, with internal lints gated behind the
/// `internal-lints` feature.
#[must_use]
pub fn gen_register_lint_list<'a>(
    internal_lints: impl Iterator<Item = &'a Lint>,
    usable_lints: impl Iterator<Item = &'a Lint>,
) -> Vec<String> {
    let header = "    store.register_lints(&[".to_string();
    let footer = "    ]);".to_string();
    // Internal lints must be sorted by the plain `module::NAME` entry; the
    // emitted string is prefixed with a #[cfg] attribute, so sorting the
    // emitted strings directly would order by the attribute text instead.
    let internal_lints = internal_lints
        .sorted_by_key(|l| format!("        {}::{},", l.module, l.name.to_uppercase()))
        .map(|l| {
            format!(
                "        #[cfg(feature = \"internal-lints\")]\n        {}::{},",
                l.module,
                l.name.to_uppercase()
            )
        });
    // For usable lints the emitted string *is* the sort key, so one sort of
    // the mapped strings suffices (the previous sorted_by_key + sorted was a
    // redundant double sort).
    let other_lints = usable_lints
        .map(|l| format!("        {}::{},", l.module, l.name.to_uppercase()))
        .sorted();
    let mut lint_list = vec![header];
    lint_list.extend(internal_lints);
    lint_list.extend(other_lints);
    lint_list.push(footer);
    lint_list
}
/// Gathers all files in `src/clippy_lints` and gathers all lints inside
pub fn gather_all() -> impl Iterator<Item = Lint> {
    // Walk every .rs file under clippy_lints/src and parse its lint macros.
    lint_files().flat_map(|f| gather_from_file(&f))
}
// Reads one source file, derives its module path relative to
// clippy_lints/src, and parses all lint declarations it contains.
fn gather_from_file(dir_entry: &walkdir::DirEntry) -> impl Iterator<Item = Lint> {
    let content = fs::read_to_string(dir_entry.path()).unwrap();
    let path = dir_entry.path();
    let filename = path.file_stem().unwrap();
    let path_buf = path.with_file_name(filename);
    let mut rel_path = path_buf
        .strip_prefix(clippy_project_root().join("clippy_lints/src"))
        .expect("only files in `clippy_lints/src` should be looked at");
    // If the lints are stored in mod.rs, we get the module name from
    // the containing directory:
    if filename == "mod" {
        rel_path = rel_path.parent().unwrap();
    }
    // Turn the relative path components into a `foo::bar` module path.
    let module = rel_path
        .components()
        .map(|c| c.as_os_str().to_str().unwrap())
        .collect::<Vec<_>>()
        .join("::");
    parse_contents(&content, &module)
}
/// Parses one file's text for `declare_clippy_lint!` and
/// `declare_deprecated_lint!` declarations, yielding active lints first and
/// deprecated lints after, all attributed to `module`.
fn parse_contents(content: &str, module: &str) -> impl Iterator<Item = Lint> {
    // Materializing into a Vec sidesteps lifetime issues with the regex
    // captures borrowing `content`.
    let mut found: Vec<Lint> = DEC_CLIPPY_LINT_RE
        .captures_iter(content)
        .map(|cap| Lint::new(&cap["name"], &cap["cat"], &cap["desc"], None, module))
        .collect();
    found.extend(
        DEC_DEPRECATED_LINT_RE
            .captures_iter(content)
            .map(|cap| Lint::new(&cap["name"], "Deprecated", &cap["desc"], Some(&cap["desc"]), module)),
    );
    found.into_iter()
}
/// Collects all .rs files in the `clippy_lints/src` directory
fn lint_files() -> impl Iterator<Item = walkdir::DirEntry> {
    // We use `WalkDir` instead of `fs::read_dir` here in order to recurse into subdirectories.
    // Otherwise we would not collect all the lints, for example in `clippy_lints/src/methods/`.
    let path = clippy_project_root().join("clippy_lints/src");
    WalkDir::new(path)
        .into_iter()
        .filter_map(Result::ok)
        // Keep only regular .rs source files.
        .filter(|f| f.path().extension() == Some(OsStr::new("rs")))
}
/// Whether a file has had its text changed or not
#[derive(PartialEq, Debug)]
pub struct FileChange {
    // True when the replacement produced text different from the input.
    pub changed: bool,
    // The complete new file contents (not only the replaced region).
    pub new_lines: String,
}
/// Replaces a region in a file delimited by two lines matching regexes.
///
/// `path` is the relative path to the file on which you want to perform the replacement.
///
/// See `replace_region_in_text` for documentation of the other options.
///
/// # Panics
///
/// Panics if the file could not be read from or written to.
pub fn replace_region_in_file<F>(
    path: &Path,
    start: &str,
    end: &str,
    replace_start: bool,
    write_back: bool,
    replacements: F,
) -> FileChange
where
    F: FnOnce() -> Vec<String>,
{
    let contents =
        fs::read_to_string(path).unwrap_or_else(|e| panic!("Cannot read from {}: {}", path.display(), e));
    let change = replace_region_in_text(&contents, start, end, replace_start, replacements);
    if write_back {
        fs::write(path, change.new_lines.as_bytes())
            .unwrap_or_else(|e| panic!("Cannot write to {}: {}", path.display(), e));
    }
    change
}
/// Replaces a region in a text delimited by two lines matching regexes.
///
/// * `text` is the input text on which you want to perform the replacement
/// * `start` / `end` are `&str` patterns (compiled to `Regex`) matching the
///   delimiter line before, respectively after, the region to replace
/// * If `replace_start` is true, the `start` delimiter line is replaced as well. The `end`
///   delimiter line is never replaced.
/// * `replacements` is a closure producing the lines that make up the new region
///
/// If you want to perform the replacement on files instead of already parsed text,
/// use `replace_region_in_file`.
///
/// # Example
///
/// ```
/// let the_text = "replace_start\nsome text\nthat will be replaced\nreplace_end";
/// let result =
///     clippy_dev::replace_region_in_text(the_text, "replace_start", "replace_end", false, || {
///         vec!["a different".to_string(), "text".to_string()]
///     })
///     .new_lines;
/// assert_eq!("replace_start\na different\ntext\nreplace_end", result);
/// ```
///
/// # Panics
///
/// Panics if start or end is not valid regex
pub fn replace_region_in_text<F>(text: &str, start: &str, end: &str, replace_start: bool, replacements: F) -> FileChange
where
    F: FnOnce() -> Vec<String>,
{
    let start = Regex::new(start).unwrap();
    let end = Regex::new(end).unwrap();
    let replacement_lines = replacements();
    let mut collected = vec![];
    let mut inside_region = false;
    let mut matched_start = false;
    for line in text.lines() {
        if inside_region {
            // Drop the old region's lines; when the end delimiter appears,
            // splice in the replacement and keep the delimiter line itself.
            if end.is_match(line) {
                collected.extend(replacement_lines.clone());
                collected.push(line.to_string());
                inside_region = false;
            }
        } else if start.is_match(line) {
            if !replace_start {
                collected.push(line.to_string());
            }
            inside_region = true;
            matched_start = true;
        } else {
            collected.push(line.to_string());
        }
    }
    if !matched_start {
        // This happens if the provided regex does not match in the given text
        // or file. Most likely the regex on the caller's side is out of date.
        eprintln!("error: regex \n{:?}\ndoesn't match. You may have to update it.", start);
        std::process::exit(1);
    }
    let mut new_lines = collected.join("\n");
    if text.ends_with('\n') {
        new_lines.push('\n');
    }
    let changed = new_lines != text;
    FileChange { changed, new_lines }
}
/// Returns the path to the Clippy project directory
///
/// Walks up from the current working directory until it finds a `Cargo.toml`
/// whose `[package]` name is `clippy`.
///
/// # Panics
///
/// Panics if the current directory could not be retrieved, there was an error
/// reading any of the Cargo.toml files or no ancestor directory is the clippy
/// root directory
#[must_use]
pub fn clippy_project_root() -> PathBuf {
    let current_dir = std::env::current_dir().unwrap();
    for path in current_dir.ancestors() {
        let result = std::fs::read_to_string(path.join("Cargo.toml"));
        if let Err(err) = &result {
            if err.kind() == std::io::ErrorKind::NotFound {
                // No manifest at this level; keep walking up.
                continue;
            }
        }
        let content = result.unwrap();
        // Normalize line endings so checkouts with CRLF (e.g. git's
        // `core.autocrlf` on Windows) are recognized too; the previous exact
        // `"\n"` match silently failed there and caused a spurious panic.
        if content.replace("\r\n", "\n").contains("[package]\nname = \"clippy\"") {
            return path.to_path_buf();
        }
    }
    panic!("error: Can't determine root of project. Please run inside a Clippy working dir.");
}
// Verifies that both lint declaration macros (including multi-line string
// descriptions) and deprecated-lint declarations are parsed correctly.
#[test]
fn test_parse_contents() {
    let result: Vec<Lint> = parse_contents(
        r#"
declare_clippy_lint! {
pub PTR_ARG,
style,
"really long \
text"
}
declare_clippy_lint!{
pub DOC_MARKDOWN,
pedantic,
"single line"
}
/// some doc comment
declare_deprecated_lint! {
pub SHOULD_ASSERT_EQ,
"`assert!()` will be more flexible with RFC 2011"
}
"#,
        "module_name",
    )
    .collect();
    let expected = vec![
        Lint::new("ptr_arg", "style", "really long text", None, "module_name"),
        Lint::new("doc_markdown", "pedantic", "single line", None, "module_name"),
        Lint::new(
            "should_assert_eq",
            "Deprecated",
            "`assert!()` will be more flexible with RFC 2011",
            Some("`assert!()` will be more flexible with RFC 2011"),
            "module_name",
        ),
    ];
    assert_eq!(expected, result);
}
// Region replacement keeping the start delimiter line.
#[test]
fn test_replace_region() {
    let text = "\nabc\n123\n789\ndef\nghi";
    let expected = FileChange {
        changed: true,
        new_lines: "\nabc\nhello world\ndef\nghi".to_string(),
    };
    let result = replace_region_in_text(text, r#"^\s*abc$"#, r#"^\s*def"#, false, || {
        vec!["hello world".to_string()]
    });
    assert_eq!(expected, result);
}
// Region replacement that also replaces the start delimiter line.
#[test]
fn test_replace_region_with_start() {
    let text = "\nabc\n123\n789\ndef\nghi";
    let expected = FileChange {
        changed: true,
        new_lines: "\nhello world\ndef\nghi".to_string(),
    };
    let result = replace_region_in_text(text, r#"^\s*abc$"#, r#"^\s*def"#, true, || {
        vec!["hello world".to_string()]
    });
    assert_eq!(expected, result);
}
// An empty replacement over an empty region reports `changed: false`.
#[test]
fn test_replace_region_no_changes() {
    let text = "123\n456\n789";
    let expected = FileChange {
        changed: false,
        new_lines: "123\n456\n789".to_string(),
    };
    let result = replace_region_in_text(text, r#"^\s*123$"#, r#"^\s*456"#, false, Vec::new);
    assert_eq!(expected, result);
}
// Deprecated and internal lints must be filtered out by `usable_lints`.
#[test]
fn test_usable_lints() {
    let lints = vec![
        Lint::new("should_assert_eq", "Deprecated", "abc", Some("Reason"), "module_name"),
        Lint::new("should_assert_eq2", "Not Deprecated", "abc", None, "module_name"),
        Lint::new("should_assert_eq2", "internal", "abc", None, "module_name"),
        Lint::new("should_assert_eq2", "internal_style", "abc", None, "module_name"),
    ];
    let expected = vec![Lint::new(
        "should_assert_eq2",
        "Not Deprecated",
        "abc",
        None,
        "module_name",
    )];
    assert_eq!(expected, Lint::usable_lints(&lints));
}
// Grouping preserves per-group insertion order of the lints.
#[test]
fn test_by_lint_group() {
    let lints = vec![
        Lint::new("should_assert_eq", "group1", "abc", None, "module_name"),
        Lint::new("should_assert_eq2", "group2", "abc", None, "module_name"),
        Lint::new("incorrect_match", "group1", "abc", None, "module_name"),
    ];
    let mut expected: HashMap<String, Vec<Lint>> = HashMap::new();
    expected.insert(
        "group1".to_string(),
        vec![
            Lint::new("should_assert_eq", "group1", "abc", None, "module_name"),
            Lint::new("incorrect_match", "group1", "abc", None, "module_name"),
        ],
    );
    expected.insert(
        "group2".to_string(),
        vec![Lint::new("should_assert_eq2", "group2", "abc", None, "module_name")],
    );
    assert_eq!(expected, Lint::by_lint_group(lints.into_iter()));
}
// Changelog entries link each lint to its anchor under `DOCS_LINK`.
#[test]
fn test_gen_changelog_lint_list() {
    let lints = vec![
        Lint::new("should_assert_eq", "group1", "abc", None, "module_name"),
        Lint::new("should_assert_eq2", "group2", "abc", None, "module_name"),
    ];
    // `DOCS_LINK` already implements `Display`; formatting it directly avoids
    // the redundant intermediate `String` the old `.to_string()` allocated.
    let expected = vec![
        format!("[`should_assert_eq`]: {}#should_assert_eq", DOCS_LINK),
        format!("[`should_assert_eq2`]: {}#should_assert_eq2", DOCS_LINK),
    ];
    assert_eq!(expected, gen_changelog_lint_list(lints.iter()));
}
// `gen_deprecated` emits one `register_removed` call per deprecated lint.
#[test]
fn test_gen_deprecated() {
    let lints = vec![
        Lint::new(
            "should_assert_eq",
            "group1",
            "abc",
            Some("has been superseded by should_assert_eq2"),
            "module_name",
        ),
        Lint::new(
            "another_deprecated",
            "group2",
            "abc",
            Some("will be removed"),
            "module_name",
        ),
    ];
    let expected: Vec<String> = vec![
        "    store.register_removed(",
        "        \"clippy::should_assert_eq\",",
        "        \"has been superseded by should_assert_eq2\",",
        "    );",
        "    store.register_removed(",
        "        \"clippy::another_deprecated\",",
        "        \"will be removed\",",
        "    );",
    ]
    .into_iter()
    .map(String::from)
    .collect();
    assert_eq!(expected, gen_deprecated(lints.iter()));
}
// A lint without a deprecation reason must make `gen_deprecated` panic.
#[test]
#[should_panic]
fn test_gen_deprecated_fail() {
    let lints = vec![Lint::new("should_assert_eq2", "group2", "abc", None, "module_name")];
    let _deprecated_lints = gen_deprecated(lints.iter());
}
// Module declarations are emitted sorted alphabetically.
#[test]
fn test_gen_modules_list() {
    let lints = vec![
        Lint::new("should_assert_eq", "group1", "abc", None, "module_name"),
        Lint::new("incorrect_stuff", "group3", "abc", None, "another_module"),
    ];
    let expected = vec!["mod another_module;".to_string(), "mod module_name;".to_string()];
    assert_eq!(expected, gen_modules_list(lints.iter()));
}
// Group list entries are sorted and use the upper-cased lint name.
#[test]
fn test_gen_lint_group_list() {
    let lints = vec![
        Lint::new("abc", "group1", "abc", None, "module_name"),
        Lint::new("should_assert_eq", "group1", "abc", None, "module_name"),
        Lint::new("internal", "internal_style", "abc", None, "module_name"),
    ];
    let expected = vec![
        "        LintId::of(module_name::ABC),".to_string(),
        "        LintId::of(module_name::INTERNAL),".to_string(),
        "        LintId::of(module_name::SHOULD_ASSERT_EQ),".to_string(),
    ];
    assert_eq!(expected, gen_lint_group_list(lints.iter()));
}
| 32.914439 | 120 | 0.588681 |
290b29ae6582154db2f612e1e6a8c07c7771f02e | 424 | use nano_ecs::*;
/// Component holding a one-dimensional position value.
#[derive(Clone)]
pub struct Position(pub f32);
/// Component holding a one-dimensional velocity value.
#[derive(Clone)]
pub struct Velocity(pub f32);
// Generates the `World` type for these component types. NOTE(review): the
// leading `4` is presumably the component capacity/mask size used by the
// nano_ecs macro — confirm against the nano_ecs documentation.
ecs!{4: Position, Velocity}
fn main() {
    let mut world = World::new();
    // One entity with both components, one with only a `Position`.
    world.push((Position(0.0), Velocity(0.0)));
    world.push(Position(1.0));
    // NOTE(review): `system_ids!` appears to run the closure over entities
    // matching the `?|n| ...` filter (here: entities WITHOUT a `Velocity`),
    // binding each entity id to `id` — confirm against the nano_ecs docs.
    system_ids!(world,
        ?|n| !world.has_component::<Velocity>(n);
        id,
        |pos: &Position| {
            println!("{}: {}", id, pos.0);
        });
}
| 20.190476 | 49 | 0.568396 |
e66a35452f0d878d5624f5384089c8bea85e722b | 3,168 | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::source::snippet;
use clippy_utils::ty::{is_type_diagnostic_item, is_type_lang_item};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir::{Expr, ExprKind, LangItem, MatchSource};
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::lint::in_external_macro;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::sym;
declare_clippy_lint! {
    /// ### What it does
    /// Checks for `match vec[idx]` or `match vec[n..m]`.
    ///
    /// ### Why is this bad?
    /// This can panic at runtime.
    ///
    /// ### Example
    /// ```rust, no_run
    /// let arr = vec![0, 1, 2, 3];
    /// let idx = 1;
    ///
    /// // Bad
    /// match arr[idx] {
    ///     0 => println!("{}", 0),
    ///     1 => println!("{}", 3),
    ///     _ => {},
    /// }
    /// ```
    /// Use instead:
    /// ```rust, no_run
    /// let arr = vec![0, 1, 2, 3];
    /// let idx = 1;
    ///
    /// // Good
    /// match arr.get(idx) {
    ///     Some(0) => println!("{}", 0),
    ///     Some(1) => println!("{}", 3),
    ///     _ => {},
    /// }
    /// ```
    pub MATCH_ON_VEC_ITEMS,
    pedantic,
    "matching on vector elements can panic"
}
// Register the pass type; `MatchOnVecItems` emits only this single lint.
declare_lint_pass!(MatchOnVecItems => [MATCH_ON_VEC_ITEMS]);
impl<'tcx> LateLintPass<'tcx> for MatchOnVecItems {
    // Fires on non-macro `match` expressions whose scrutinee indexes a `Vec`,
    // suggesting the panic-free `vec.get(..)` form instead.
    fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'tcx>) {
        if_chain! {
            // Skip code expanded from external macros: not the user's fault.
            if !in_external_macro(cx.sess(), expr.span);
            if let ExprKind::Match(match_expr, _, MatchSource::Normal) = expr.kind;
            if let Some(idx_expr) = is_vec_indexing(cx, match_expr);
            if let ExprKind::Index(vec, idx) = idx_expr.kind;
            then {
                // FIXME: could be improved to suggest surrounding every pattern with Some(_),
                // but only when `or_patterns` are stabilized.
                span_lint_and_sugg(
                    cx,
                    MATCH_ON_VEC_ITEMS,
                    match_expr.span,
                    "indexing into a vector may panic",
                    "try this",
                    format!(
                        "{}.get({})",
                        snippet(cx, vec.span, ".."),
                        snippet(cx, idx.span, "..")
                    ),
                    // The suggestion changes the matched type to `Option<_>`,
                    // so the arms need manual adjustment.
                    Applicability::MaybeIncorrect
                );
            }
        }
    }
}
fn is_vec_indexing<'tcx>(cx: &LateContext<'tcx>, expr: &'tcx Expr<'tcx>) -> Option<&'tcx Expr<'tcx>> {
if_chain! {
if let ExprKind::Index(array, index) = expr.kind;
if is_vector(cx, array);
if !is_full_range(cx, index);
then {
return Some(expr);
}
}
None
}
/// Whether the expression's type (behind any references) is `Vec`.
fn is_vector(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
    // Strip `&`/`&mut` layers before checking for the `Vec` diagnostic item.
    let ty = cx.typeck_results().expr_ty(expr).peel_refs();
    is_type_diagnostic_item(cx, ty, sym::vec_type)
}
/// Whether the expression's type (behind any references) is `RangeFull` (`..`).
fn is_full_range(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
    let ty = cx.typeck_results().expr_ty(expr).peel_refs();
    is_type_lang_item(cx, ty, LangItem::RangeFull)
}
| 30.461538 | 102 | 0.527146 |
034a73a4708e0fe02202f17fe09554b50b0ba790 | 8,495 | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::model::{
component::{BindReason, ComponentInstance},
error::ModelError,
model::Model,
},
::routing::error::ComponentInstanceError,
fidl_fuchsia_component as fcomponent, fidl_fuchsia_sys2 as fsys,
futures::prelude::*,
log::*,
moniker::{
AbsoluteMonikerBase, MonikerError, PartialAbsoluteMoniker, PartialRelativeMoniker,
RelativeMonikerBase,
},
std::{
convert::TryFrom,
sync::{Arc, Weak},
},
};
/// Server for the `fuchsia.sys2.LifecycleController` FIDL protocol: resolves,
/// binds and stops component instances addressed by relative moniker.
#[derive(Clone)]
pub struct LifecycleController {
    // Weak handle to the component model; upgraded on every request.
    model: Weak<Model>,
    // Absolute moniker prepended to every relative moniker received over
    // FIDL; requests may not escape this scope (up-paths are rejected).
    prefix: PartialAbsoluteMoniker,
}
/// The three operations the protocol exposes; dispatched in `serve` and
/// executed by `perform_operation`.
#[derive(Debug)]
enum LifecycleOperation {
    Bind,
    Resolve,
    Stop,
}
impl LifecycleController {
    /// Creates a controller scoped to `prefix`.
    pub fn new(model: Weak<Model>, prefix: PartialAbsoluteMoniker) -> Self {
        Self { model, prefix }
    }
    /// Resolves `moniker` relative to `self.prefix`, looks the instance up in
    /// the model and performs `operation` on it. `recursive_stop` is only
    /// meaningful for `LifecycleOperation::Stop`.
    ///
    /// Errors are mapped to the coarse-grained `fuchsia.component.Error`
    /// values the FIDL protocol defines; unexpected model errors become
    /// `Internal` and are logged at error level.
    async fn perform_operation(
        &self,
        operation: LifecycleOperation,
        moniker: String,
        recursive_stop: bool,
    ) -> Result<(), fcomponent::Error> {
        let relative_moniker =
            PartialRelativeMoniker::try_from(moniker.as_str()).map_err(|e: MonikerError| {
                debug!("lifecycle controller received invalid component moniker: {}", e);
                fcomponent::Error::InvalidArguments
            })?;
        // Reject monikers containing `\` segments, which would climb above
        // the controller's scope.
        if !relative_moniker.up_path().is_empty() {
            debug!(
                "lifecycle controller received moniker that attempted to reach outside its scope"
            );
            return Err(fcomponent::Error::InvalidArguments);
        }
        let abs_moniker = PartialAbsoluteMoniker::from_relative(&self.prefix, &relative_moniker)
            .map_err(|e: MonikerError| {
                debug!("lifecycle controller received invalid component moniker: {}", e);
                fcomponent::Error::InvalidArguments
            })?;
        let model = self.model.upgrade().ok_or(fcomponent::Error::Internal)?;
        let component = model.look_up(&abs_moniker).await.map_err(|e| match e {
            e @ ModelError::ResolverError { .. } | e @ ModelError::ComponentInstanceError {
                err: ComponentInstanceError::ResolveFailed { .. }
            } => {
                debug!(
                    "lifecycle controller failed to resolve component instance {}: {:?}",
                    abs_moniker,
                    e
                );
                fcomponent::Error::InstanceCannotResolve
            }
            e @ ModelError::ComponentInstanceError {
                err: ComponentInstanceError::InstanceNotFound { .. },
            } => {
                debug!(
                    "lifecycle controller was asked to perform an operation on a component instance that doesn't exist {}: {:?}",
                    abs_moniker,
                    e,
                );
                fcomponent::Error::InstanceNotFound
            }
            e => {
                error!(
                    "unexpected error encountered by lifecycle controller while looking up component {}: {:?}",
                    abs_moniker,
                    e,
                );
                fcomponent::Error::Internal
            }
        })?;
        match operation {
            // `look_up` above already resolved the instance; nothing more to do.
            LifecycleOperation::Resolve => Ok(()),
            LifecycleOperation::Bind => {
                let _: Arc<ComponentInstance> =
                    component.bind(&BindReason::Debug).await.map_err(|e: ModelError| {
                        debug!(
                            "lifecycle controller failed to bind to component instance {}: {:?}",
                            abs_moniker, e
                        );
                        fcomponent::Error::InstanceCannotStart
                    })?;
                Ok(())
            }
            LifecycleOperation::Stop => {
                component.stop_instance(false, recursive_stop).await.map_err(|e: ModelError| {
                    debug!(
                        "lifecycle controller failed to stop component instance {} (recursive_stop={}): {:?}",
                        abs_moniker, recursive_stop, e
                    );
                    fcomponent::Error::Internal
                })
            }
        }
    }
    /// Serves the protocol until the request stream closes or errors.
    /// Per-request send failures are ignored (the client may have gone away).
    pub async fn serve(&self, mut stream: fsys::LifecycleControllerRequestStream) {
        while let Ok(Some(operation)) = stream.try_next().await {
            match operation {
                fsys::LifecycleControllerRequest::Resolve { moniker, responder } => {
                    let mut res =
                        self.perform_operation(LifecycleOperation::Resolve, moniker, false).await;
                    let _ = responder.send(&mut res);
                }
                fsys::LifecycleControllerRequest::Bind { moniker, responder } => {
                    let mut res =
                        self.perform_operation(LifecycleOperation::Bind, moniker, false).await;
                    let _ = responder.send(&mut res);
                }
                fsys::LifecycleControllerRequest::Stop { moniker, responder, is_recursive } => {
                    let mut res = self
                        .perform_operation(LifecycleOperation::Stop, moniker, is_recursive)
                        .await;
                    let _ = responder.send(&mut res);
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use {
        super::*,
        crate::model::testing::test_helpers::{TestEnvironmentBuilder, TestModelResult},
        cm_rust_testing::ComponentDeclBuilder,
        fidl::endpoints::create_proxy_and_stream,
        fidl_fuchsia_sys2 as fsys, fuchsia_async as fasync,
        std::sync::Arc,
    };
    // Covers the Resolve path: success, scope escape, missing instance and
    // unresolvable URL each map to the expected FIDL error.
    #[fuchsia::test]
    async fn lifecycle_controller_test() {
        let components = vec![
            (
                "root",
                ComponentDeclBuilder::new()
                    .add_child(cm_rust::ChildDecl {
                        name: "a".to_string(),
                        url: "test:///a".to_string(),
                        startup: fsys::StartupMode::Eager,
                        environment: None,
                        on_terminate: None,
                    })
                    .add_child(cm_rust::ChildDecl {
                        name: "cant-resolve".to_string(),
                        url: "cant-resolve://cant-resolve".to_string(),
                        startup: fsys::StartupMode::Eager,
                        environment: None,
                        on_terminate: None,
                    })
                    .build(),
            ),
            (
                "a",
                ComponentDeclBuilder::new()
                    .add_child(cm_rust::ChildDecl {
                        name: "b".to_string(),
                        url: "test:///b".to_string(),
                        startup: fsys::StartupMode::Eager,
                        environment: None,
                        on_terminate: None,
                    })
                    .build(),
            ),
            ("b", ComponentDeclBuilder::new().build()),
        ];
        let TestModelResult { model, .. } =
            TestEnvironmentBuilder::new().set_components(components).build().await;
        let lifecycle_controller = LifecycleController::new(Arc::downgrade(&model), vec![].into());
        let (lifecycle_proxy, lifecycle_request_stream) =
            create_proxy_and_stream::<fsys::LifecycleControllerMarker>().unwrap();
        // async move {} is used here because we want this to own the lifecycle_controller
        let _lifecycle_server_task = fasync::Task::local(async move {
            lifecycle_controller.serve(lifecycle_request_stream).await
        });
        assert_eq!(lifecycle_proxy.resolve(".").await.unwrap(), Ok(()));
        assert_eq!(lifecycle_proxy.resolve("./a").await.unwrap(), Ok(()));
        assert_eq!(
            lifecycle_proxy.resolve(".\\scope-escape-attempt").await.unwrap(),
            Err(fcomponent::Error::InvalidArguments)
        );
        assert_eq!(
            lifecycle_proxy.resolve("./doesnt-exist").await.unwrap(),
            Err(fcomponent::Error::InstanceNotFound)
        );
        assert_eq!(
            lifecycle_proxy.resolve("./cant-resolve").await.unwrap(),
            Err(fcomponent::Error::InstanceCannotResolve)
        );
    }
}
| 37.422907 | 129 | 0.518776 |
bba649aeb4b58ed179e7942ae6beb7aeac9c1232 | 947 | use serde::{Serialize, Deserialize};
use crate::validate::Validate;
/// A credit card record; `brand` is filled in by `apply_brand` from `number`.
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct CreditCard {
    pub number: String,
    pub cardholder_name: String,
    pub expiration_month: String,
    pub expiration_year: String,
    // Detected card brand (e.g. set by `apply_brand`); `None` until detected.
    pub brand: Option<String>,
    pub security_code: Option<String>
}
impl CreditCard {
    /// Creates an empty card; identical to `CreditCard::default()`.
    pub fn new() -> Self {
        // The derived `Default` already produces empty strings and `None`s,
        // so there is no need to spell each field out by hand.
        Self::default()
    }
    /// Validates `self.number` and stores the detected brand name in
    /// `self.brand`. On validation failure the brand is left untouched and a
    /// message is printed to stdout.
    pub fn apply_brand(&mut self) {
        // The clone decouples the borrow of `number` from the mutation of
        // `brand` below; presumably `Validate::from`'s result ties into the
        // input lifetime — confirm before removing it.
        match Validate::from(self.number.clone().as_str()) {
            Ok(result) => {
                self.brand = Some(result.card_type.name());
            },
            Err(err) => println!("Card is invalid: {:?}", err)
        }
    }
}
| 27.057143 | 67 | 0.565998 |
6ab25bbf15be0cf13d2ac2ea6bdb362683e3bc5e | 720 | use serde::{Deserialize, Serialize};
use crate::prototypes::{Prototype, Visitor};
use crate::types::*; // TODO: Import only specific types
/// A `font` prototype definition (see the `Prototype` impl below); field
/// comments record the wire types from the prototype format.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Font {
    /// from :: string
    from: String,
    /// name :: string
    name: String,
    /// size :: int32
    size: i32,
    /// type :: string
    r#type: String,
    /// border :: bool (optional)
    border: Option<bool>,
    /// border_color :: Color (optional)
    border_color: Option<Color>,
    /// filtered :: bool (optional)
    filtered: Option<bool>,
    /// spacing :: float (optional)
    spacing: Option<f32>,
}
// Registers this struct under the `font` prototype type name.
impl Prototype for Font {
    const TYPE: Option<&'static str> = Some("font");
}
| 20 | 56 | 0.605556 |
7af015061202115c7ba854d4f465a0f40b710f59 | 14 | pub mod f252;
| 7 | 13 | 0.714286 |
89b11bff3f2b75a92a8282c4972effbb8fcb1ed7 | 216 | use winfw::disable_fw_rule;
fn main() {
    // Try to disable the named firewall rule and report the outcome.
    let rule_name = "TEST_INTERFACE_RULE".to_string();
    match disable_fw_rule(&rule_name) {
        Ok(()) => println!("Success"),
        Err(e) => println!("Error: {}", e),
    }
}
| 21.6 | 63 | 0.560185 |
567e3ad64a7353df9775744941d5cb63cf8a3e27 | 929 | use crate::config::{ModuleConfig, RootModuleConfig};
use starship_module_config_derive::ModuleConfig;
/// User-facing configuration for the `status` prompt module.
#[derive(Clone, ModuleConfig)]
pub struct StatusConfig<'a> {
    pub format: &'a str,
    pub symbol: &'a str,
    pub not_executable_symbol: &'a str,
    pub not_found_symbol: &'a str,
    pub sigint_symbol: &'a str,
    pub signal_symbol: &'a str,
    pub style: &'a str,
    // When true, show the per-status symbols above instead of the generic one.
    pub map_symbol: bool,
    pub recognize_signal_code: bool,
    pub disabled: bool,
}
impl<'a> RootModuleConfig<'a> for StatusConfig<'a> {
    /// Default configuration; note the module ships disabled
    /// (`disabled: true`) and must be opted into.
    fn new() -> Self {
        StatusConfig {
            format: "[$symbol$status]($style) ",
            symbol: "✖",
            not_executable_symbol: "🚫",
            not_found_symbol: "🔍",
            sigint_symbol: "🧱",
            signal_symbol: "⚡",
            style: "bold red",
            map_symbol: false,
            recognize_signal_code: true,
            disabled: true,
        }
    }
}
| 26.542857 | 52 | 0.575888 |
cc304cb10a60c875447f98581389cf7ec9fbac99 | 45,503 | use either::Either;
use hir::{known, Callable, HasVisibility, HirDisplay, Semantics, TypeInfo};
use ide_db::{base_db::FileRange, helpers::FamousDefs, RootDatabase};
use itertools::Itertools;
use stdx::to_lower_snake_case;
use syntax::{
ast::{self, AstNode, HasArgList, HasName, UnaryOp},
match_ast, Direction, NodeOrToken, SmolStr, SyntaxKind, TextRange, T,
};
use crate::FileId;
/// Which inlay hints to compute; one enable flag per hint category.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct InlayHintsConfig {
    pub type_hints: bool,
    pub parameter_hints: bool,
    pub chaining_hints: bool,
    // Suppress type hints that merely repeat a constructor's type name
    // (see `is_named_constructor`).
    pub hide_named_constructor_hints: bool,
    // Maximum rendered length of a type before truncation.
    pub max_length: Option<usize>,
}
/// The category a hint belongs to; mirrors the flags in `InlayHintsConfig`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum InlayKind {
    TypeHint,
    ParameterHint,
    ChainingHint,
}
/// One computed hint: the source range it is attached to, its category and
/// the text to render.
#[derive(Debug)]
pub struct InlayHint {
    pub range: TextRange,
    pub kind: InlayKind,
    pub label: SmolStr,
}
// Feature: Inlay Hints
//
// rust-analyzer shows additional information inline with the source code.
// Editors usually render this using read-only virtual text snippets interspersed with code.
//
// rust-analyzer shows hints for
//
// * types of local variables
// * names of function arguments
// * types of chained expressions
//
// **Note:** VS Code does not have native support for inlay hints https://github.com/microsoft/vscode/issues/16221[yet] and the hints are implemented using decorations.
// This approach has limitations, the caret movement and bracket highlighting near the edges of the hint may be weird:
// https://github.com/rust-analyzer/rust-analyzer/issues/1623[1], https://github.com/rust-analyzer/rust-analyzer/issues/3453[2].
//
// |===
// | Editor | Action Name
//
// | VS Code | **Rust Analyzer: Toggle inlay hints*
// |===
//
// image::https://user-images.githubusercontent.com/48062697/113020660-b5f98b80-917a-11eb-8d70-3be3fd558cdd.png[]
// Entry point: walks the file once and dispatches each node to the hint
// collectors; each collector checks its own `config` enable flag.
pub(crate) fn inlay_hints(
    db: &RootDatabase,
    file_id: FileId,
    config: &InlayHintsConfig,
) -> Vec<InlayHint> {
    let _p = profile::span("inlay_hints");
    let sema = Semantics::new(db);
    let file = sema.parse(file_id);
    let file = file.syntax();
    let mut res = Vec::new();
    for node in file.descendants() {
        if let Some(expr) = ast::Expr::cast(node.clone()) {
            get_chaining_hints(&mut res, &sema, config, &expr);
            // Parameter hints only apply to call-like expressions.
            match expr {
                ast::Expr::CallExpr(it) => {
                    get_param_name_hints(&mut res, &sema, config, ast::Expr::from(it));
                }
                ast::Expr::MethodCallExpr(it) => {
                    get_param_name_hints(&mut res, &sema, config, ast::Expr::from(it));
                }
                _ => (),
            }
        } else if let Some(it) = ast::IdentPat::cast(node.clone()) {
            get_bind_pat_hints(&mut res, &sema, config, &it);
        }
    }
    res
}
// Emits a type hint after an expression that ends a line of a method chain,
// i.e. whose next non-trivia sibling tokens are a newline followed by `.`.
fn get_chaining_hints(
    acc: &mut Vec<InlayHint>,
    sema: &Semantics<RootDatabase>,
    config: &InlayHintsConfig,
    expr: &ast::Expr,
) -> Option<()> {
    if !config.chaining_hints {
        return None;
    }
    if matches!(expr, ast::Expr::RecordExpr(_)) {
        return None;
    }
    // Prefer the macro-expanded node (attribute macros) when available.
    let descended = sema.descend_node_into_attributes(expr.clone()).pop();
    let desc_expr = descended.as_ref().unwrap_or(expr);
    let krate = sema.scope(desc_expr.syntax()).module().map(|it| it.krate());
    let famous_defs = FamousDefs(sema, krate);
    let mut tokens = expr
        .syntax()
        .siblings_with_tokens(Direction::Next)
        .filter_map(NodeOrToken::into_token)
        .filter(|t| match t.kind() {
            // Keep whitespace only when it contains a newline; drop comments.
            SyntaxKind::WHITESPACE if !t.text().contains('\n') => false,
            SyntaxKind::COMMENT => false,
            _ => true,
        });
    // Chaining can be defined as an expression whose next sibling tokens are newline and dot
    // Ignoring extra whitespace and comments
    let next = tokens.next()?.kind();
    if next == SyntaxKind::WHITESPACE {
        let mut next_next = tokens.next()?.kind();
        while next_next == SyntaxKind::WHITESPACE {
            next_next = tokens.next()?.kind();
        }
        if next_next == T![.] {
            let ty = sema.type_of_expr(desc_expr)?.original;
            if ty.is_unknown() {
                return None;
            }
            // Don't hint a bare path to a fieldless struct: the name says it all.
            if matches!(expr, ast::Expr::PathExpr(_)) {
                if let Some(hir::Adt::Struct(st)) = ty.as_adt() {
                    if st.fields(sema.db).is_empty() {
                        return None;
                    }
                }
            }
            acc.push(InlayHint {
                range: expr.syntax().text_range(),
                kind: InlayKind::ChainingHint,
                label: hint_iterator(sema, &famous_defs, config, &ty).unwrap_or_else(|| {
                    ty.display_truncated(sema.db, config.max_length).to_string().into()
                }),
            });
        }
    }
    Some(())
}
// Emits a parameter-name hint before each argument of a call, unless the
// heuristics in `should_hide_param_name_hint` deem the hint redundant.
fn get_param_name_hints(
    acc: &mut Vec<InlayHint>,
    sema: &Semantics<RootDatabase>,
    config: &InlayHintsConfig,
    expr: ast::Expr,
) -> Option<()> {
    if !config.parameter_hints {
        return None;
    }
    let (callable, arg_list) = get_callable(sema, &expr)?;
    let hints = callable
        .params(sema.db)
        .into_iter()
        .zip(arg_list.args())
        .filter_map(|((param, _ty), arg)| {
            // Only annotate hints for expressions that exist in the original file
            let range = sema.original_range_opt(arg.syntax())?;
            let param_name = match param? {
                Either::Left(_) => "self".to_string(),
                Either::Right(pat) => match pat {
                    ast::Pat::IdentPat(it) => it.name()?.to_string(),
                    _ => return None,
                },
            };
            Some((param_name, arg, range))
        })
        .filter(|(param_name, arg, _)| {
            !should_hide_param_name_hint(sema, &callable, param_name, arg)
        })
        .map(|(param_name, _, FileRange { range, .. })| InlayHint {
            range,
            kind: InlayKind::ParameterHint,
            label: param_name.into(),
        });
    acc.extend(hints);
    Some(())
}
// Emits a type hint after an identifier pattern (`let x| : Ty|`), skipping
// cases where the type is explicit, unknown, or obvious from the initializer.
fn get_bind_pat_hints(
    acc: &mut Vec<InlayHint>,
    sema: &Semantics<RootDatabase>,
    config: &InlayHintsConfig,
    pat: &ast::IdentPat,
) -> Option<()> {
    if !config.type_hints {
        return None;
    }
    // Prefer the macro-expanded pattern (attribute macros) when available.
    let descended = sema.descend_node_into_attributes(pat.clone()).pop();
    let desc_pat = descended.as_ref().unwrap_or(pat);
    let ty = sema.type_of_pat(&desc_pat.clone().into())?.original;
    if should_not_display_type_hint(sema, pat, &ty) {
        return None;
    }
    let krate = sema.scope(desc_pat.syntax()).module().map(|it| it.krate());
    let famous_defs = FamousDefs(sema, krate);
    // Iterator types get the friendlier `impl Iterator<Item = _>` rendering.
    let label = hint_iterator(sema, &famous_defs, config, &ty);
    let label = match label {
        Some(label) => label,
        None => {
            let ty_name = ty.display_truncated(sema.db, config.max_length).to_string();
            if config.hide_named_constructor_hints
                && is_named_constructor(sema, pat, &ty_name).is_some()
            {
                return None;
            }
            ty_name.into()
        }
    };
    acc.push(InlayHint {
        // Attach to the bound name when present, else to the whole pattern.
        range: match pat.name() {
            Some(name) => name.syntax().text_range(),
            None => pat.syntax().text_range(),
        },
        kind: InlayKind::TypeHint,
        label,
    });
    Some(())
}
// Returns `Some(())` when the pattern's initializer is a constructor-like call
// whose qualifying path segment spells out exactly `ty_name` (so a type hint
// would only repeat it), e.g. `let x = Foo::new()` for type `Foo`.
fn is_named_constructor(
    sema: &Semantics<RootDatabase>,
    pat: &ast::IdentPat,
    ty_name: &str,
) -> Option<()> {
    let let_node = pat.syntax().parent()?;
    let expr = match_ast! {
        match let_node {
            ast::LetStmt(it) => it.initializer(),
            ast::Condition(it) => it.expr(),
            _ => None,
        }
    }?;
    let expr = sema.descend_node_into_attributes(expr.clone()).pop().unwrap_or(expr);
    // unwrap postfix expressions
    let expr = match expr {
        ast::Expr::TryExpr(it) => it.expr(),
        ast::Expr::AwaitExpr(it) => it.expr(),
        expr => Some(expr),
    }?;
    let expr = match expr {
        ast::Expr::CallExpr(call) => match call.expr()? {
            ast::Expr::PathExpr(path) => path,
            _ => return None,
        },
        ast::Expr::PathExpr(path) => path,
        _ => return None,
    };
    let path = expr.path()?;
    let callable = sema.type_of_expr(&ast::Expr::PathExpr(expr))?.original.as_callable(sema.db);
    let callable_kind = callable.map(|it| it.kind());
    // For `Foo::new()` the type name is the qualifier; for a bare path
    // (e.g. a unit struct) it is the final segment itself.
    let qual_seg = match callable_kind {
        Some(hir::CallableKind::Function(_) | hir::CallableKind::TupleEnumVariant(_)) => {
            path.qualifier()?.segment()
        }
        _ => path.segment(),
    }?;
    let ctor_name = match qual_seg.kind()? {
        ast::PathSegmentKind::Name(name_ref) => {
            // Include explicit generic args so `Vec::<u8>::new` matches `Vec<u8>`.
            match qual_seg.generic_arg_list().map(|it| it.generic_args()) {
                Some(generics) => format!("{}<{}>", name_ref, generics.format(", ")),
                None => name_ref.to_string(),
            }
        }
        ast::PathSegmentKind::Type { type_ref: Some(ty), trait_ref: None } => ty.to_string(),
        _ => return None,
    };
    (ctor_name == ty_name).then(|| ())
}
/// Checks if the type is an Iterator from std::iter and replaces its hint with an `impl Iterator<Item = Ty>`.
fn hint_iterator(
    sema: &Semantics<RootDatabase>,
    famous_defs: &FamousDefs,
    config: &InlayHintsConfig,
    ty: &hir::Type,
) -> Option<SmolStr> {
    let db = sema.db;
    let strukt = ty.strip_references().as_adt()?;
    let krate = strukt.module(db).krate();
    if krate != famous_defs.core()? {
        return None;
    }
    let iter_trait = famous_defs.core_iter_Iterator()?;
    let iter_mod = famous_defs.core_iter()?;
    // Assert that this struct comes from `core::iter`.
    if !(strukt.visibility(db) == hir::Visibility::Public
        && strukt.module(db).path_to_root(db).contains(&iter_mod))
    {
        return None;
    }
    if ty.impls_trait(db, iter_trait, &[]) {
        // Find `Iterator::Item` so the associated type can be normalized.
        let assoc_type_item = iter_trait.items(db).into_iter().find_map(|item| match item {
            hir::AssocItem::TypeAlias(alias) if alias.name(db) == known::Item => Some(alias),
            _ => None,
        })?;
        if let Some(ty) = ty.normalize_trait_assoc_type(db, &[], assoc_type_item) {
            const LABEL_START: &str = "impl Iterator<Item = ";
            const LABEL_END: &str = ">";
            // Recurse: the item type may itself be a core iterator. The
            // truncation budget is reduced by the label's fixed parts.
            let ty_display = hint_iterator(sema, famous_defs, config, &ty)
                .map(|assoc_type_impl| assoc_type_impl.to_string())
                .unwrap_or_else(|| {
                    ty.display_truncated(
                        db,
                        config
                            .max_length
                            .map(|len| len.saturating_sub(LABEL_START.len() + LABEL_END.len())),
                    )
                    .to_string()
                });
            return Some(format!("{}{}{}", LABEL_START, ty_display, LABEL_END).into());
        }
    }
    None
}
/// Whether `bind_pat`'s text equals the name of one of `pat_ty`'s enum
/// variants (a non-enum type trivially returns `false`).
fn pat_is_enum_variant(db: &RootDatabase, bind_pat: &ast::IdentPat, pat_ty: &hir::Type) -> bool {
    match pat_ty.as_adt() {
        Some(hir::Adt::Enum(enum_data)) => {
            let pat_text = bind_pat.to_string();
            enum_data
                .variants(db)
                .into_iter()
                .any(|variant| variant.name(db).to_smol_str() == pat_text)
        }
        _ => false,
    }
}
// Suppresses type hints that add no information: unknown types, a unit struct
// bound under its own name, explicitly-annotated bindings, and patterns that
// merely name an enum variant.
fn should_not_display_type_hint(
    sema: &Semantics<RootDatabase>,
    bind_pat: &ast::IdentPat,
    pat_ty: &hir::Type,
) -> bool {
    let db = sema.db;
    if pat_ty.is_unknown() {
        return true;
    }
    // `let Foo = ...` where `Foo` is a fieldless struct of the same name.
    if let Some(hir::Adt::Struct(s)) = pat_ty.as_adt() {
        if s.fields(db).is_empty() && s.name(db).to_smol_str() == bind_pat.to_string() {
            return true;
        }
    }
    // Walk outwards to find the construct the pattern belongs to; the first
    // matching ancestor decides.
    for node in bind_pat.syntax().ancestors() {
        match_ast! {
            match node {
                ast::LetStmt(it) => return it.ty().is_some(),
                ast::Param(it) => return it.ty().is_some(),
                ast::MatchArm(_it) => return pat_is_enum_variant(db, bind_pat, pat_ty),
                ast::IfExpr(it) => {
                    return it.condition().and_then(|condition| condition.pat()).is_some()
                        && pat_is_enum_variant(db, bind_pat, pat_ty);
                },
                ast::WhileExpr(it) => {
                    return it.condition().and_then(|condition| condition.pat()).is_some()
                        && pat_is_enum_variant(db, bind_pat, pat_ty);
                },
                ast::ForExpr(it) => {
                    // We *should* display hint only if user provided "in {expr}" and we know the type of expr (and it's not unit).
                    // Type of expr should be iterable.
                    return it.in_token().is_none() ||
                        it.iterable()
                            .and_then(|iterable_expr| sema.type_of_expr(&iterable_expr))
                            .map(TypeInfo::original)
                            .map_or(true, |iterable_ty| iterable_ty.is_unknown() || iterable_ty.is_unit())
                },
                _ => (),
            }
        }
    }
    false
}
fn should_hide_param_name_hint(
    sema: &Semantics<RootDatabase>,
    callable: &hir::Callable,
    param_name: &str,
    argument: &ast::Expr,
) -> bool {
    // These are to be tested in the `parameter_hint_heuristics` test
    // hide when:
    // - the parameter name is a suffix of the function's name
    // - the argument is an enum whose name is equal to the parameter
    // - exact argument<->parameter match(ignoring leading underscore) or parameter is a prefix/suffix
    //   of argument with _ splitting it off
    // - param starts with `ra_fixture`
    // - param is a well known name in a unary function
    let param_name = param_name.trim_start_matches('_');
    if param_name.is_empty() {
        return true;
    }
    // Always show the hint for negated arguments: `!x` for parameter `x`
    // would otherwise be hidden by the similarity check below.
    if matches!(argument, ast::Expr::PrefixExpr(prefix) if prefix.op_kind() == Some(UnaryOp::Not)) {
        return false;
    }
    let fn_name = match callable.kind() {
        hir::CallableKind::Function(it) => Some(it.name(sema.db).to_smol_str()),
        _ => None,
    };
    let fn_name = fn_name.as_deref();
    is_param_name_suffix_of_fn_name(param_name, callable, fn_name)
        || is_enum_name_similar_to_param_name(sema, argument, param_name)
        || is_argument_similar_to_param_name(argument, param_name)
        || param_name.starts_with("ra_fixture")
        || (callable.n_params() == 1 && is_obvious_param(param_name))
}
/// Checks whether `argument` textually matches `param_name`: either they are
/// the same (ignoring leading underscores and ASCII case), or `param_name` is
/// a prefix/suffix of the argument split off at a `_` (so `foo(param_end)`
/// hides the hint for parameter `param`).
fn is_argument_similar_to_param_name(argument: &ast::Expr, param_name: &str) -> bool {
    // Arguments without a usable textual form never match.
    let argument = match get_string_representation(argument) {
        Some(argument) => argument,
        None => return false,
    };
    // `str::split_at` panics on out-of-range or non-char-boundary indices, so
    // guard the split. Crucially, the boundary is checked on the *same* string
    // that gets split: the previous closure here checked the boundary on its
    // parameter but split the captured, untrimmed `argument`, yielding
    // misaligned prefix/suffix pairs whenever the argument had leading
    // underscores (e.g. argument `_param` vs parameter `param`).
    fn try_split_at(s: &str, at: usize) -> Option<(&str, &str)> {
        s.is_char_boundary(at).then(|| s.split_at(at))
    }
    let param_name = param_name.trim_start_matches('_');
    let argument = argument.trim_start_matches('_');
    // `param_name` is a prefix of `argument`, optionally followed by `_rest`.
    match try_split_at(argument, param_name.len()) {
        Some((prefix, rest)) if prefix.eq_ignore_ascii_case(param_name) => {
            return rest.is_empty() || rest.starts_with('_');
        }
        _ => (),
    }
    // `param_name` is a suffix of `argument`, optionally preceded by `rest_`.
    match argument.len().checked_sub(param_name.len()).and_then(|at| try_split_at(argument, at)) {
        Some((rest, suffix)) if param_name.eq_ignore_ascii_case(suffix) => {
            return rest.is_empty() || rest.ends_with('_');
        }
        _ => (),
    }
    false
}
/// Hide the parameter name of a unary function if it equals the function's
/// name, or is a `_`-prefixed suffix of it.
///
/// `fn strip_suffix(suffix)` will be hidden.
/// `fn stripsuffix(suffix)` will not be hidden.
fn is_param_name_suffix_of_fn_name(
    param_name: &str,
    callable: &Callable,
    fn_name: Option<&str>,
) -> bool {
    // Only applies to unary functions with a known name.
    if callable.n_params() != 1 {
        return false;
    }
    let function = match fn_name {
        Some(it) => it,
        None => return false,
    };
    if function == param_name {
        return true;
    }
    // Try to split the function name so that its tail is `param_name`; the
    // char-boundary check keeps `split_at` from panicking on multibyte names.
    let at = match function.len().checked_sub(param_name.len()) {
        Some(at) if function.is_char_boundary(at) => at,
        _ => return false,
    };
    let (prefix, suffix) = function.split_at(at);
    suffix.eq_ignore_ascii_case(param_name) && prefix.ends_with('_')
}
/// Returns `true` when the argument's type is an enum whose snake-cased name
/// equals `param_name` (e.g. `CompletionKind` argument for `completion_kind`).
fn is_enum_name_similar_to_param_name(
    sema: &Semantics<RootDatabase>,
    argument: &ast::Expr,
    param_name: &str,
) -> bool {
    let adt = sema.type_of_expr(argument).and_then(|ty| ty.original.as_adt());
    if let Some(hir::Adt::Enum(enum_)) = adt {
        to_lower_snake_case(&enum_.name(sema.db).to_smol_str()) == param_name
    } else {
        false
    }
}
/// Extracts the identifier-like text of an expression for comparison with a
/// parameter name; returns `None` for expressions with no such form.
fn get_string_representation(expr: &ast::Expr) -> Option<String> {
    match expr {
        ast::Expr::MethodCallExpr(method_call) => {
            let name_ref = method_call.name_ref()?;
            if matches!(name_ref.text().as_str(), "clone" | "as_ref") {
                // `x.clone()` / `x.as_ref()` don't change what the value *is*,
                // so look through them to the receiver.
                method_call.receiver().map(|receiver| receiver.to_string())
            } else {
                Some(name_ref.text().as_str().to_owned())
            }
        }
        ast::Expr::FieldExpr(field) => Some(field.name_ref()?.to_string()),
        ast::Expr::PathExpr(path) => Some(path.path()?.segment()?.to_string()),
        // Unwrap prefix (`!x`, `-x`, `*x`) and reference (`&x`) expressions.
        ast::Expr::PrefixExpr(prefix) => get_string_representation(&prefix.expr()?),
        ast::Expr::RefExpr(reference) => get_string_representation(&reference.expr()?),
        _ => None,
    }
}
/// Returns `true` for parameter names so generic that a hint adds nothing:
/// single-letter names, and a handful of ubiquitous std names (`map`/`filter`
/// predicates, comparison operands, etc.).
fn is_obvious_param(param_name: &str) -> bool {
    match param_name {
        "predicate" | "value" | "pat" | "rhs" | "other" => true,
        _ => param_name.len() == 1,
    }
}
/// Resolves a call or method-call expression to its callable plus the literal
/// argument list; returns `None` when either cannot be determined.
fn get_callable(
    sema: &Semantics<RootDatabase>,
    expr: &ast::Expr,
) -> Option<(hir::Callable, ast::ArgList)> {
    match expr {
        ast::Expr::CallExpr(call) => {
            // Prefer the node as expanded into attribute macros, if any.
            let expanded = sema.descend_node_into_attributes(call.clone()).pop();
            let call = expanded.as_ref().unwrap_or(call);
            let callable = sema.type_of_expr(&call.expr()?)?.original.as_callable(sema.db)?;
            Some((callable, call.arg_list()?))
        }
        ast::Expr::MethodCallExpr(call) => {
            let expanded = sema.descend_node_into_attributes(call.clone()).pop();
            let call = expanded.as_ref().unwrap_or(call);
            let callable = sema.resolve_method_call_as_callable(call)?;
            Some((callable, call.arg_list()?))
        }
        _ => None,
    }
}
#[cfg(test)]
mod tests {
use expect_test::{expect, Expect};
use test_utils::extract_annotations;
use crate::{fixture, inlay_hints::InlayHintsConfig};
    // Baseline configuration: every hint kind enabled, no label truncation.
    const TEST_CONFIG: InlayHintsConfig = InlayHintsConfig {
        type_hints: true,
        parameter_hints: true,
        chaining_hints: true,
        hide_named_constructor_hints: false,
        max_length: None,
    };
    /// Checks the `//^` annotations in `ra_fixture` against the hints produced
    /// with all hint kinds enabled ([`TEST_CONFIG`]).
    #[track_caller]
    fn check(ra_fixture: &str) {
        check_with_config(TEST_CONFIG, ra_fixture);
    }
    /// Like `check`, but with only parameter hints enabled.
    #[track_caller]
    fn check_params(ra_fixture: &str) {
        check_with_config(
            InlayHintsConfig {
                parameter_hints: true,
                type_hints: false,
                chaining_hints: false,
                hide_named_constructor_hints: false,
                max_length: None,
            },
            ra_fixture,
        );
    }
    /// Like `check`, but with only type hints enabled.
    #[track_caller]
    fn check_types(ra_fixture: &str) {
        check_with_config(
            InlayHintsConfig {
                parameter_hints: false,
                type_hints: true,
                chaining_hints: false,
                hide_named_constructor_hints: false,
                max_length: None,
            },
            ra_fixture,
        );
    }
    /// Like `check`, but with only chaining hints enabled.
    #[track_caller]
    fn check_chains(ra_fixture: &str) {
        check_with_config(
            InlayHintsConfig {
                parameter_hints: false,
                type_hints: false,
                chaining_hints: true,
                hide_named_constructor_hints: false,
                max_length: None,
            },
            ra_fixture,
        );
    }
#[track_caller]
fn check_with_config(config: InlayHintsConfig, ra_fixture: &str) {
let (analysis, file_id) = fixture::file(ra_fixture);
let expected = extract_annotations(&*analysis.file_text(file_id).unwrap());
let inlay_hints = analysis.inlay_hints(&config, file_id).unwrap();
let actual =
inlay_hints.into_iter().map(|it| (it.range, it.label.to_string())).collect::<Vec<_>>();
assert_eq!(expected, actual, "\nExpected:\n{:#?}\n\nActual:\n{:#?}", expected, actual);
}
    /// Computes hints for `ra_fixture` under `config` and compares their debug
    /// representation against the `expect![[...]]` snapshot.
    #[track_caller]
    fn check_expect(config: InlayHintsConfig, ra_fixture: &str, expect: Expect) {
        let (analysis, file_id) = fixture::file(ra_fixture);
        let inlay_hints = analysis.inlay_hints(&config, file_id).unwrap();
        expect.assert_debug_eq(&inlay_hints)
    }
#[test]
fn hints_disabled() {
check_with_config(
InlayHintsConfig {
type_hints: false,
parameter_hints: false,
chaining_hints: false,
hide_named_constructor_hints: false,
max_length: None,
},
r#"
fn foo(a: i32, b: i32) -> i32 { a + b }
fn main() {
let _x = foo(4, 4);
}"#,
);
}
// Parameter hint tests
#[test]
fn param_hints_only() {
check_params(
r#"
fn foo(a: i32, b: i32) -> i32 { a + b }
fn main() {
let _x = foo(
4,
//^ a
4,
//^ b
);
}"#,
);
}
#[test]
fn param_name_similar_to_fn_name_still_hints() {
check_params(
r#"
fn max(x: i32, y: i32) -> i32 { x + y }
fn main() {
let _x = max(
4,
//^ x
4,
//^ y
);
}"#,
);
}
#[test]
fn param_name_similar_to_fn_name() {
check_params(
r#"
fn param_with_underscore(with_underscore: i32) -> i32 { with_underscore }
fn main() {
let _x = param_with_underscore(
4,
);
}"#,
);
check_params(
r#"
fn param_with_underscore(underscore: i32) -> i32 { underscore }
fn main() {
let _x = param_with_underscore(
4,
);
}"#,
);
}
#[test]
fn param_name_same_as_fn_name() {
check_params(
r#"
fn foo(foo: i32) -> i32 { foo }
fn main() {
let _x = foo(
4,
);
}"#,
);
}
#[test]
fn never_hide_param_when_multiple_params() {
check_params(
r#"
fn foo(foo: i32, bar: i32) -> i32 { bar + baz }
fn main() {
let _x = foo(
4,
//^ foo
8,
//^ bar
);
}"#,
);
}
#[test]
fn param_hints_look_through_as_ref_and_clone() {
check_params(
r#"
fn foo(bar: i32, baz: f32) {}
fn main() {
let bar = 3;
let baz = &"baz";
let fez = 1.0;
foo(bar.clone(), bar.clone());
//^^^^^^^^^^^ baz
foo(bar.as_ref(), bar.as_ref());
//^^^^^^^^^^^^ baz
}
"#,
);
}
#[test]
fn self_param_hints() {
check_params(
r#"
struct Foo;
impl Foo {
fn foo(self: Self) {}
fn bar(self: &Self) {}
}
fn main() {
Foo::foo(Foo);
//^^^ self
Foo::bar(&Foo);
//^^^^ self
}
"#,
)
}
#[test]
fn param_name_hints_show_for_literals() {
check_params(
r#"pub fn test(a: i32, b: i32) -> [i32; 2] { [a, b] }
fn main() {
test(
0xa_b,
//^^^^^ a
0xa_b,
//^^^^^ b
);
}"#,
)
}
#[test]
fn function_call_parameter_hint() {
check_params(
r#"
//- minicore: option
struct FileId {}
struct SmolStr {}
struct TextRange {}
struct SyntaxKind {}
struct NavigationTarget {}
struct Test {}
impl Test {
fn method(&self, mut param: i32) -> i32 { param * 2 }
fn from_syntax(
file_id: FileId,
name: SmolStr,
focus_range: Option<TextRange>,
full_range: TextRange,
kind: SyntaxKind,
docs: Option<String>,
) -> NavigationTarget {
NavigationTarget {}
}
}
fn test_func(mut foo: i32, bar: i32, msg: &str, _: i32, last: i32) -> i32 {
foo + bar
}
fn main() {
let not_literal = 1;
let _: i32 = test_func(1, 2, "hello", 3, not_literal);
//^ foo ^ bar ^^^^^^^ msg ^^^^^^^^^^^ last
let t: Test = Test {};
t.method(123);
//^^^ param
Test::method(&t, 3456);
//^^ self ^^^^ param
Test::from_syntax(
FileId {},
//^^^^^^^^^ file_id
"impl".into(),
//^^^^^^^^^^^^^ name
None,
//^^^^ focus_range
TextRange {},
//^^^^^^^^^^^^ full_range
SyntaxKind {},
//^^^^^^^^^^^^^ kind
None,
//^^^^ docs
);
}"#,
);
}
#[test]
fn parameter_hint_heuristics() {
check_params(
r#"
fn check(ra_fixture_thing: &str) {}
fn map(f: i32) {}
fn filter(predicate: i32) {}
fn strip_suffix(suffix: &str) {}
fn stripsuffix(suffix: &str) {}
fn same(same: u32) {}
fn same2(_same2: u32) {}
fn enum_matches_param_name(completion_kind: CompletionKind) {}
fn foo(param: u32) {}
fn bar(param_eter: u32) {}
enum CompletionKind {
Keyword,
}
fn non_ident_pat((a, b): (u32, u32)) {}
fn main() {
const PARAM: u32 = 0;
foo(PARAM);
foo(!PARAM);
// ^^^^^^ param
check("");
map(0);
filter(0);
strip_suffix("");
stripsuffix("");
//^^ suffix
same(0);
same2(0);
enum_matches_param_name(CompletionKind::Keyword);
let param = 0;
foo(param);
let param_end = 0;
foo(param_end);
let start_param = 0;
foo(start_param);
let param2 = 0;
foo(param2);
//^^^^^^ param
let param_eter = 0;
bar(param_eter);
let param_eter_end = 0;
bar(param_eter_end);
let start_param_eter = 0;
bar(start_param_eter);
let param_eter2 = 0;
bar(param_eter2);
//^^^^^^^^^^^ param_eter
non_ident_pat((0, 0));
}"#,
);
}
// Type-Hint tests
#[test]
fn type_hints_only() {
check_types(
r#"
fn foo(a: i32, b: i32) -> i32 { a + b }
fn main() {
let _x = foo(4, 4);
//^^ i32
}"#,
);
}
#[test]
fn type_hints_bindings_after_at() {
check_types(
r#"
//- minicore: option
fn main() {
let ref foo @ bar @ ref mut baz = 0;
//^^^ &i32
//^^^ i32
//^^^ &mut i32
let [x @ ..] = [0];
//^ [i32; 1]
if let x @ Some(_) = Some(0) {}
//^ Option<i32>
let foo @ (bar, baz) = (3, 3);
//^^^ (i32, i32)
//^^^ i32
//^^^ i32
}"#,
);
}
#[test]
fn default_generic_types_should_not_be_displayed() {
check(
r#"
struct Test<K, T = u8> { k: K, t: T }
fn main() {
let zz = Test { t: 23u8, k: 33 };
//^^ Test<i32>
let zz_ref = &zz;
//^^^^^^ &Test<i32>
let test = || zz;
//^^^^ || -> Test<i32>
}"#,
);
}
#[test]
fn shorten_iterators_in_associated_params() {
check_types(
r#"
//- minicore: iterators
use core::iter;
pub struct SomeIter<T> {}
impl<T> SomeIter<T> {
pub fn new() -> Self { SomeIter {} }
pub fn push(&mut self, t: T) {}
}
impl<T> Iterator for SomeIter<T> {
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
None
}
}
fn main() {
let mut some_iter = SomeIter::new();
//^^^^^^^^^ SomeIter<Take<Repeat<i32>>>
some_iter.push(iter::repeat(2).take(2));
let iter_of_iters = some_iter.take(2);
//^^^^^^^^^^^^^ impl Iterator<Item = impl Iterator<Item = i32>>
}
"#,
);
}
#[test]
fn infer_call_method_return_associated_types_with_generic() {
check_types(
r#"
pub trait Default {
fn default() -> Self;
}
pub trait Foo {
type Bar: Default;
}
pub fn quux<T: Foo>() -> T::Bar {
let y = Default::default();
//^ <T as Foo>::Bar
y
}
"#,
);
}
#[test]
fn fn_hints() {
check_types(
r#"
//- minicore: fn, sized
fn foo() -> impl Fn() { loop {} }
fn foo1() -> impl Fn(f64) { loop {} }
fn foo2() -> impl Fn(f64, f64) { loop {} }
fn foo3() -> impl Fn(f64, f64) -> u32 { loop {} }
fn foo4() -> &'static dyn Fn(f64, f64) -> u32 { loop {} }
fn foo5() -> &'static dyn Fn(&'static dyn Fn(f64, f64) -> u32, f64) -> u32 { loop {} }
fn foo6() -> impl Fn(f64, f64) -> u32 + Sized { loop {} }
fn foo7() -> *const (impl Fn(f64, f64) -> u32 + Sized) { loop {} }
fn main() {
let foo = foo();
// ^^^ impl Fn()
let foo = foo1();
// ^^^ impl Fn(f64)
let foo = foo2();
// ^^^ impl Fn(f64, f64)
let foo = foo3();
// ^^^ impl Fn(f64, f64) -> u32
let foo = foo4();
// ^^^ &dyn Fn(f64, f64) -> u32
let foo = foo5();
// ^^^ &dyn Fn(&dyn Fn(f64, f64) -> u32, f64) -> u32
let foo = foo6();
// ^^^ impl Fn(f64, f64) -> u32
let foo = foo7();
// ^^^ *const impl Fn(f64, f64) -> u32
}
"#,
)
}
#[test]
fn fn_hints_ptr_rpit_fn_parentheses() {
check_types(
r#"
//- minicore: fn, sized
trait Trait {}
fn foo1() -> *const impl Fn() { loop {} }
fn foo2() -> *const (impl Fn() + Sized) { loop {} }
fn foo3() -> *const (impl Fn() + ?Sized) { loop {} }
fn foo4() -> *const (impl Sized + Fn()) { loop {} }
fn foo5() -> *const (impl ?Sized + Fn()) { loop {} }
fn foo6() -> *const (impl Fn() + Trait) { loop {} }
fn foo7() -> *const (impl Fn() + Sized + Trait) { loop {} }
fn foo8() -> *const (impl Fn() + ?Sized + Trait) { loop {} }
fn foo9() -> *const (impl Fn() -> u8 + ?Sized) { loop {} }
fn foo10() -> *const (impl Fn() + Sized + ?Sized) { loop {} }
fn main() {
let foo = foo1();
// ^^^ *const impl Fn()
let foo = foo2();
// ^^^ *const impl Fn()
let foo = foo3();
// ^^^ *const (impl Fn() + ?Sized)
let foo = foo4();
// ^^^ *const impl Fn()
let foo = foo5();
// ^^^ *const (impl Fn() + ?Sized)
let foo = foo6();
// ^^^ *const (impl Fn() + Trait)
let foo = foo7();
// ^^^ *const (impl Fn() + Trait)
let foo = foo8();
// ^^^ *const (impl Fn() + Trait + ?Sized)
let foo = foo9();
// ^^^ *const (impl Fn() -> u8 + ?Sized)
let foo = foo10();
// ^^^ *const impl Fn()
}
"#,
)
}
#[test]
fn unit_structs_have_no_type_hints() {
check_types(
r#"
//- minicore: result
struct SyntheticSyntax;
fn main() {
match Ok(()) {
Ok(_) => (),
Err(SyntheticSyntax) => (),
}
}"#,
);
}
#[test]
fn let_statement() {
check_types(
r#"
#[derive(PartialEq)]
enum Option<T> { None, Some(T) }
#[derive(PartialEq)]
struct Test { a: Option<u32>, b: u8 }
fn main() {
struct InnerStruct {}
let test = 54;
//^^^^ i32
let test: i32 = 33;
let mut test = 33;
//^^^^ i32
let _ = 22;
let test = "test";
//^^^^ &str
let test = InnerStruct {};
//^^^^ InnerStruct
let test = unresolved();
let test = (42, 'a');
//^^^^ (i32, char)
let (a, (b, (c,)) = (2, (3, (9.2,));
//^ i32 ^ i32 ^ f64
let &x = &92;
//^ i32
}"#,
);
}
#[test]
fn if_expr() {
check_types(
r#"
//- minicore: option
struct Test { a: Option<u32>, b: u8 }
fn main() {
let test = Some(Test { a: Some(3), b: 1 });
//^^^^ Option<Test>
if let None = &test {};
if let test = &test {};
//^^^^ &Option<Test>
if let Some(test) = &test {};
//^^^^ &Test
if let Some(Test { a, b }) = &test {};
//^ &Option<u32> ^ &u8
if let Some(Test { a: x, b: y }) = &test {};
//^ &Option<u32> ^ &u8
if let Some(Test { a: Some(x), b: y }) = &test {};
//^ &u32 ^ &u8
if let Some(Test { a: None, b: y }) = &test {};
//^ &u8
if let Some(Test { b: y, .. }) = &test {};
//^ &u8
if test == None {}
}"#,
);
}
#[test]
fn while_expr() {
check_types(
r#"
//- minicore: option
struct Test { a: Option<u32>, b: u8 }
fn main() {
let test = Some(Test { a: Some(3), b: 1 });
//^^^^ Option<Test>
while let Some(Test { a: Some(x), b: y }) = &test {};
//^ &u32 ^ &u8
}"#,
);
}
#[test]
fn match_arm_list() {
check_types(
r#"
//- minicore: option
struct Test { a: Option<u32>, b: u8 }
fn main() {
match Some(Test { a: Some(3), b: 1 }) {
None => (),
test => (),
//^^^^ Option<Test>
Some(Test { a: Some(x), b: y }) => (),
//^ u32 ^ u8
_ => {}
}
}"#,
);
}
#[test]
fn incomplete_for_no_hint() {
check_types(
r#"
fn main() {
let data = &[1i32, 2, 3];
//^^^^ &[i32; 3]
for i
}"#,
);
check(
r#"
pub struct Vec<T> {}
impl<T> Vec<T> {
pub fn new() -> Self { Vec {} }
pub fn push(&mut self, t: T) {}
}
impl<T> IntoIterator for Vec<T> {
type Item=T;
}
fn main() {
let mut data = Vec::new();
//^^^^ Vec<&str>
data.push("foo");
for i in
println!("Unit expr");
}
"#,
);
}
#[test]
fn complete_for_hint() {
check_types(
r#"
//- minicore: iterator
pub struct Vec<T> {}
impl<T> Vec<T> {
pub fn new() -> Self { Vec {} }
pub fn push(&mut self, t: T) {}
}
impl<T> IntoIterator for Vec<T> {
type Item=T;
}
fn main() {
let mut data = Vec::new();
//^^^^ Vec<&str>
data.push("foo");
for i in data {
//^ &str
let z = i;
//^ &str
}
}
"#,
);
}
#[test]
fn multi_dyn_trait_bounds() {
check_types(
r#"
pub struct Vec<T> {}
impl<T> Vec<T> {
pub fn new() -> Self { Vec {} }
}
pub struct Box<T> {}
trait Display {}
trait Sync {}
fn main() {
// The block expression wrapping disables the constructor hint hiding logic
let _v = { Vec::<Box<&(dyn Display + Sync)>>::new() };
//^^ Vec<Box<&(dyn Display + Sync)>>
let _v = { Vec::<Box<*const (dyn Display + Sync)>>::new() };
//^^ Vec<Box<*const (dyn Display + Sync)>>
let _v = { Vec::<Box<dyn Display + Sync>>::new() };
//^^ Vec<Box<dyn Display + Sync>>
}
"#,
);
}
#[test]
fn shorten_iterator_hints() {
check_types(
r#"
//- minicore: iterators
use core::iter;
struct MyIter;
impl Iterator for MyIter {
type Item = ();
fn next(&mut self) -> Option<Self::Item> {
None
}
}
fn main() {
let _x = MyIter;
//^^ MyIter
let _x = iter::repeat(0);
//^^ impl Iterator<Item = i32>
fn generic<T: Clone>(t: T) {
let _x = iter::repeat(t);
//^^ impl Iterator<Item = T>
let _chained = iter::repeat(t).take(10);
//^^^^^^^^ impl Iterator<Item = T>
}
}
"#,
);
}
#[test]
fn skip_constructor_and_enum_type_hints() {
check_with_config(
InlayHintsConfig {
type_hints: true,
parameter_hints: true,
chaining_hints: true,
hide_named_constructor_hints: true,
max_length: None,
},
r#"
//- minicore: try, option
use core::ops::ControlFlow;
mod x {
pub mod y { pub struct Foo; }
pub struct Foo;
pub enum AnotherEnum {
Variant()
};
}
struct Struct;
struct TupleStruct();
impl Struct {
fn new() -> Self {
Struct
}
fn try_new() -> ControlFlow<(), Self> {
ControlFlow::Continue(Struct)
}
}
struct Generic<T>(T);
impl Generic<i32> {
fn new() -> Self {
Generic(0)
}
}
enum Enum {
Variant(u32)
}
fn times2(value: i32) -> i32 {
2 * value
}
fn main() {
let enumb = Enum::Variant(0);
let strukt = x::Foo;
let strukt = x::y::Foo;
let strukt = Struct;
let strukt = Struct::new();
let tuple_struct = TupleStruct();
let generic0 = Generic::new();
// ^^^^^^^^ Generic<i32>
let generic1 = Generic(0);
// ^^^^^^^^ Generic<i32>
let generic2 = Generic::<i32>::new();
let generic3 = <Generic<i32>>::new();
let generic4 = Generic::<i32>(0);
let option = Some(0);
// ^^^^^^ Option<i32>
let func = times2;
// ^^^^ fn times2(i32) -> i32
let closure = |x: i32| x * 2;
// ^^^^^^^ |i32| -> i32
}
fn fallible() -> ControlFlow<()> {
let strukt = Struct::try_new()?;
}
"#,
);
}
#[test]
fn shows_constructor_type_hints_when_enabled() {
check_types(
r#"
//- minicore: try
use core::ops::ControlFlow;
struct Struct;
struct TupleStruct();
impl Struct {
fn new() -> Self {
Struct
}
fn try_new() -> ControlFlow<(), Self> {
ControlFlow::Continue(Struct)
}
}
struct Generic<T>(T);
impl Generic<i32> {
fn new() -> Self {
Generic(0)
}
}
fn main() {
let strukt = Struct::new();
// ^^^^^^ Struct
let tuple_struct = TupleStruct();
// ^^^^^^^^^^^^ TupleStruct
let generic0 = Generic::new();
// ^^^^^^^^ Generic<i32>
let generic1 = Generic::<i32>::new();
// ^^^^^^^^ Generic<i32>
let generic2 = <Generic<i32>>::new();
// ^^^^^^^^ Generic<i32>
}
fn fallible() -> ControlFlow<()> {
let strukt = Struct::try_new()?;
// ^^^^^^ Struct
}
"#,
);
}
#[test]
fn closures() {
check(
r#"
fn main() {
let mut start = 0;
//^^^^^ i32
(0..2).for_each(|increment| { start += increment; });
//^^^^^^^^^ i32
let multiply =
//^^^^^^^^ |i32, i32| -> i32
| a, b| a * b
//^ i32 ^ i32
;
let _: i32 = multiply(1, 2);
let multiply_ref = &multiply;
//^^^^^^^^^^^^ &|i32, i32| -> i32
let return_42 = || 42;
//^^^^^^^^^ || -> i32
}"#,
);
}
#[test]
fn hint_truncation() {
check_with_config(
InlayHintsConfig { max_length: Some(8), ..TEST_CONFIG },
r#"
struct Smol<T>(T);
struct VeryLongOuterName<T>(T);
fn main() {
let a = Smol(0u32);
//^ Smol<u32>
let b = VeryLongOuterName(0usize);
//^ VeryLongOuterName<…>
let c = Smol(Smol(0u32))
//^ Smol<Smol<…>>
}"#,
);
}
// Chaining hint tests
#[test]
fn chaining_hints_ignore_comments() {
check_expect(
InlayHintsConfig {
parameter_hints: false,
type_hints: false,
chaining_hints: true,
hide_named_constructor_hints: false,
max_length: None,
},
r#"
struct A(B);
impl A { fn into_b(self) -> B { self.0 } }
struct B(C);
impl B { fn into_c(self) -> C { self.0 } }
struct C;
fn main() {
let c = A(B(C))
.into_b() // This is a comment
// This is another comment
.into_c();
}
"#,
expect![[r#"
[
InlayHint {
range: 147..172,
kind: ChainingHint,
label: "B",
},
InlayHint {
range: 147..154,
kind: ChainingHint,
label: "A",
},
]
"#]],
);
}
#[test]
fn chaining_hints_without_newlines() {
check_chains(
r#"
struct A(B);
impl A { fn into_b(self) -> B { self.0 } }
struct B(C);
impl B { fn into_c(self) -> C { self.0 } }
struct C;
fn main() {
let c = A(B(C)).into_b().into_c();
}"#,
);
}
#[test]
fn struct_access_chaining_hints() {
check_expect(
InlayHintsConfig {
parameter_hints: false,
type_hints: false,
chaining_hints: true,
hide_named_constructor_hints: false,
max_length: None,
},
r#"
struct A { pub b: B }
struct B { pub c: C }
struct C(pub bool);
struct D;
impl D {
fn foo(&self) -> i32 { 42 }
}
fn main() {
let x = A { b: B { c: C(true) } }
.b
.c
.0;
let x = D
.foo();
}"#,
expect![[r#"
[
InlayHint {
range: 143..190,
kind: ChainingHint,
label: "C",
},
InlayHint {
range: 143..179,
kind: ChainingHint,
label: "B",
},
]
"#]],
);
}
#[test]
fn generic_chaining_hints() {
check_expect(
InlayHintsConfig {
parameter_hints: false,
type_hints: false,
chaining_hints: true,
hide_named_constructor_hints: false,
max_length: None,
},
r#"
struct A<T>(T);
struct B<T>(T);
struct C<T>(T);
struct X<T,R>(T, R);
impl<T> A<T> {
fn new(t: T) -> Self { A(t) }
fn into_b(self) -> B<T> { B(self.0) }
}
impl<T> B<T> {
fn into_c(self) -> C<T> { C(self.0) }
}
fn main() {
let c = A::new(X(42, true))
.into_b()
.into_c();
}
"#,
expect![[r#"
[
InlayHint {
range: 246..283,
kind: ChainingHint,
label: "B<X<i32, bool>>",
},
InlayHint {
range: 246..265,
kind: ChainingHint,
label: "A<X<i32, bool>>",
},
]
"#]],
);
}
#[test]
fn shorten_iterator_chaining_hints() {
check_expect(
InlayHintsConfig {
parameter_hints: false,
type_hints: false,
chaining_hints: true,
hide_named_constructor_hints: false,
max_length: None,
},
r#"
//- minicore: iterators
use core::iter;
struct MyIter;
impl Iterator for MyIter {
type Item = ();
fn next(&mut self) -> Option<Self::Item> {
None
}
}
fn main() {
let _x = MyIter.by_ref()
.take(5)
.by_ref()
.take(5)
.by_ref();
}
"#,
expect![[r#"
[
InlayHint {
range: 174..241,
kind: ChainingHint,
label: "impl Iterator<Item = ()>",
},
InlayHint {
range: 174..224,
kind: ChainingHint,
label: "impl Iterator<Item = ()>",
},
InlayHint {
range: 174..206,
kind: ChainingHint,
label: "impl Iterator<Item = ()>",
},
InlayHint {
range: 174..189,
kind: ChainingHint,
label: "&mut MyIter",
},
]
"#]],
);
}
#[test]
fn hints_in_attr_call() {
check_expect(
TEST_CONFIG,
r#"
//- proc_macros: identity, input_replace
struct Struct;
impl Struct {
fn chain(self) -> Self {
self
}
}
#[proc_macros::identity]
fn main() {
let strukt = Struct;
strukt
.chain()
.chain()
.chain();
Struct::chain(strukt);
}
"#,
expect![[r#"
[
InlayHint {
range: 124..130,
kind: TypeHint,
label: "Struct",
},
InlayHint {
range: 145..185,
kind: ChainingHint,
label: "Struct",
},
InlayHint {
range: 145..168,
kind: ChainingHint,
label: "Struct",
},
InlayHint {
range: 222..228,
kind: ParameterHint,
label: "self",
},
]
"#]],
);
}
}
| 25.678894 | 168 | 0.497396 |
621a87eb9ebc4cd69bae2e20365e1b6ec7d37fe5 | 1,693 | //! Middleware types.
use std::sync::Arc;
#[doc(inline)]
pub use http_service::HttpService;
use crate::endpoint::DynEndpoint;
use crate::utils::BoxFuture;
use crate::{Request, Response};
// mod compression;
pub(crate) mod cookies;
mod cors;
// mod default_headers;
mod logger;
// pub use compression::{Compression, Decompression};
pub use cors::{Cors, Origin};
// pub use default_headers::DefaultHeaders;
pub use logger::RequestLogger;
/// Middleware that wraps around the remaining middleware chain.
///
/// Implementors receive the request plus a [`Next`] handle representing the
/// rest of the chain; they may short-circuit with their own response or call
/// `next.run(...)` to continue processing.
pub trait Middleware<State>: 'static + Send + Sync {
    /// Asynchronously handle the request, and return a response.
    fn handle<'a>(&'a self, cx: Request<State>, next: Next<'a, State>) -> BoxFuture<'a, Response>;
}
// Blanket implementation: any `Send + Sync + 'static` function/closure with
// the right signature can be used as middleware directly, without defining a
// dedicated type. `handle` simply invokes the function itself.
impl<State, F> Middleware<State> for F
where
    F: Send
        + Sync
        + 'static
        + for<'a> Fn(Request<State>, Next<'a, State>) -> BoxFuture<'a, Response>,
{
    fn handle<'a>(&'a self, req: Request<State>, next: Next<'a, State>) -> BoxFuture<'a, Response> {
        (self)(req, next)
    }
}
/// The remainder of a middleware chain, including the endpoint.
#[allow(missing_debug_implementations)]
pub struct Next<'a, State> {
    // Endpoint that terminates the chain once all middleware has run.
    pub(crate) endpoint: &'a DynEndpoint<State>,
    // Middleware not yet executed, in invocation order.
    pub(crate) next_middleware: &'a [Arc<dyn Middleware<State>>],
}
impl<'a, State: 'static> Next<'a, State> {
    /// Asynchronously execute the remaining middleware chain.
    ///
    /// Pops the next middleware off the slice and hands it the request along
    /// with the (shortened) chain; once the slice is exhausted, dispatches to
    /// the endpoint.
    pub fn run(mut self, req: Request<State>) -> BoxFuture<'a, Response> {
        match self.next_middleware.split_first() {
            Some((current, rest)) => {
                self.next_middleware = rest;
                current.handle(req, self)
            }
            None => self.endpoint.call(req),
        }
    }
}
| 28.694915 | 100 | 0.652688 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.